1/*-
2 * SPDX-License-Identifier: (BSD-2-Clause AND ISC)
3 *
4 * Copyright (c) 2002 Michael Shalayeff
5 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
21 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
25 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
26 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/*-
31 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
32 *
33 * Permission to use, copy, modify, and distribute this software for any
34 * purpose with or without fee is hereby granted, provided that the above
35 * copyright notice and this permission notice appear in all copies.
36 *
37 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
38 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
39 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
40 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
41 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
42 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
43 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
44 */
45
46/*
47 * $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $
48 *
49 * Revisions picked from OpenBSD after revision 1.110 import:
50 * 1.119 - don't m_copydata() beyond the len of mbuf in pfsync_input()
51 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
52 * 1.120, 1.175 - use monotonic time_uptime
53 * 1.122 - reduce number of updates for non-TCP sessions
54 * 1.125, 1.127 - rewrite merge or stale processing
55 * 1.128 - cleanups
56 * 1.146 - bzero() mbuf before sparsely filling it with data
57 * 1.170 - SIOCSIFMTU checks
58 * 1.126, 1.142 - deferred packets processing
59 * 1.173 - correct expire time processing
60 */
61
62#include <sys/cdefs.h>
63#include "opt_inet.h"
64#include "opt_inet6.h"
65#include "opt_pf.h"
66
67#include <sys/param.h>
68#include <sys/bus.h>
69#include <sys/endian.h>
70#include <sys/interrupt.h>
71#include <sys/kernel.h>
72#include <sys/lock.h>
73#include <sys/mbuf.h>
74#include <sys/module.h>
75#include <sys/mutex.h>
76#include <sys/nv.h>
77#include <sys/priv.h>
78#include <sys/smp.h>
79#include <sys/socket.h>
80#include <sys/sockio.h>
81#include <sys/sysctl.h>
82#include <sys/syslog.h>
83
84#include <net/bpf.h>
85#include <net/if.h>
86#include <net/if_var.h>
87#include <net/if_clone.h>
88#include <net/if_private.h>
89#include <net/if_types.h>
90#include <net/vnet.h>
91#include <net/pfvar.h>
92#include <net/route.h>
93#include <net/if_pfsync.h>
94
95#include <netinet/if_ether.h>
96#include <netinet/in.h>
97#include <netinet/in_var.h>
98#include <netinet6/in6_var.h>
99#include <netinet/ip.h>
100#include <netinet/ip6.h>
101#include <netinet/ip_carp.h>
102#include <netinet/ip_var.h>
103#include <netinet/tcp.h>
104#include <netinet/tcp_fsm.h>
105#include <netinet/tcp_seq.h>
106
107#include <netinet/ip6.h>
108#include <netinet6/ip6_var.h>
109#include <netinet6/scope6_var.h>
110
111#include <netpfil/pf/pfsync_nv.h>
112
113struct pfsync_bucket;
114struct pfsync_softc;
115
116union inet_template {
117	struct ip	ipv4;
118	struct ip6_hdr	ipv6;
119};
120
121#define PFSYNC_MINPKT ( \
122	sizeof(union inet_template) + \
123	sizeof(struct pfsync_header) + \
124	sizeof(struct pfsync_subheader) )
125
126static int	pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
127		    struct pfsync_state_peer *);
128static int	pfsync_in_clr(struct mbuf *, int, int, int, int);
129static int	pfsync_in_ins(struct mbuf *, int, int, int, int);
130static int	pfsync_in_iack(struct mbuf *, int, int, int, int);
131static int	pfsync_in_upd(struct mbuf *, int, int, int, int);
132static int	pfsync_in_upd_c(struct mbuf *, int, int, int, int);
133static int	pfsync_in_ureq(struct mbuf *, int, int, int, int);
134static int	pfsync_in_del_c(struct mbuf *, int, int, int, int);
135static int	pfsync_in_bus(struct mbuf *, int, int, int, int);
136static int	pfsync_in_tdb(struct mbuf *, int, int, int, int);
137static int	pfsync_in_eof(struct mbuf *, int, int, int, int);
138static int	pfsync_in_error(struct mbuf *, int, int, int, int);
139
140static int (*pfsync_acts[])(struct mbuf *, int, int, int, int) = {
141	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
142	pfsync_in_ins,			/* PFSYNC_ACT_INS_1301 */
143	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
144	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1301 */
145	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
146	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
147	pfsync_in_error,		/* PFSYNC_ACT_DEL */
148	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
149	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
150	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
151	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
152	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
153	pfsync_in_eof,			/* PFSYNC_ACT_EOF */
154	pfsync_in_ins,			/* PFSYNC_ACT_INS_1400 */
155	pfsync_in_upd,			/* PFSYNC_ACT_UPD_1400 */
156};
157
158struct pfsync_q {
159	void		(*write)(struct pf_kstate *, void *);
160	size_t		len;
161	u_int8_t	action;
162};
163
164/* We have the following sync queues */
165enum pfsync_q_id {
166	PFSYNC_Q_INS_1301,
167	PFSYNC_Q_INS_1400,
168	PFSYNC_Q_IACK,
169	PFSYNC_Q_UPD_1301,
170	PFSYNC_Q_UPD_1400,
171	PFSYNC_Q_UPD_C,
172	PFSYNC_Q_DEL_C,
173	PFSYNC_Q_COUNT,
174};
175
176/* Functions for building messages for given queue */
177static void	pfsync_out_state_1301(struct pf_kstate *, void *);
178static void	pfsync_out_state_1400(struct pf_kstate *, void *);
179static void	pfsync_out_iack(struct pf_kstate *, void *);
180static void	pfsync_out_upd_c(struct pf_kstate *, void *);
181static void	pfsync_out_del_c(struct pf_kstate *, void *);
182
183/* Attach those functions to queue */
184static struct pfsync_q pfsync_qs[] = {
185	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_INS_1301 },
186	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_INS_1400 },
187	{ pfsync_out_iack,       sizeof(struct pfsync_ins_ack),    PFSYNC_ACT_INS_ACK },
188	{ pfsync_out_state_1301, sizeof(struct pfsync_state_1301), PFSYNC_ACT_UPD_1301 },
189	{ pfsync_out_state_1400, sizeof(struct pfsync_state_1400), PFSYNC_ACT_UPD_1400 },
190	{ pfsync_out_upd_c,      sizeof(struct pfsync_upd_c),      PFSYNC_ACT_UPD_C },
191	{ pfsync_out_del_c,      sizeof(struct pfsync_del_c),      PFSYNC_ACT_DEL_C }
192};
193
194/* Map queue to pf_kstate->sync_state */
195static u_int8_t pfsync_qid_sstate[] = {
196	PFSYNC_S_INS,   /* PFSYNC_Q_INS_1301 */
197	PFSYNC_S_INS,   /* PFSYNC_Q_INS_1400 */
198	PFSYNC_S_IACK,  /* PFSYNC_Q_IACK */
199	PFSYNC_S_UPD,   /* PFSYNC_Q_UPD_1301 */
200	PFSYNC_S_UPD,   /* PFSYNC_Q_UPD_1400 */
201	PFSYNC_S_UPD_C, /* PFSYNC_Q_UPD_C */
202	PFSYNC_S_DEL_C, /* PFSYNC_Q_DEL_C */
203};
204
205/* Map pf_kstate->sync_state to queue */
206static enum pfsync_q_id pfsync_sstate_to_qid(u_int8_t);
207
208static void	pfsync_q_ins(struct pf_kstate *, int sync_state, bool);
209static void	pfsync_q_del(struct pf_kstate *, bool, struct pfsync_bucket *);
210
211static void	pfsync_update_state(struct pf_kstate *);
212static void	pfsync_tx(struct pfsync_softc *, struct mbuf *);
213
214struct pfsync_upd_req_item {
215	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
216	struct pfsync_upd_req			ur_msg;
217};
218
219struct pfsync_deferral {
220	struct pfsync_softc		*pd_sc;
221	TAILQ_ENTRY(pfsync_deferral)	pd_entry;
222	struct callout			pd_tmo;
223
224	struct pf_kstate		*pd_st;
225	struct mbuf			*pd_m;
226};
227
228struct pfsync_bucket
229{
230	int			b_id;
231	struct pfsync_softc	*b_sc;
232	struct mtx		b_mtx;
233	struct callout		b_tmo;
234	int			b_flags;
235#define	PFSYNCF_BUCKET_PUSH	0x00000001
236
237	size_t			b_len;
238	TAILQ_HEAD(, pf_kstate)			b_qs[PFSYNC_Q_COUNT];
239	TAILQ_HEAD(, pfsync_upd_req_item)	b_upd_req_list;
240	TAILQ_HEAD(, pfsync_deferral)		b_deferrals;
241	u_int			b_deferred;
242	uint8_t			*b_plus;
243	size_t			b_pluslen;
244
245	struct  ifaltq b_snd;
246};
247
248struct pfsync_softc {
249	/* Configuration */
250	struct ifnet		*sc_ifp;
251	struct ifnet		*sc_sync_if;
252	struct ip_moptions	sc_imo;
253	struct ip6_moptions	sc_im6o;
254	struct sockaddr_storage	sc_sync_peer;
255	uint32_t		sc_flags;
256	uint8_t			sc_maxupdates;
257	union inet_template     sc_template;
258	struct mtx		sc_mtx;
259	uint32_t		sc_version;
260
261	/* Queued data */
262	struct pfsync_bucket	*sc_buckets;
263
264	/* Bulk update info */
265	struct mtx		sc_bulk_mtx;
266	uint32_t		sc_ureq_sent;
267	int			sc_bulk_tries;
268	uint32_t		sc_ureq_received;
269	int			sc_bulk_hashid;
270	uint64_t		sc_bulk_stateid;
271	uint32_t		sc_bulk_creatorid;
272	struct callout		sc_bulk_tmo;
273	struct callout		sc_bulkfail_tmo;
274};
275
276#define	PFSYNC_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
277#define	PFSYNC_UNLOCK(sc)	mtx_unlock(&(sc)->sc_mtx)
278#define	PFSYNC_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
279
280#define PFSYNC_BUCKET_LOCK(b)		mtx_lock(&(b)->b_mtx)
281#define PFSYNC_BUCKET_UNLOCK(b)		mtx_unlock(&(b)->b_mtx)
282#define PFSYNC_BUCKET_LOCK_ASSERT(b)	mtx_assert(&(b)->b_mtx, MA_OWNED)
283
284#define	PFSYNC_BLOCK(sc)	mtx_lock(&(sc)->sc_bulk_mtx)
285#define	PFSYNC_BUNLOCK(sc)	mtx_unlock(&(sc)->sc_bulk_mtx)
286#define	PFSYNC_BLOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_bulk_mtx, MA_OWNED)
287
288#define PFSYNC_DEFER_TIMEOUT	20
289
290static const char pfsyncname[] = "pfsync";
291static MALLOC_DEFINE(M_PFSYNC, pfsyncname, "pfsync(4) data");
292VNET_DEFINE_STATIC(struct pfsync_softc	*, pfsyncif) = NULL;
293#define	V_pfsyncif		VNET(pfsyncif)
294VNET_DEFINE_STATIC(void *, pfsync_swi_cookie) = NULL;
295#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
296VNET_DEFINE_STATIC(struct intr_event *, pfsync_swi_ie);
297#define	V_pfsync_swi_ie		VNET(pfsync_swi_ie)
298VNET_DEFINE_STATIC(struct pfsyncstats, pfsyncstats);
299#define	V_pfsyncstats		VNET(pfsyncstats)
300VNET_DEFINE_STATIC(int, pfsync_carp_adj) = CARP_MAXSKEW;
301#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)
302VNET_DEFINE_STATIC(unsigned int, pfsync_defer_timeout) = PFSYNC_DEFER_TIMEOUT;
303#define	V_pfsync_defer_timeout	VNET(pfsync_defer_timeout)
304
305static void	pfsync_timeout(void *);
306static void	pfsync_push(struct pfsync_bucket *);
307static void	pfsync_push_all(struct pfsync_softc *);
308static void	pfsyncintr(void *);
309static int	pfsync_multicast_setup(struct pfsync_softc *, struct ifnet *,
310		    struct in_mfilter *, struct in6_mfilter *);
311static void	pfsync_multicast_cleanup(struct pfsync_softc *);
312static void	pfsync_pointers_init(void);
313static void	pfsync_pointers_uninit(void);
314static int	pfsync_init(void);
315static void	pfsync_uninit(void);
316
317static unsigned long pfsync_buckets;
318
319SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
320    "PFSYNC");
321SYSCTL_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_VNET | CTLFLAG_RW,
322    &VNET_NAME(pfsyncstats), pfsyncstats,
323    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
324SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_VNET | CTLFLAG_RW,
325    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
326SYSCTL_ULONG(_net_pfsync, OID_AUTO, pfsync_buckets, CTLFLAG_RDTUN,
327    &pfsync_buckets, 0, "Number of pfsync hash buckets");
328SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW,
329    &VNET_NAME(pfsync_defer_timeout), 0, "Deferred packet timeout (in ms)");
330
331static int	pfsync_clone_create(struct if_clone *, int, caddr_t);
332static void	pfsync_clone_destroy(struct ifnet *);
333static int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
334		    struct pf_state_peer *);
335static int	pfsyncoutput(struct ifnet *, struct mbuf *,
336		    const struct sockaddr *, struct route *);
337static int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
338
339static int	pfsync_defer(struct pf_kstate *, struct mbuf *);
340static void	pfsync_undefer(struct pfsync_deferral *, int);
341static void	pfsync_undefer_state_locked(struct pf_kstate *, int);
342static void	pfsync_undefer_state(struct pf_kstate *, int);
343static void	pfsync_defer_tmo(void *);
344
345static void	pfsync_request_update(u_int32_t, u_int64_t);
346static bool	pfsync_update_state_req(struct pf_kstate *);
347
348static void	pfsync_drop(struct pfsync_softc *);
349static void	pfsync_sendout(int, int);
350static void	pfsync_send_plus(void *, size_t);
351
352static void	pfsync_bulk_start(void);
353static void	pfsync_bulk_status(u_int8_t);
354static void	pfsync_bulk_update(void *);
355static void	pfsync_bulk_fail(void *);
356
357static void	pfsync_detach_ifnet(struct ifnet *);
358
359static int pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *,
360    struct pfsync_kstatus *);
361static int pfsync_kstatus_to_softc(struct pfsync_kstatus *,
362    struct pfsync_softc *);
363
364#ifdef IPSEC
365static void	pfsync_update_net_tdb(struct pfsync_tdb *);
366#endif
367static struct pfsync_bucket	*pfsync_get_bucket(struct pfsync_softc *,
368		    struct pf_kstate *);
369
370#define PFSYNC_MAX_BULKTRIES	12
371
372VNET_DEFINE(struct if_clone *, pfsync_cloner);
373#define	V_pfsync_cloner	VNET(pfsync_cloner)
374
375const struct in6_addr in6addr_linklocal_pfsync_group =
376	{{{ 0xff, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
377	    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0 }}};
378static int
379pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
380{
381	struct pfsync_softc *sc;
382	struct ifnet *ifp;
383	struct pfsync_bucket *b;
384	int c;
385	enum pfsync_q_id q;
386
387	if (unit != 0)
388		return (EINVAL);
389
390	if (! pfsync_buckets)
391		pfsync_buckets = mp_ncpus * 2;
392
393	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
394	sc->sc_flags |= PFSYNCF_OK;
395	sc->sc_maxupdates = 128;
396	sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;
397
398	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
399	if (ifp == NULL) {
400		free(sc, M_PFSYNC);
401		return (ENOSPC);
402	}
403	if_initname(ifp, pfsyncname, unit);
404	ifp->if_softc = sc;
405	ifp->if_ioctl = pfsyncioctl;
406	ifp->if_output = pfsyncoutput;
407	ifp->if_type = IFT_PFSYNC;
408	ifp->if_hdrlen = sizeof(struct pfsync_header);
409	ifp->if_mtu = ETHERMTU;
410	mtx_init(&sc->sc_mtx, pfsyncname, NULL, MTX_DEF);
411	mtx_init(&sc->sc_bulk_mtx, "pfsync bulk", NULL, MTX_DEF);
412	callout_init_mtx(&sc->sc_bulk_tmo, &sc->sc_bulk_mtx, 0);
413	callout_init_mtx(&sc->sc_bulkfail_tmo, &sc->sc_bulk_mtx, 0);
414
415	if_attach(ifp);
416
417	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
418
419	sc->sc_buckets = mallocarray(pfsync_buckets, sizeof(*sc->sc_buckets),
420	    M_PFSYNC, M_ZERO | M_WAITOK);
421	for (c = 0; c < pfsync_buckets; c++) {
422		b = &sc->sc_buckets[c];
423		mtx_init(&b->b_mtx, "pfsync bucket", NULL, MTX_DEF);
424
425		b->b_id = c;
426		b->b_sc = sc;
427		b->b_len = PFSYNC_MINPKT;
428
429		for (q = 0; q < PFSYNC_Q_COUNT; q++)
430			TAILQ_INIT(&b->b_qs[q]);
431
432		TAILQ_INIT(&b->b_upd_req_list);
433		TAILQ_INIT(&b->b_deferrals);
434
435		callout_init(&b->b_tmo, 1);
436
437		b->b_snd.ifq_maxlen = ifqmaxlen;
438	}
439
440	V_pfsyncif = sc;
441
442	return (0);
443}
444
445static void
446pfsync_clone_destroy(struct ifnet *ifp)
447{
448	struct pfsync_softc *sc = ifp->if_softc;
449	struct pfsync_bucket *b;
450	int c, ret;
451
452	for (c = 0; c < pfsync_buckets; c++) {
453		b = &sc->sc_buckets[c];
454		/*
455		 * At this stage, everything should have already been
456		 * cleared by pfsync_uninit(), and we have only to
457		 * drain callouts.
458		 */
459		PFSYNC_BUCKET_LOCK(b);
460		while (b->b_deferred > 0) {
461			struct pfsync_deferral *pd =
462			    TAILQ_FIRST(&b->b_deferrals);
463
464			ret = callout_stop(&pd->pd_tmo);
465			PFSYNC_BUCKET_UNLOCK(b);
466			if (ret > 0) {
467				pfsync_undefer(pd, 1);
468			} else {
469				callout_drain(&pd->pd_tmo);
470			}
471			PFSYNC_BUCKET_LOCK(b);
472		}
473		MPASS(b->b_deferred == 0);
474		MPASS(TAILQ_EMPTY(&b->b_deferrals));
475		PFSYNC_BUCKET_UNLOCK(b);
476
477		free(b->b_plus, M_PFSYNC);
478		b->b_plus = NULL;
479		b->b_pluslen = 0;
480
481		callout_drain(&b->b_tmo);
482	}
483
484	callout_drain(&sc->sc_bulkfail_tmo);
485	callout_drain(&sc->sc_bulk_tmo);
486
487	if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
488		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
489	bpfdetach(ifp);
490	if_detach(ifp);
491
492	pfsync_drop(sc);
493
494	if_free(ifp);
495	pfsync_multicast_cleanup(sc);
496	mtx_destroy(&sc->sc_mtx);
497	mtx_destroy(&sc->sc_bulk_mtx);
498
499	free(sc->sc_buckets, M_PFSYNC);
500	free(sc, M_PFSYNC);
501
502	V_pfsyncif = NULL;
503}
504
505static int
506pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
507    struct pf_state_peer *d)
508{
509	if (s->scrub.scrub_flag && d->scrub == NULL) {
510		d->scrub = uma_zalloc(V_pf_state_scrub_z, M_NOWAIT | M_ZERO);
511		if (d->scrub == NULL)
512			return (ENOMEM);
513	}
514
515	return (0);
516}
517
518static int
519pfsync_state_import(union pfsync_state_union *sp, int flags, int msg_version)
520{
521	struct pfsync_softc *sc = V_pfsyncif;
522#ifndef	__NO_STRICT_ALIGNMENT
523	struct pfsync_state_key key[2];
524#endif
525	struct pfsync_state_key *kw, *ks;
526	struct pf_kstate	*st = NULL;
527	struct pf_state_key *skw = NULL, *sks = NULL;
528	struct pf_krule *r = NULL;
529	struct pfi_kkif	*kif;
530	int error;
531
532	PF_RULES_RASSERT();
533
534	if (sp->pfs_1301.creatorid == 0) {
535		if (V_pf_status.debug >= PF_DEBUG_MISC)
536			printf("%s: invalid creator id: %08x\n", __func__,
537			    ntohl(sp->pfs_1301.creatorid));
538		return (EINVAL);
539	}
540
541	if ((kif = pfi_kkif_find(sp->pfs_1301.ifname)) == NULL) {
542		if (V_pf_status.debug >= PF_DEBUG_MISC)
543			printf("%s: unknown interface: %s\n", __func__,
544			    sp->pfs_1301.ifname);
545		if (flags & PFSYNC_SI_IOCTL)
546			return (EINVAL);
547		return (0);	/* skip this state */
548	}
549
550	/*
551	 * If the ruleset checksums match or the state is coming from the ioctl,
552	 * it's safe to associate the state with the rule of that number.
553	 */
554	if (sp->pfs_1301.rule != htonl(-1) && sp->pfs_1301.anchor == htonl(-1) &&
555	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->pfs_1301.rule) <
556	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
557		r = pf_main_ruleset.rules[
558		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->pfs_1301.rule)];
559	else
560		r = &V_pf_default_rule;
561
562	if ((r->max_states &&
563	    counter_u64_fetch(r->states_cur) >= r->max_states))
564		goto cleanup;
565
566	/*
567	 * XXXGL: consider M_WAITOK in ioctl path after.
568	 */
569	st = pf_alloc_state(M_NOWAIT);
570	if (__predict_false(st == NULL))
571		goto cleanup;
572
573	if ((skw = uma_zalloc(V_pf_state_key_z, M_NOWAIT)) == NULL)
574		goto cleanup;
575
576#ifndef	__NO_STRICT_ALIGNMENT
577	bcopy(&sp->pfs_1301.key, key, sizeof(struct pfsync_state_key) * 2);
578	kw = &key[PF_SK_WIRE];
579	ks = &key[PF_SK_STACK];
580#else
581	kw = &sp->pfs_1301.key[PF_SK_WIRE];
582	ks = &sp->pfs_1301.key[PF_SK_STACK];
583#endif
584
585	if (PF_ANEQ(&kw->addr[0], &ks->addr[0], sp->pfs_1301.af) ||
586	    PF_ANEQ(&kw->addr[1], &ks->addr[1], sp->pfs_1301.af) ||
587	    kw->port[0] != ks->port[0] ||
588	    kw->port[1] != ks->port[1]) {
589		sks = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
590		if (sks == NULL)
591			goto cleanup;
592	} else
593		sks = skw;
594
595	/* allocate memory for scrub info */
596	if (pfsync_alloc_scrub_memory(&sp->pfs_1301.src, &st->src) ||
597	    pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst))
598		goto cleanup;
599
600	/* Copy to state key(s). */
601	skw->addr[0] = kw->addr[0];
602	skw->addr[1] = kw->addr[1];
603	skw->port[0] = kw->port[0];
604	skw->port[1] = kw->port[1];
605	skw->proto = sp->pfs_1301.proto;
606	skw->af = sp->pfs_1301.af;
607	if (sks != skw) {
608		sks->addr[0] = ks->addr[0];
609		sks->addr[1] = ks->addr[1];
610		sks->port[0] = ks->port[0];
611		sks->port[1] = ks->port[1];
612		sks->proto = sp->pfs_1301.proto;
613		sks->af = sp->pfs_1301.af;
614	}
615
616	/* copy to state */
617	bcopy(&sp->pfs_1301.rt_addr, &st->rt_addr, sizeof(st->rt_addr));
618	st->creation = (time_uptime - ntohl(sp->pfs_1301.creation)) * 1000;
619	st->expire = pf_get_uptime();
620	if (sp->pfs_1301.expire) {
621		uint32_t timeout;
622
623		timeout = r->timeout[sp->pfs_1301.timeout];
624		if (!timeout)
625			timeout = V_pf_default_rule.timeout[sp->pfs_1301.timeout];
626
627		/* sp->expire may have been adaptively scaled by export. */
628		st->expire -= (timeout - ntohl(sp->pfs_1301.expire)) * 1000;
629	}
630
631	st->direction = sp->pfs_1301.direction;
632	st->act.log = sp->pfs_1301.log;
633	st->timeout = sp->pfs_1301.timeout;
634
635	switch (msg_version) {
636		case PFSYNC_MSG_VERSION_1301:
637			st->state_flags = sp->pfs_1301.state_flags;
638			/*
639			 * In FreeBSD 13 pfsync lacks many attributes. Copy them
640			 * from the rule if possible. If rule can't be matched
641			 * clear any set options as we can't recover their
642			 * parameters.
643			*/
644			if (r == &V_pf_default_rule) {
645				st->state_flags &= ~PFSTATE_SETMASK;
646			} else {
647				/*
648				 * Similar to pf_rule_to_actions(). This code
649				 * won't set the actions properly if they come
650				 * from multiple "match" rules as only rule
651				 * creating the state is send over pfsync.
652				 */
653				st->act.qid = r->qid;
654				st->act.pqid = r->pqid;
655				st->act.rtableid = r->rtableid;
656				if (r->scrub_flags & PFSTATE_SETTOS)
657					st->act.set_tos = r->set_tos;
658				st->act.min_ttl = r->min_ttl;
659				st->act.max_mss = r->max_mss;
660				st->state_flags |= (r->scrub_flags &
661				    (PFSTATE_NODF|PFSTATE_RANDOMID|
662				    PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|
663				    PFSTATE_SETPRIO));
664				if (r->dnpipe || r->dnrpipe) {
665					if (r->free_flags & PFRULE_DN_IS_PIPE)
666						st->state_flags |= PFSTATE_DN_IS_PIPE;
667					else
668						st->state_flags &= ~PFSTATE_DN_IS_PIPE;
669				}
670				st->act.dnpipe = r->dnpipe;
671				st->act.dnrpipe = r->dnrpipe;
672			}
673			break;
674		case PFSYNC_MSG_VERSION_1400:
675			st->state_flags = ntohs(sp->pfs_1400.state_flags);
676			st->act.qid = ntohs(sp->pfs_1400.qid);
677			st->act.pqid = ntohs(sp->pfs_1400.pqid);
678			st->act.dnpipe = ntohs(sp->pfs_1400.dnpipe);
679			st->act.dnrpipe = ntohs(sp->pfs_1400.dnrpipe);
680			st->act.rtableid = ntohl(sp->pfs_1400.rtableid);
681			st->act.min_ttl = sp->pfs_1400.min_ttl;
682			st->act.set_tos = sp->pfs_1400.set_tos;
683			st->act.max_mss = ntohs(sp->pfs_1400.max_mss);
684			st->act.set_prio[0] = sp->pfs_1400.set_prio[0];
685			st->act.set_prio[1] = sp->pfs_1400.set_prio[1];
686			st->rt = sp->pfs_1400.rt;
687			if (st->rt && (st->rt_kif = pfi_kkif_find(sp->pfs_1400.rt_ifname)) == NULL) {
688				if (V_pf_status.debug >= PF_DEBUG_MISC)
689					printf("%s: unknown route interface: %s\n",
690					    __func__, sp->pfs_1400.rt_ifname);
691				if (flags & PFSYNC_SI_IOCTL)
692					error = EINVAL;
693				else
694					error = 0;
695				goto cleanup_keys;
696			}
697			break;
698		default:
699			panic("%s: Unsupported pfsync_msg_version %d",
700			    __func__, msg_version);
701	}
702
703	st->id = sp->pfs_1301.id;
704	st->creatorid = sp->pfs_1301.creatorid;
705	pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
706	pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
707
708	st->rule.ptr = r;
709	st->nat_rule.ptr = NULL;
710	st->anchor.ptr = NULL;
711
712	st->pfsync_time = time_uptime;
713	st->sync_state = PFSYNC_S_NONE;
714
715	if (!(flags & PFSYNC_SI_IOCTL))
716		st->state_flags |= PFSTATE_NOSYNC;
717
718	if ((error = pf_state_insert(kif, kif, skw, sks, st)) != 0)
719		goto cleanup_state;
720
721	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
722	counter_u64_add(r->states_cur, 1);
723	counter_u64_add(r->states_tot, 1);
724
725	if (!(flags & PFSYNC_SI_IOCTL)) {
726		st->state_flags &= ~PFSTATE_NOSYNC;
727		if (st->state_flags & PFSTATE_ACK) {
728			struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
729			PFSYNC_BUCKET_LOCK(b);
730			pfsync_q_ins(st, PFSYNC_S_IACK, true);
731			PFSYNC_BUCKET_UNLOCK(b);
732
733			pfsync_push_all(sc);
734		}
735	}
736	st->state_flags &= ~PFSTATE_ACK;
737	PF_STATE_UNLOCK(st);
738
739	return (0);
740
741cleanup:
742	error = ENOMEM;
743cleanup_keys:
744	if (skw == sks)
745		sks = NULL;
746	uma_zfree(V_pf_state_key_z, skw);
747	uma_zfree(V_pf_state_key_z, sks);
748
749cleanup_state:	/* pf_state_insert() frees the state keys. */
750	if (st) {
751		st->timeout = PFTM_UNLINKED; /* appease an assert */
752		pf_free_state(st);
753	}
754	return (error);
755}
756
757#ifdef INET
758static int
759pfsync_input(struct mbuf **mp, int *offp __unused, int proto __unused)
760{
761	struct pfsync_softc *sc = V_pfsyncif;
762	struct mbuf *m = *mp;
763	struct ip *ip = mtod(m, struct ip *);
764	struct pfsync_header *ph;
765	struct pfsync_subheader subh;
766
767	int offset, len, flags = 0;
768	int rv;
769	uint16_t count;
770
771	PF_RULES_RLOCK_TRACKER;
772
773	*mp = NULL;
774	V_pfsyncstats.pfsyncs_ipackets++;
775
776	/* Verify that we have a sync interface configured. */
777	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
778	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
779		goto done;
780
781	/* verify that the packet came in on the right interface */
782	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
783		V_pfsyncstats.pfsyncs_badif++;
784		goto done;
785	}
786
787	if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
788	if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
789	/* verify that the IP TTL is 255. */
790	if (ip->ip_ttl != PFSYNC_DFLTTL) {
791		V_pfsyncstats.pfsyncs_badttl++;
792		goto done;
793	}
794
795	offset = ip->ip_hl << 2;
796	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
797		V_pfsyncstats.pfsyncs_hdrops++;
798		goto done;
799	}
800
801	if (offset + sizeof(*ph) > m->m_len) {
802		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
803			V_pfsyncstats.pfsyncs_hdrops++;
804			return (IPPROTO_DONE);
805		}
806		ip = mtod(m, struct ip *);
807	}
808	ph = (struct pfsync_header *)((char *)ip + offset);
809
810	/* verify the version */
811	if (ph->version != PFSYNC_VERSION) {
812		V_pfsyncstats.pfsyncs_badver++;
813		goto done;
814	}
815
816	len = ntohs(ph->len) + offset;
817	if (m->m_pkthdr.len < len) {
818		V_pfsyncstats.pfsyncs_badlen++;
819		goto done;
820	}
821
822	/*
823	 * Trusting pf_chksum during packet processing, as well as seeking
824	 * in interface name tree, require holding PF_RULES_RLOCK().
825	 */
826	PF_RULES_RLOCK();
827	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
828		flags = PFSYNC_SI_CKSUM;
829
830	offset += sizeof(*ph);
831	while (offset <= len - sizeof(subh)) {
832		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
833		offset += sizeof(subh);
834
835		if (subh.action >= PFSYNC_ACT_MAX) {
836			V_pfsyncstats.pfsyncs_badact++;
837			PF_RULES_RUNLOCK();
838			goto done;
839		}
840
841		count = ntohs(subh.count);
842		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
843		rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
844		if (rv == -1) {
845			PF_RULES_RUNLOCK();
846			return (IPPROTO_DONE);
847		}
848
849		offset += rv;
850	}
851	PF_RULES_RUNLOCK();
852
853done:
854	m_freem(m);
855	return (IPPROTO_DONE);
856}
857#endif
858
859#ifdef INET6
860static int
861pfsync6_input(struct mbuf **mp, int *offp __unused, int proto __unused)
862{
863	struct pfsync_softc *sc = V_pfsyncif;
864	struct mbuf *m = *mp;
865	struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
866	struct pfsync_header *ph;
867	struct pfsync_subheader subh;
868
869	int offset, len, flags = 0;
870	int rv;
871	uint16_t count;
872
873	PF_RULES_RLOCK_TRACKER;
874
875	*mp = NULL;
876	V_pfsyncstats.pfsyncs_ipackets++;
877
878	/* Verify that we have a sync interface configured. */
879	if (!sc || !sc->sc_sync_if || !V_pf_status.running ||
880	    (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
881		goto done;
882
883	/* verify that the packet came in on the right interface */
884	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
885		V_pfsyncstats.pfsyncs_badif++;
886		goto done;
887	}
888
889	if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
890	if_inc_counter(sc->sc_ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
891	/* verify that the IP TTL is 255. */
892	if (ip6->ip6_hlim != PFSYNC_DFLTTL) {
893		V_pfsyncstats.pfsyncs_badttl++;
894		goto done;
895	}
896
897
898	offset = sizeof(*ip6);
899	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
900		V_pfsyncstats.pfsyncs_hdrops++;
901		goto done;
902	}
903
904	if (offset + sizeof(*ph) > m->m_len) {
905		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
906			V_pfsyncstats.pfsyncs_hdrops++;
907			return (IPPROTO_DONE);
908		}
909		ip6 = mtod(m, struct ip6_hdr *);
910	}
911	ph = (struct pfsync_header *)((char *)ip6 + offset);
912
913	/* verify the version */
914	if (ph->version != PFSYNC_VERSION) {
915		V_pfsyncstats.pfsyncs_badver++;
916		goto done;
917	}
918
919	len = ntohs(ph->len) + offset;
920	if (m->m_pkthdr.len < len) {
921		V_pfsyncstats.pfsyncs_badlen++;
922		goto done;
923	}
924
925	/*
926	 * Trusting pf_chksum during packet processing, as well as seeking
927	 * in interface name tree, require holding PF_RULES_RLOCK().
928	 */
929	PF_RULES_RLOCK();
930	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
931		flags = PFSYNC_SI_CKSUM;
932
933	offset += sizeof(*ph);
934	while (offset <= len - sizeof(subh)) {
935		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
936		offset += sizeof(subh);
937
938		if (subh.action >= PFSYNC_ACT_MAX) {
939			V_pfsyncstats.pfsyncs_badact++;
940			PF_RULES_RUNLOCK();
941			goto done;
942		}
943
944		count = ntohs(subh.count);
945		V_pfsyncstats.pfsyncs_iacts[subh.action] += count;
946		rv = (*pfsync_acts[subh.action])(m, offset, count, flags, subh.action);
947		if (rv == -1) {
948			PF_RULES_RUNLOCK();
949			return (IPPROTO_DONE);
950		}
951
952		offset += rv;
953	}
954	PF_RULES_RUNLOCK();
955
956done:
957	m_freem(m);
958	return (IPPROTO_DONE);
959}
960#endif
961
962static int
963pfsync_in_clr(struct mbuf *m, int offset, int count, int flags, int action)
964{
965	struct pfsync_clr *clr;
966	struct mbuf *mp;
967	int len = sizeof(*clr) * count;
968	int i, offp;
969	u_int32_t creatorid;
970
971	mp = m_pulldown(m, offset, len, &offp);
972	if (mp == NULL) {
973		V_pfsyncstats.pfsyncs_badlen++;
974		return (-1);
975	}
976	clr = (struct pfsync_clr *)(mp->m_data + offp);
977
978	for (i = 0; i < count; i++) {
979		creatorid = clr[i].creatorid;
980
981		if (clr[i].ifname[0] != '\0' &&
982		    pfi_kkif_find(clr[i].ifname) == NULL)
983			continue;
984
985		for (int i = 0; i <= pf_hashmask; i++) {
986			struct pf_idhash *ih = &V_pf_idhash[i];
987			struct pf_kstate *s;
988relock:
989			PF_HASHROW_LOCK(ih);
990			LIST_FOREACH(s, &ih->states, entry) {
991				if (s->creatorid == creatorid) {
992					s->state_flags |= PFSTATE_NOSYNC;
993					pf_unlink_state(s);
994					goto relock;
995				}
996			}
997			PF_HASHROW_UNLOCK(ih);
998		}
999	}
1000
1001	return (len);
1002}
1003
1004static int
1005pfsync_in_ins(struct mbuf *m, int offset, int count, int flags, int action)
1006{
1007	struct mbuf *mp;
1008	union pfsync_state_union *sa, *sp;
1009	int i, offp, total_len, msg_version, msg_len;
1010
1011	switch (action) {
1012		case PFSYNC_ACT_INS_1301:
1013			msg_len = sizeof(struct pfsync_state_1301);
1014			total_len = msg_len * count;
1015			msg_version = PFSYNC_MSG_VERSION_1301;
1016			break;
1017		case PFSYNC_ACT_INS_1400:
1018			msg_len = sizeof(struct pfsync_state_1400);
1019			total_len = msg_len * count;
1020			msg_version = PFSYNC_MSG_VERSION_1400;
1021			break;
1022		default:
1023			V_pfsyncstats.pfsyncs_badact++;
1024			return (-1);
1025	}
1026
1027	mp = m_pulldown(m, offset, total_len, &offp);
1028	if (mp == NULL) {
1029		V_pfsyncstats.pfsyncs_badlen++;
1030		return (-1);
1031	}
1032	sa = (union pfsync_state_union *)(mp->m_data + offp);
1033
1034	for (i = 0; i < count; i++) {
1035		sp = (union pfsync_state_union *)((char *)sa + msg_len * i);
1036
1037		/* Check for invalid values. */
1038		if (sp->pfs_1301.timeout >= PFTM_MAX ||
1039		    sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
1040		    sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST ||
1041		    sp->pfs_1301.direction > PF_OUT ||
1042		    (sp->pfs_1301.af != AF_INET &&
1043		    sp->pfs_1301.af != AF_INET6)) {
1044			if (V_pf_status.debug >= PF_DEBUG_MISC)
1045				printf("%s: invalid value\n", __func__);
1046			V_pfsyncstats.pfsyncs_badval++;
1047			continue;
1048		}
1049
1050		if (pfsync_state_import(sp, flags, msg_version) == ENOMEM)
1051			/* Drop out, but process the rest of the actions. */
1052			break;
1053	}
1054
1055	return (total_len);
1056}
1057
1058static int
1059pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action)
1060{
1061	struct pfsync_ins_ack *ia, *iaa;
1062	struct pf_kstate *st;
1063
1064	struct mbuf *mp;
1065	int len = count * sizeof(*ia);
1066	int offp, i;
1067
1068	mp = m_pulldown(m, offset, len, &offp);
1069	if (mp == NULL) {
1070		V_pfsyncstats.pfsyncs_badlen++;
1071		return (-1);
1072	}
1073	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);
1074
1075	for (i = 0; i < count; i++) {
1076		ia = &iaa[i];
1077
1078		st = pf_find_state_byid(ia->id, ia->creatorid);
1079		if (st == NULL)
1080			continue;
1081
1082		if (st->state_flags & PFSTATE_ACK) {
1083			pfsync_undefer_state(st, 0);
1084		}
1085		PF_STATE_UNLOCK(st);
1086	}
1087	/*
1088	 * XXX this is not yet implemented, but we know the size of the
1089	 * message so we can skip it.
1090	 */
1091
1092	return (count * sizeof(struct pfsync_ins_ack));
1093}
1094
1095static int
1096pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
1097    struct pfsync_state_peer *dst)
1098{
1099	int sync = 0;
1100
1101	PF_STATE_LOCK_ASSERT(st);
1102
1103	/*
1104	 * The state should never go backwards except
1105	 * for syn-proxy states.  Neither should the
1106	 * sequence window slide backwards.
1107	 */
1108	if ((st->src.state > src->state &&
1109	    (st->src.state < PF_TCPS_PROXY_SRC ||
1110	    src->state >= PF_TCPS_PROXY_SRC)) ||
1111
1112	    (st->src.state == src->state &&
1113	    SEQ_GT(st->src.seqlo, ntohl(src->seqlo))))
1114		sync++;
1115	else
1116		pf_state_peer_ntoh(src, &st->src);
1117
1118	if ((st->dst.state > dst->state) ||
1119
1120	    (st->dst.state >= TCPS_SYN_SENT &&
1121	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
1122		sync++;
1123	else
1124		pf_state_peer_ntoh(dst, &st->dst);
1125
1126	return (sync);
1127}
1128
1129static int
1130pfsync_in_upd(struct mbuf *m, int offset, int count, int flags, int action)
1131{
1132	struct pfsync_softc *sc = V_pfsyncif;
1133	union pfsync_state_union *sa, *sp;
1134	struct pf_kstate *st;
1135	struct mbuf *mp;
1136	int sync, offp, i, total_len, msg_len, msg_version;
1137
1138	switch (action) {
1139		case PFSYNC_ACT_UPD_1301:
1140			msg_len = sizeof(struct pfsync_state_1301);
1141			total_len = msg_len * count;
1142			msg_version = PFSYNC_MSG_VERSION_1301;
1143			break;
1144		case PFSYNC_ACT_UPD_1400:
1145			msg_len = sizeof(struct pfsync_state_1400);
1146			total_len = msg_len * count;
1147			msg_version = PFSYNC_MSG_VERSION_1400;
1148			break;
1149		default:
1150			V_pfsyncstats.pfsyncs_badact++;
1151			return (-1);
1152	}
1153
1154	mp = m_pulldown(m, offset, total_len, &offp);
1155	if (mp == NULL) {
1156		V_pfsyncstats.pfsyncs_badlen++;
1157		return (-1);
1158	}
1159	sa = (union pfsync_state_union *)(mp->m_data + offp);
1160
1161	for (i = 0; i < count; i++) {
1162		sp = (union pfsync_state_union *)((char *)sa + msg_len * i);
1163
1164		/* check for invalid values */
1165		if (sp->pfs_1301.timeout >= PFTM_MAX ||
1166		    sp->pfs_1301.src.state > PF_TCPS_PROXY_DST ||
1167		    sp->pfs_1301.dst.state > PF_TCPS_PROXY_DST) {
1168			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1169				printf("pfsync_input: PFSYNC_ACT_UPD: "
1170				    "invalid value\n");
1171			}
1172			V_pfsyncstats.pfsyncs_badval++;
1173			continue;
1174		}
1175
1176		st = pf_find_state_byid(sp->pfs_1301.id, sp->pfs_1301.creatorid);
1177		if (st == NULL) {
1178			/* insert the update */
1179			if (pfsync_state_import(sp, flags, msg_version))
1180				V_pfsyncstats.pfsyncs_badstate++;
1181			continue;
1182		}
1183
1184		if (st->state_flags & PFSTATE_ACK) {
1185			pfsync_undefer_state(st, 1);
1186		}
1187
1188		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
1189			sync = pfsync_upd_tcp(st, &sp->pfs_1301.src, &sp->pfs_1301.dst);
1190		else {
1191			sync = 0;
1192
1193			/*
1194			 * Non-TCP protocol state machine always go
1195			 * forwards
1196			 */
1197			if (st->src.state > sp->pfs_1301.src.state)
1198				sync++;
1199			else
1200				pf_state_peer_ntoh(&sp->pfs_1301.src, &st->src);
1201			if (st->dst.state > sp->pfs_1301.dst.state)
1202				sync++;
1203			else
1204				pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
1205		}
1206		if (sync < 2) {
1207			pfsync_alloc_scrub_memory(&sp->pfs_1301.dst, &st->dst);
1208			pf_state_peer_ntoh(&sp->pfs_1301.dst, &st->dst);
1209			st->expire = pf_get_uptime();
1210			st->timeout = sp->pfs_1301.timeout;
1211		}
1212		st->pfsync_time = time_uptime;
1213
1214		if (sync) {
1215			V_pfsyncstats.pfsyncs_stale++;
1216
1217			pfsync_update_state(st);
1218			PF_STATE_UNLOCK(st);
1219			pfsync_push_all(sc);
1220			continue;
1221		}
1222		PF_STATE_UNLOCK(st);
1223	}
1224
1225	return (total_len);
1226}
1227
1228static int
1229pfsync_in_upd_c(struct mbuf *m, int offset, int count, int flags, int action)
1230{
1231	struct pfsync_softc *sc = V_pfsyncif;
1232	struct pfsync_upd_c *ua, *up;
1233	struct pf_kstate *st;
1234	int len = count * sizeof(*up);
1235	int sync;
1236	struct mbuf *mp;
1237	int offp, i;
1238
1239	mp = m_pulldown(m, offset, len, &offp);
1240	if (mp == NULL) {
1241		V_pfsyncstats.pfsyncs_badlen++;
1242		return (-1);
1243	}
1244	ua = (struct pfsync_upd_c *)(mp->m_data + offp);
1245
1246	for (i = 0; i < count; i++) {
1247		up = &ua[i];
1248
1249		/* check for invalid values */
1250		if (up->timeout >= PFTM_MAX ||
1251		    up->src.state > PF_TCPS_PROXY_DST ||
1252		    up->dst.state > PF_TCPS_PROXY_DST) {
1253			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1254				printf("pfsync_input: "
1255				    "PFSYNC_ACT_UPD_C: "
1256				    "invalid value\n");
1257			}
1258			V_pfsyncstats.pfsyncs_badval++;
1259			continue;
1260		}
1261
1262		st = pf_find_state_byid(up->id, up->creatorid);
1263		if (st == NULL) {
1264			/* We don't have this state. Ask for it. */
1265			PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
1266			pfsync_request_update(up->creatorid, up->id);
1267			PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
1268			continue;
1269		}
1270
1271		if (st->state_flags & PFSTATE_ACK) {
1272			pfsync_undefer_state(st, 1);
1273		}
1274
1275		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
1276			sync = pfsync_upd_tcp(st, &up->src, &up->dst);
1277		else {
1278			sync = 0;
1279
1280			/*
1281			 * Non-TCP protocol state machine always go
1282			 * forwards
1283			 */
1284			if (st->src.state > up->src.state)
1285				sync++;
1286			else
1287				pf_state_peer_ntoh(&up->src, &st->src);
1288			if (st->dst.state > up->dst.state)
1289				sync++;
1290			else
1291				pf_state_peer_ntoh(&up->dst, &st->dst);
1292		}
1293		if (sync < 2) {
1294			pfsync_alloc_scrub_memory(&up->dst, &st->dst);
1295			pf_state_peer_ntoh(&up->dst, &st->dst);
1296			st->expire = pf_get_uptime();
1297			st->timeout = up->timeout;
1298		}
1299		st->pfsync_time = time_uptime;
1300
1301		if (sync) {
1302			V_pfsyncstats.pfsyncs_stale++;
1303
1304			pfsync_update_state(st);
1305			PF_STATE_UNLOCK(st);
1306			pfsync_push_all(sc);
1307			continue;
1308		}
1309		PF_STATE_UNLOCK(st);
1310	}
1311
1312	return (len);
1313}
1314
1315static int
1316pfsync_in_ureq(struct mbuf *m, int offset, int count, int flags, int action)
1317{
1318	struct pfsync_upd_req *ur, *ura;
1319	struct mbuf *mp;
1320	int len = count * sizeof(*ur);
1321	int i, offp;
1322
1323	struct pf_kstate *st;
1324
1325	mp = m_pulldown(m, offset, len, &offp);
1326	if (mp == NULL) {
1327		V_pfsyncstats.pfsyncs_badlen++;
1328		return (-1);
1329	}
1330	ura = (struct pfsync_upd_req *)(mp->m_data + offp);
1331
1332	for (i = 0; i < count; i++) {
1333		ur = &ura[i];
1334
1335		if (ur->id == 0 && ur->creatorid == 0)
1336			pfsync_bulk_start();
1337		else {
1338			st = pf_find_state_byid(ur->id, ur->creatorid);
1339			if (st == NULL) {
1340				V_pfsyncstats.pfsyncs_badstate++;
1341				continue;
1342			}
1343			if (st->state_flags & PFSTATE_NOSYNC) {
1344				PF_STATE_UNLOCK(st);
1345				continue;
1346			}
1347
1348			pfsync_update_state_req(st);
1349			PF_STATE_UNLOCK(st);
1350		}
1351	}
1352
1353	return (len);
1354}
1355
1356static int
1357pfsync_in_del_c(struct mbuf *m, int offset, int count, int flags, int action)
1358{
1359	struct mbuf *mp;
1360	struct pfsync_del_c *sa, *sp;
1361	struct pf_kstate *st;
1362	int len = count * sizeof(*sp);
1363	int offp, i;
1364
1365	mp = m_pulldown(m, offset, len, &offp);
1366	if (mp == NULL) {
1367		V_pfsyncstats.pfsyncs_badlen++;
1368		return (-1);
1369	}
1370	sa = (struct pfsync_del_c *)(mp->m_data + offp);
1371
1372	for (i = 0; i < count; i++) {
1373		sp = &sa[i];
1374
1375		st = pf_find_state_byid(sp->id, sp->creatorid);
1376		if (st == NULL) {
1377			V_pfsyncstats.pfsyncs_badstate++;
1378			continue;
1379		}
1380
1381		st->state_flags |= PFSTATE_NOSYNC;
1382		pf_unlink_state(st);
1383	}
1384
1385	return (len);
1386}
1387
1388static int
1389pfsync_in_bus(struct mbuf *m, int offset, int count, int flags, int action)
1390{
1391	struct pfsync_softc *sc = V_pfsyncif;
1392	struct pfsync_bus *bus;
1393	struct mbuf *mp;
1394	int len = count * sizeof(*bus);
1395	int offp;
1396
1397	PFSYNC_BLOCK(sc);
1398
1399	/* If we're not waiting for a bulk update, who cares. */
1400	if (sc->sc_ureq_sent == 0) {
1401		PFSYNC_BUNLOCK(sc);
1402		return (len);
1403	}
1404
1405	mp = m_pulldown(m, offset, len, &offp);
1406	if (mp == NULL) {
1407		PFSYNC_BUNLOCK(sc);
1408		V_pfsyncstats.pfsyncs_badlen++;
1409		return (-1);
1410	}
1411	bus = (struct pfsync_bus *)(mp->m_data + offp);
1412
1413	switch (bus->status) {
1414	case PFSYNC_BUS_START:
1415		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
1416		    V_pf_limits[PF_LIMIT_STATES].limit /
1417		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
1418		    sizeof(union pfsync_state_union)),
1419		    pfsync_bulk_fail, sc);
1420		if (V_pf_status.debug >= PF_DEBUG_MISC)
1421			printf("pfsync: received bulk update start\n");
1422		break;
1423
1424	case PFSYNC_BUS_END:
1425		if (time_uptime - ntohl(bus->endtime) >=
1426		    sc->sc_ureq_sent) {
1427			/* that's it, we're happy */
1428			sc->sc_ureq_sent = 0;
1429			sc->sc_bulk_tries = 0;
1430			callout_stop(&sc->sc_bulkfail_tmo);
1431			if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
1432				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
1433				    "pfsync bulk done");
1434			sc->sc_flags |= PFSYNCF_OK;
1435			if (V_pf_status.debug >= PF_DEBUG_MISC)
1436				printf("pfsync: received valid "
1437				    "bulk update end\n");
1438		} else {
1439			if (V_pf_status.debug >= PF_DEBUG_MISC)
1440				printf("pfsync: received invalid "
1441				    "bulk update end: bad timestamp\n");
1442		}
1443		break;
1444	}
1445	PFSYNC_BUNLOCK(sc);
1446
1447	return (len);
1448}
1449
1450static int
1451pfsync_in_tdb(struct mbuf *m, int offset, int count, int flags, int action)
1452{
1453	int len = count * sizeof(struct pfsync_tdb);
1454
1455#if defined(IPSEC)
1456	struct pfsync_tdb *tp;
1457	struct mbuf *mp;
1458	int offp;
1459	int i;
1460	int s;
1461
1462	mp = m_pulldown(m, offset, len, &offp);
1463	if (mp == NULL) {
1464		V_pfsyncstats.pfsyncs_badlen++;
1465		return (-1);
1466	}
1467	tp = (struct pfsync_tdb *)(mp->m_data + offp);
1468
1469	for (i = 0; i < count; i++)
1470		pfsync_update_net_tdb(&tp[i]);
1471#endif
1472
1473	return (len);
1474}
1475
1476#if defined(IPSEC)
1477/* Update an in-kernel tdb. Silently fail if no tdb is found. */
1478static void
1479pfsync_update_net_tdb(struct pfsync_tdb *pt)
1480{
1481	struct tdb		*tdb;
1482	int			 s;
1483
1484	/* check for invalid values */
1485	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1486	    (pt->dst.sa.sa_family != AF_INET &&
1487	    pt->dst.sa.sa_family != AF_INET6))
1488		goto bad;
1489
1490	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
1491	if (tdb) {
1492		pt->rpl = ntohl(pt->rpl);
1493		pt->cur_bytes = (unsigned long long)be64toh(pt->cur_bytes);
1494
1495		/* Neither replay nor byte counter should ever decrease. */
1496		if (pt->rpl < tdb->tdb_rpl ||
1497		    pt->cur_bytes < tdb->tdb_cur_bytes) {
1498			goto bad;
1499		}
1500
1501		tdb->tdb_rpl = pt->rpl;
1502		tdb->tdb_cur_bytes = pt->cur_bytes;
1503	}
1504	return;
1505
1506bad:
1507	if (V_pf_status.debug >= PF_DEBUG_MISC)
1508		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1509		    "invalid value\n");
1510	V_pfsyncstats.pfsyncs_badstate++;
1511	return;
1512}
1513#endif
1514
1515static int
1516pfsync_in_eof(struct mbuf *m, int offset, int count, int flags, int action)
1517{
1518	/* check if we are at the right place in the packet */
1519	if (offset != m->m_pkthdr.len)
1520		V_pfsyncstats.pfsyncs_badlen++;
1521
1522	/* we're done. free and let the caller return */
1523	m_freem(m);
1524	return (-1);
1525}
1526
1527static int
1528pfsync_in_error(struct mbuf *m, int offset, int count, int flags, int action)
1529{
1530	V_pfsyncstats.pfsyncs_badact++;
1531
1532	m_freem(m);
1533	return (-1);
1534}
1535
1536static int
1537pfsyncoutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
1538	struct route *rt)
1539{
1540	m_freem(m);
1541	return (0);
1542}
1543
1544/* ARGSUSED */
1545static int
1546pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1547{
1548	struct pfsync_softc *sc = ifp->if_softc;
1549	struct ifreq *ifr = (struct ifreq *)data;
1550	struct pfsyncreq pfsyncr;
1551	size_t nvbuflen;
1552	int error;
1553	int c;
1554
1555	switch (cmd) {
1556	case SIOCSIFFLAGS:
1557		PFSYNC_LOCK(sc);
1558		if (ifp->if_flags & IFF_UP) {
1559			ifp->if_drv_flags |= IFF_DRV_RUNNING;
1560			PFSYNC_UNLOCK(sc);
1561			pfsync_pointers_init();
1562		} else {
1563			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1564			PFSYNC_UNLOCK(sc);
1565			pfsync_pointers_uninit();
1566		}
1567		break;
1568	case SIOCSIFMTU:
1569		if (!sc->sc_sync_if ||
1570		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
1571		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
1572			return (EINVAL);
1573		if (ifr->ifr_mtu < ifp->if_mtu) {
1574			for (c = 0; c < pfsync_buckets; c++) {
1575				PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
1576				if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT)
1577					pfsync_sendout(1, c);
1578				PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
1579			}
1580		}
1581		ifp->if_mtu = ifr->ifr_mtu;
1582		break;
1583	case SIOCGETPFSYNC:
1584		bzero(&pfsyncr, sizeof(pfsyncr));
1585		PFSYNC_LOCK(sc);
1586		if (sc->sc_sync_if) {
1587			strlcpy(pfsyncr.pfsyncr_syncdev,
1588			    sc->sc_sync_if->if_xname, IFNAMSIZ);
1589		}
1590		pfsyncr.pfsyncr_syncpeer = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
1591		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1592		pfsyncr.pfsyncr_defer = sc->sc_flags;
1593		PFSYNC_UNLOCK(sc);
1594		return (copyout(&pfsyncr, ifr_data_get_ptr(ifr),
1595		    sizeof(pfsyncr)));
1596
1597	case SIOCGETPFSYNCNV:
1598	    {
1599		nvlist_t *nvl_syncpeer;
1600		nvlist_t *nvl = nvlist_create(0);
1601
1602		if (nvl == NULL)
1603			return (ENOMEM);
1604
1605		if (sc->sc_sync_if)
1606			nvlist_add_string(nvl, "syncdev", sc->sc_sync_if->if_xname);
1607		nvlist_add_number(nvl, "maxupdates", sc->sc_maxupdates);
1608		nvlist_add_number(nvl, "flags", sc->sc_flags);
1609		nvlist_add_number(nvl, "version", sc->sc_version);
1610		if ((nvl_syncpeer = pfsync_sockaddr_to_syncpeer_nvlist(&sc->sc_sync_peer)) != NULL)
1611			nvlist_add_nvlist(nvl, "syncpeer", nvl_syncpeer);
1612
1613		void *packed = NULL;
1614		packed = nvlist_pack(nvl, &nvbuflen);
1615		if (packed == NULL) {
1616			free(packed, M_NVLIST);
1617			nvlist_destroy(nvl);
1618			return (ENOMEM);
1619		}
1620
1621		if (nvbuflen > ifr->ifr_cap_nv.buf_length) {
1622			ifr->ifr_cap_nv.length = nvbuflen;
1623			ifr->ifr_cap_nv.buffer = NULL;
1624			free(packed, M_NVLIST);
1625			nvlist_destroy(nvl);
1626			return (EFBIG);
1627		}
1628
1629		ifr->ifr_cap_nv.length = nvbuflen;
1630		error = copyout(packed, ifr->ifr_cap_nv.buffer, nvbuflen);
1631
1632		nvlist_destroy(nvl);
1633		nvlist_destroy(nvl_syncpeer);
1634		free(packed, M_NVLIST);
1635		break;
1636	    }
1637
1638	case SIOCSETPFSYNC:
1639	    {
1640		struct pfsync_kstatus status;
1641
1642		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1643			return (error);
1644		if ((error = copyin(ifr_data_get_ptr(ifr), &pfsyncr,
1645		    sizeof(pfsyncr))))
1646			return (error);
1647
1648		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
1649		pfsync_pfsyncreq_to_kstatus(&pfsyncr, &status);
1650
1651		error = pfsync_kstatus_to_softc(&status, sc);
1652		return (error);
1653	    }
1654	case SIOCSETPFSYNCNV:
1655	    {
1656		struct pfsync_kstatus status;
1657		void *data;
1658		nvlist_t *nvl;
1659
1660		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1661			return (error);
1662		if (ifr->ifr_cap_nv.length > IFR_CAP_NV_MAXBUFSIZE)
1663			return (EINVAL);
1664
1665		data = malloc(ifr->ifr_cap_nv.length, M_TEMP, M_WAITOK);
1666
1667		if ((error = copyin(ifr->ifr_cap_nv.buffer, data,
1668		    ifr->ifr_cap_nv.length)) != 0) {
1669			free(data, M_TEMP);
1670			return (error);
1671		}
1672
1673		if ((nvl = nvlist_unpack(data, ifr->ifr_cap_nv.length, 0)) == NULL) {
1674			free(data, M_TEMP);
1675			return (EINVAL);
1676		}
1677
1678		memset((char *)&status, 0, sizeof(struct pfsync_kstatus));
1679		pfsync_nvstatus_to_kstatus(nvl, &status);
1680
1681		nvlist_destroy(nvl);
1682		free(data, M_TEMP);
1683
1684		error = pfsync_kstatus_to_softc(&status, sc);
1685		return (error);
1686	    }
1687	default:
1688		return (ENOTTY);
1689	}
1690
1691	return (0);
1692}
1693
1694static void
1695pfsync_out_state_1301(struct pf_kstate *st, void *buf)
1696{
1697	union pfsync_state_union *sp = buf;
1698
1699	pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1301);
1700}
1701
1702static void
1703pfsync_out_state_1400(struct pf_kstate *st, void *buf)
1704{
1705	union pfsync_state_union *sp = buf;
1706
1707	pfsync_state_export(sp, st, PFSYNC_MSG_VERSION_1400);
1708}
1709
1710static void
1711pfsync_out_iack(struct pf_kstate *st, void *buf)
1712{
1713	struct pfsync_ins_ack *iack = buf;
1714
1715	iack->id = st->id;
1716	iack->creatorid = st->creatorid;
1717}
1718
1719static void
1720pfsync_out_upd_c(struct pf_kstate *st, void *buf)
1721{
1722	struct pfsync_upd_c *up = buf;
1723
1724	bzero(up, sizeof(*up));
1725	up->id = st->id;
1726	pf_state_peer_hton(&st->src, &up->src);
1727	pf_state_peer_hton(&st->dst, &up->dst);
1728	up->creatorid = st->creatorid;
1729	up->timeout = st->timeout;
1730}
1731
1732static void
1733pfsync_out_del_c(struct pf_kstate *st, void *buf)
1734{
1735	struct pfsync_del_c *dp = buf;
1736
1737	dp->id = st->id;
1738	dp->creatorid = st->creatorid;
1739	st->state_flags |= PFSTATE_NOSYNC;
1740}
1741
1742static void
1743pfsync_drop(struct pfsync_softc *sc)
1744{
1745	struct pf_kstate *st, *next;
1746	struct pfsync_upd_req_item *ur;
1747	struct pfsync_bucket *b;
1748	int c;
1749	enum pfsync_q_id q;
1750
1751	for (c = 0; c < pfsync_buckets; c++) {
1752		b = &sc->sc_buckets[c];
1753		for (q = 0; q < PFSYNC_Q_COUNT; q++) {
1754			if (TAILQ_EMPTY(&b->b_qs[q]))
1755				continue;
1756
1757			TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, next) {
1758				KASSERT(st->sync_state == pfsync_qid_sstate[q],
1759					("%s: st->sync_state == q",
1760						__func__));
1761				st->sync_state = PFSYNC_S_NONE;
1762				pf_release_state(st);
1763			}
1764			TAILQ_INIT(&b->b_qs[q]);
1765		}
1766
1767		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1768			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1769			free(ur, M_PFSYNC);
1770		}
1771
1772		b->b_len = PFSYNC_MINPKT;
1773		free(b->b_plus, M_PFSYNC);
1774		b->b_plus = NULL;
1775		b->b_pluslen = 0;
1776	}
1777}
1778
1779static void
1780pfsync_sendout(int schedswi, int c)
1781{
1782	struct pfsync_softc *sc = V_pfsyncif;
1783	struct ifnet *ifp = sc->sc_ifp;
1784	struct mbuf *m;
1785	struct pfsync_header *ph;
1786	struct pfsync_subheader *subh;
1787	struct pf_kstate *st, *st_next;
1788	struct pfsync_upd_req_item *ur;
1789	struct pfsync_bucket *b = &sc->sc_buckets[c];
1790	size_t len;
1791	int aflen, offset, count = 0;
1792	enum pfsync_q_id q;
1793
1794	KASSERT(sc != NULL, ("%s: null sc", __func__));
1795	KASSERT(b->b_len > PFSYNC_MINPKT,
1796	    ("%s: sc_len %zu", __func__, b->b_len));
1797	PFSYNC_BUCKET_LOCK_ASSERT(b);
1798
1799	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
1800		pfsync_drop(sc);
1801		return;
1802	}
1803
1804	m = m_get2(max_linkhdr + b->b_len, M_NOWAIT, MT_DATA, M_PKTHDR);
1805	if (m == NULL) {
1806		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1807		V_pfsyncstats.pfsyncs_onomem++;
1808		return;
1809	}
1810	m->m_data += max_linkhdr;
1811	bzero(m->m_data, b->b_len);
1812
1813	len = b->b_len;
1814
1815	/* build the ip header */
1816	switch (sc->sc_sync_peer.ss_family) {
1817#ifdef INET
1818	case AF_INET:
1819	    {
1820		struct ip *ip;
1821
1822		ip = mtod(m, struct ip *);
1823		bcopy(&sc->sc_template.ipv4, ip, sizeof(*ip));
1824		aflen = offset = sizeof(*ip);
1825
1826		len -= sizeof(union inet_template) - sizeof(struct ip);
1827		ip->ip_len = htons(len);
1828		ip_fillid(ip);
1829		break;
1830	    }
1831#endif
1832#ifdef INET6
1833	case AF_INET6:
1834		{
1835		struct ip6_hdr *ip6;
1836
1837		ip6 = mtod(m, struct ip6_hdr *);
1838		bcopy(&sc->sc_template.ipv6, ip6, sizeof(*ip6));
1839		aflen = offset = sizeof(*ip6);
1840
1841		len -= sizeof(union inet_template) - sizeof(struct ip6_hdr);
1842		ip6->ip6_plen = htons(len);
1843		break;
1844		}
1845#endif
1846	default:
1847		m_freem(m);
1848		return;
1849	}
1850	m->m_len = m->m_pkthdr.len = len;
1851
1852	/* build the pfsync header */
1853	ph = (struct pfsync_header *)(m->m_data + offset);
1854	offset += sizeof(*ph);
1855
1856	ph->version = PFSYNC_VERSION;
1857	ph->len = htons(len - aflen);
1858	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
1859
1860	/* walk the queues */
1861	for (q = 0; q < PFSYNC_Q_COUNT; q++) {
1862		if (TAILQ_EMPTY(&b->b_qs[q]))
1863			continue;
1864
1865		subh = (struct pfsync_subheader *)(m->m_data + offset);
1866		offset += sizeof(*subh);
1867
1868		count = 0;
1869		TAILQ_FOREACH_SAFE(st, &b->b_qs[q], sync_list, st_next) {
1870			KASSERT(st->sync_state == pfsync_qid_sstate[q],
1871				("%s: st->sync_state == q",
1872					__func__));
1873			/*
1874			 * XXXGL: some of write methods do unlocked reads
1875			 * of state data :(
1876			 */
1877			pfsync_qs[q].write(st, m->m_data + offset);
1878			offset += pfsync_qs[q].len;
1879			st->sync_state = PFSYNC_S_NONE;
1880			pf_release_state(st);
1881			count++;
1882		}
1883		TAILQ_INIT(&b->b_qs[q]);
1884
1885		subh->action = pfsync_qs[q].action;
1886		subh->count = htons(count);
1887		V_pfsyncstats.pfsyncs_oacts[pfsync_qs[q].action] += count;
1888	}
1889
1890	if (!TAILQ_EMPTY(&b->b_upd_req_list)) {
1891		subh = (struct pfsync_subheader *)(m->m_data + offset);
1892		offset += sizeof(*subh);
1893
1894		count = 0;
1895		while ((ur = TAILQ_FIRST(&b->b_upd_req_list)) != NULL) {
1896			TAILQ_REMOVE(&b->b_upd_req_list, ur, ur_entry);
1897
1898			bcopy(&ur->ur_msg, m->m_data + offset,
1899			    sizeof(ur->ur_msg));
1900			offset += sizeof(ur->ur_msg);
1901			free(ur, M_PFSYNC);
1902			count++;
1903		}
1904
1905		subh->action = PFSYNC_ACT_UPD_REQ;
1906		subh->count = htons(count);
1907		V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_UPD_REQ] += count;
1908	}
1909
1910	/* has someone built a custom region for us to add? */
1911	if (b->b_plus != NULL) {
1912		bcopy(b->b_plus, m->m_data + offset, b->b_pluslen);
1913		offset += b->b_pluslen;
1914
1915		free(b->b_plus, M_PFSYNC);
1916		b->b_plus = NULL;
1917		b->b_pluslen = 0;
1918	}
1919
1920	subh = (struct pfsync_subheader *)(m->m_data + offset);
1921	offset += sizeof(*subh);
1922
1923	subh->action = PFSYNC_ACT_EOF;
1924	subh->count = htons(1);
1925	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_EOF]++;
1926
1927	/* we're done, let's put it on the wire */
1928	if (ifp->if_bpf) {
1929		m->m_data += aflen;
1930		m->m_len = m->m_pkthdr.len = len - aflen;
1931		BPF_MTAP(ifp, m);
1932		m->m_data -= aflen;
1933		m->m_len = m->m_pkthdr.len = len;
1934	}
1935
1936	if (sc->sc_sync_if == NULL) {
1937		b->b_len = PFSYNC_MINPKT;
1938		m_freem(m);
1939		return;
1940	}
1941
1942	if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
1943	if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1944	b->b_len = PFSYNC_MINPKT;
1945
1946	if (!_IF_QFULL(&b->b_snd))
1947		_IF_ENQUEUE(&b->b_snd, m);
1948	else {
1949		m_freem(m);
1950		if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
1951	}
1952	if (schedswi)
1953		swi_sched(V_pfsync_swi_cookie, 0);
1954}
1955
1956static void
1957pfsync_insert_state(struct pf_kstate *st)
1958{
1959	struct pfsync_softc *sc = V_pfsyncif;
1960	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
1961
1962	if (st->state_flags & PFSTATE_NOSYNC)
1963		return;
1964
1965	if ((st->rule.ptr->rule_flag & PFRULE_NOSYNC) ||
1966	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
1967		st->state_flags |= PFSTATE_NOSYNC;
1968		return;
1969	}
1970
1971	KASSERT(st->sync_state == PFSYNC_S_NONE,
1972		("%s: st->sync_state %u", __func__, st->sync_state));
1973
1974	PFSYNC_BUCKET_LOCK(b);
1975	if (b->b_len == PFSYNC_MINPKT)
1976		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
1977
1978	pfsync_q_ins(st, PFSYNC_S_INS, true);
1979	PFSYNC_BUCKET_UNLOCK(b);
1980
1981	st->sync_updates = 0;
1982}
1983
1984static int
1985pfsync_defer(struct pf_kstate *st, struct mbuf *m)
1986{
1987	struct pfsync_softc *sc = V_pfsyncif;
1988	struct pfsync_deferral *pd;
1989	struct pfsync_bucket *b;
1990
1991	if (m->m_flags & (M_BCAST|M_MCAST))
1992		return (0);
1993
1994	if (sc == NULL)
1995		return (0);
1996
1997	b = pfsync_get_bucket(sc, st);
1998
1999	PFSYNC_LOCK(sc);
2000
2001	if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) ||
2002	    !(sc->sc_flags & PFSYNCF_DEFER)) {
2003		PFSYNC_UNLOCK(sc);
2004		return (0);
2005	}
2006
2007	PFSYNC_BUCKET_LOCK(b);
2008	PFSYNC_UNLOCK(sc);
2009
2010	if (b->b_deferred >= 128)
2011		pfsync_undefer(TAILQ_FIRST(&b->b_deferrals), 0);
2012
2013	pd = malloc(sizeof(*pd), M_PFSYNC, M_NOWAIT);
2014	if (pd == NULL) {
2015		PFSYNC_BUCKET_UNLOCK(b);
2016		return (0);
2017	}
2018	b->b_deferred++;
2019
2020	m->m_flags |= M_SKIP_FIREWALL;
2021	st->state_flags |= PFSTATE_ACK;
2022
2023	pd->pd_sc = sc;
2024	pd->pd_st = st;
2025	pf_ref_state(st);
2026	pd->pd_m = m;
2027
2028	TAILQ_INSERT_TAIL(&b->b_deferrals, pd, pd_entry);
2029	callout_init_mtx(&pd->pd_tmo, &b->b_mtx, CALLOUT_RETURNUNLOCKED);
2030	callout_reset(&pd->pd_tmo, (V_pfsync_defer_timeout * hz) / 1000,
2031	    pfsync_defer_tmo, pd);
2032
2033	pfsync_push(b);
2034	PFSYNC_BUCKET_UNLOCK(b);
2035
2036	return (1);
2037}
2038
2039static void
2040pfsync_undefer(struct pfsync_deferral *pd, int drop)
2041{
2042	struct pfsync_softc *sc = pd->pd_sc;
2043	struct mbuf *m = pd->pd_m;
2044	struct pf_kstate *st = pd->pd_st;
2045	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2046
2047	PFSYNC_BUCKET_LOCK_ASSERT(b);
2048
2049	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
2050	b->b_deferred--;
2051	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
2052	free(pd, M_PFSYNC);
2053	pf_release_state(st);
2054
2055	if (drop)
2056		m_freem(m);
2057	else {
2058		_IF_ENQUEUE(&b->b_snd, m);
2059		pfsync_push(b);
2060	}
2061}
2062
2063static void
2064pfsync_defer_tmo(void *arg)
2065{
2066	struct epoch_tracker et;
2067	struct pfsync_deferral *pd = arg;
2068	struct pfsync_softc *sc = pd->pd_sc;
2069	struct mbuf *m = pd->pd_m;
2070	struct pf_kstate *st = pd->pd_st;
2071	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2072
2073	PFSYNC_BUCKET_LOCK_ASSERT(b);
2074
2075	TAILQ_REMOVE(&b->b_deferrals, pd, pd_entry);
2076	b->b_deferred--;
2077	pd->pd_st->state_flags &= ~PFSTATE_ACK;	/* XXX: locking! */
2078	PFSYNC_BUCKET_UNLOCK(b);
2079	free(pd, M_PFSYNC);
2080
2081	if (sc->sc_sync_if == NULL) {
2082		pf_release_state(st);
2083		m_freem(m);
2084		return;
2085	}
2086
2087	NET_EPOCH_ENTER(et);
2088	CURVNET_SET(sc->sc_sync_if->if_vnet);
2089
2090	pfsync_tx(sc, m);
2091
2092	pf_release_state(st);
2093
2094	CURVNET_RESTORE();
2095	NET_EPOCH_EXIT(et);
2096}
2097
2098static void
2099pfsync_undefer_state_locked(struct pf_kstate *st, int drop)
2100{
2101	struct pfsync_softc *sc = V_pfsyncif;
2102	struct pfsync_deferral *pd;
2103	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2104
2105	PFSYNC_BUCKET_LOCK_ASSERT(b);
2106
2107	TAILQ_FOREACH(pd, &b->b_deferrals, pd_entry) {
2108		 if (pd->pd_st == st) {
2109			if (callout_stop(&pd->pd_tmo) > 0)
2110				pfsync_undefer(pd, drop);
2111
2112			return;
2113		}
2114	}
2115
2116	panic("%s: unable to find deferred state", __func__);
2117}
2118
2119static void
2120pfsync_undefer_state(struct pf_kstate *st, int drop)
2121{
2122	struct pfsync_softc *sc = V_pfsyncif;
2123	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2124
2125	PFSYNC_BUCKET_LOCK(b);
2126	pfsync_undefer_state_locked(st, drop);
2127	PFSYNC_BUCKET_UNLOCK(b);
2128}
2129
2130static struct pfsync_bucket*
2131pfsync_get_bucket(struct pfsync_softc *sc, struct pf_kstate *st)
2132{
2133	int c = PF_IDHASH(st) % pfsync_buckets;
2134	return &sc->sc_buckets[c];
2135}
2136
2137static void
2138pfsync_update_state(struct pf_kstate *st)
2139{
2140	struct pfsync_softc *sc = V_pfsyncif;
2141	bool sync = false, ref = true;
2142	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2143
2144	PF_STATE_LOCK_ASSERT(st);
2145	PFSYNC_BUCKET_LOCK(b);
2146
2147	if (st->state_flags & PFSTATE_ACK)
2148		pfsync_undefer_state_locked(st, 0);
2149	if (st->state_flags & PFSTATE_NOSYNC) {
2150		if (st->sync_state != PFSYNC_S_NONE)
2151			pfsync_q_del(st, true, b);
2152		PFSYNC_BUCKET_UNLOCK(b);
2153		return;
2154	}
2155
2156	if (b->b_len == PFSYNC_MINPKT)
2157		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
2158
2159	switch (st->sync_state) {
2160	case PFSYNC_S_UPD_C:
2161	case PFSYNC_S_UPD:
2162	case PFSYNC_S_INS:
2163		/* we're already handling it */
2164
2165		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
2166			st->sync_updates++;
2167			if (st->sync_updates >= sc->sc_maxupdates)
2168				sync = true;
2169		}
2170		break;
2171
2172	case PFSYNC_S_IACK:
2173		pfsync_q_del(st, false, b);
2174		ref = false;
2175		/* FALLTHROUGH */
2176
2177	case PFSYNC_S_NONE:
2178		pfsync_q_ins(st, PFSYNC_S_UPD_C, ref);
2179		st->sync_updates = 0;
2180		break;
2181
2182	default:
2183		panic("%s: unexpected sync state %d", __func__, st->sync_state);
2184	}
2185
2186	if (sync || (time_uptime - st->pfsync_time) < 2)
2187		pfsync_push(b);
2188
2189	PFSYNC_BUCKET_UNLOCK(b);
2190}
2191
2192static void
2193pfsync_request_update(u_int32_t creatorid, u_int64_t id)
2194{
2195	struct pfsync_softc *sc = V_pfsyncif;
2196	struct pfsync_bucket *b = &sc->sc_buckets[0];
2197	struct pfsync_upd_req_item *item;
2198	size_t nlen = sizeof(struct pfsync_upd_req);
2199
2200	PFSYNC_BUCKET_LOCK_ASSERT(b);
2201
2202	/*
2203	 * This code does a bit to prevent multiple update requests for the
2204	 * same state being generated. It searches current subheader queue,
2205	 * but it doesn't lookup into queue of already packed datagrams.
2206	 */
2207	TAILQ_FOREACH(item, &b->b_upd_req_list, ur_entry)
2208		if (item->ur_msg.id == id &&
2209		    item->ur_msg.creatorid == creatorid)
2210			return;
2211
2212	item = malloc(sizeof(*item), M_PFSYNC, M_NOWAIT);
2213	if (item == NULL)
2214		return; /* XXX stats */
2215
2216	item->ur_msg.id = id;
2217	item->ur_msg.creatorid = creatorid;
2218
2219	if (TAILQ_EMPTY(&b->b_upd_req_list))
2220		nlen += sizeof(struct pfsync_subheader);
2221
2222	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
2223		pfsync_sendout(0, 0);
2224
2225		nlen = sizeof(struct pfsync_subheader) +
2226		    sizeof(struct pfsync_upd_req);
2227	}
2228
2229	TAILQ_INSERT_TAIL(&b->b_upd_req_list, item, ur_entry);
2230	b->b_len += nlen;
2231
2232	pfsync_push(b);
2233}
2234
2235static bool
2236pfsync_update_state_req(struct pf_kstate *st)
2237{
2238	struct pfsync_softc *sc = V_pfsyncif;
2239	bool ref = true, full = false;
2240	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2241
2242	PF_STATE_LOCK_ASSERT(st);
2243	PFSYNC_BUCKET_LOCK(b);
2244
2245	if (st->state_flags & PFSTATE_NOSYNC) {
2246		if (st->sync_state != PFSYNC_S_NONE)
2247			pfsync_q_del(st, true, b);
2248		PFSYNC_BUCKET_UNLOCK(b);
2249		return (full);
2250	}
2251
2252	switch (st->sync_state) {
2253	case PFSYNC_S_UPD_C:
2254	case PFSYNC_S_IACK:
2255		pfsync_q_del(st, false, b);
2256		ref = false;
2257		/* FALLTHROUGH */
2258
2259	case PFSYNC_S_NONE:
2260		pfsync_q_ins(st, PFSYNC_S_UPD, ref);
2261		pfsync_push(b);
2262		break;
2263
2264	case PFSYNC_S_INS:
2265	case PFSYNC_S_UPD:
2266	case PFSYNC_S_DEL_C:
2267		/* we're already handling it */
2268		break;
2269
2270	default:
2271		panic("%s: unexpected sync state %d", __func__, st->sync_state);
2272	}
2273
2274	if ((sc->sc_ifp->if_mtu - b->b_len) < sizeof(union pfsync_state_union))
2275		full = true;
2276
2277	PFSYNC_BUCKET_UNLOCK(b);
2278
2279	return (full);
2280}
2281
2282static void
2283pfsync_delete_state(struct pf_kstate *st)
2284{
2285	struct pfsync_softc *sc = V_pfsyncif;
2286	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2287	bool ref = true;
2288
2289	PFSYNC_BUCKET_LOCK(b);
2290	if (st->state_flags & PFSTATE_ACK)
2291		pfsync_undefer_state_locked(st, 1);
2292	if (st->state_flags & PFSTATE_NOSYNC) {
2293		if (st->sync_state != PFSYNC_S_NONE)
2294			pfsync_q_del(st, true, b);
2295		PFSYNC_BUCKET_UNLOCK(b);
2296		return;
2297	}
2298
2299	if (b->b_len == PFSYNC_MINPKT)
2300		callout_reset(&b->b_tmo, 1 * hz, pfsync_timeout, b);
2301
2302	switch (st->sync_state) {
2303	case PFSYNC_S_INS:
2304		/* We never got to tell the world so just forget about it. */
2305		pfsync_q_del(st, true, b);
2306		break;
2307
2308	case PFSYNC_S_UPD_C:
2309	case PFSYNC_S_UPD:
2310	case PFSYNC_S_IACK:
2311		pfsync_q_del(st, false, b);
2312		ref = false;
2313		/* FALLTHROUGH */
2314
2315	case PFSYNC_S_NONE:
2316		pfsync_q_ins(st, PFSYNC_S_DEL_C, ref);
2317		break;
2318
2319	default:
2320		panic("%s: unexpected sync state %d", __func__, st->sync_state);
2321	}
2322
2323	PFSYNC_BUCKET_UNLOCK(b);
2324}
2325
2326static void
2327pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2328{
2329	struct {
2330		struct pfsync_subheader subh;
2331		struct pfsync_clr clr;
2332	} __packed r;
2333
2334	bzero(&r, sizeof(r));
2335
2336	r.subh.action = PFSYNC_ACT_CLR;
2337	r.subh.count = htons(1);
2338	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_CLR]++;
2339
2340	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2341	r.clr.creatorid = creatorid;
2342
2343	pfsync_send_plus(&r, sizeof(r));
2344}
2345
2346static enum pfsync_q_id
2347pfsync_sstate_to_qid(u_int8_t sync_state)
2348{
2349	struct pfsync_softc *sc = V_pfsyncif;
2350
2351	switch (sync_state) {
2352		case PFSYNC_S_INS:
2353			switch (sc->sc_version) {
2354				case PFSYNC_MSG_VERSION_1301:
2355					return PFSYNC_Q_INS_1301;
2356				case PFSYNC_MSG_VERSION_1400:
2357					return PFSYNC_Q_INS_1400;
2358			}
2359			break;
2360		case PFSYNC_S_IACK:
2361			return PFSYNC_Q_IACK;
2362		case PFSYNC_S_UPD:
2363			switch (sc->sc_version) {
2364				case PFSYNC_MSG_VERSION_1301:
2365					return PFSYNC_Q_UPD_1301;
2366				case PFSYNC_MSG_VERSION_1400:
2367					return PFSYNC_Q_UPD_1400;
2368			}
2369			break;
2370		case PFSYNC_S_UPD_C:
2371			return PFSYNC_Q_UPD_C;
2372		case PFSYNC_S_DEL_C:
2373			return PFSYNC_Q_DEL_C;
2374		default:
2375			panic("%s: Unsupported st->sync_state 0x%02x",
2376			__func__, sync_state);
2377	}
2378
2379	panic("%s: Unsupported pfsync_msg_version %d",
2380	    __func__, sc->sc_version);
2381}
2382
2383static void
2384pfsync_q_ins(struct pf_kstate *st, int sync_state, bool ref)
2385{
2386	enum pfsync_q_id q = pfsync_sstate_to_qid(sync_state);
2387	struct pfsync_softc *sc = V_pfsyncif;
2388	size_t nlen = pfsync_qs[q].len;
2389	struct pfsync_bucket *b = pfsync_get_bucket(sc, st);
2390
2391	PFSYNC_BUCKET_LOCK_ASSERT(b);
2392
2393	KASSERT(st->sync_state == PFSYNC_S_NONE,
2394		("%s: st->sync_state %u", __func__, st->sync_state));
2395	KASSERT(b->b_len >= PFSYNC_MINPKT, ("pfsync pkt len is too low %zu",
2396	    b->b_len));
2397
2398	if (TAILQ_EMPTY(&b->b_qs[q]))
2399		nlen += sizeof(struct pfsync_subheader);
2400
2401	if (b->b_len + nlen > sc->sc_ifp->if_mtu) {
2402		pfsync_sendout(1, b->b_id);
2403
2404		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2405	}
2406
2407	b->b_len += nlen;
2408	TAILQ_INSERT_TAIL(&b->b_qs[q], st, sync_list);
2409	st->sync_state = pfsync_qid_sstate[q];
2410	if (ref)
2411		pf_ref_state(st);
2412}
2413
2414static void
2415pfsync_q_del(struct pf_kstate *st, bool unref, struct pfsync_bucket *b)
2416{
2417	enum pfsync_q_id q;
2418
2419	PFSYNC_BUCKET_LOCK_ASSERT(b);
2420	KASSERT(st->sync_state != PFSYNC_S_NONE,
2421		("%s: st->sync_state != PFSYNC_S_NONE", __func__));
2422
2423	q =  pfsync_sstate_to_qid(st->sync_state);
2424	b->b_len -= pfsync_qs[q].len;
2425	TAILQ_REMOVE(&b->b_qs[q], st, sync_list);
2426	st->sync_state = PFSYNC_S_NONE;
2427	if (unref)
2428		pf_release_state(st);
2429
2430	if (TAILQ_EMPTY(&b->b_qs[q]))
2431		b->b_len -= sizeof(struct pfsync_subheader);
2432}
2433
2434static void
2435pfsync_bulk_start(void)
2436{
2437	struct pfsync_softc *sc = V_pfsyncif;
2438
2439	if (V_pf_status.debug >= PF_DEBUG_MISC)
2440		printf("pfsync: received bulk update request\n");
2441
2442	PFSYNC_BLOCK(sc);
2443
2444	sc->sc_ureq_received = time_uptime;
2445	sc->sc_bulk_hashid = 0;
2446	sc->sc_bulk_stateid = 0;
2447	pfsync_bulk_status(PFSYNC_BUS_START);
2448	callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
2449	PFSYNC_BUNLOCK(sc);
2450}
2451
2452static void
2453pfsync_bulk_update(void *arg)
2454{
2455	struct pfsync_softc *sc = arg;
2456	struct pf_kstate *s;
2457	int i;
2458
2459	PFSYNC_BLOCK_ASSERT(sc);
2460	CURVNET_SET(sc->sc_ifp->if_vnet);
2461
2462	/*
2463	 * Start with last state from previous invocation.
2464	 * It may had gone, in this case start from the
2465	 * hash slot.
2466	 */
2467	s = pf_find_state_byid(sc->sc_bulk_stateid, sc->sc_bulk_creatorid);
2468
2469	if (s != NULL)
2470		i = PF_IDHASH(s);
2471	else
2472		i = sc->sc_bulk_hashid;
2473
2474	for (; i <= pf_hashmask; i++) {
2475		struct pf_idhash *ih = &V_pf_idhash[i];
2476
2477		if (s != NULL)
2478			PF_HASHROW_ASSERT(ih);
2479		else {
2480			PF_HASHROW_LOCK(ih);
2481			s = LIST_FIRST(&ih->states);
2482		}
2483
2484		for (; s; s = LIST_NEXT(s, entry)) {
2485			if (s->sync_state == PFSYNC_S_NONE &&
2486			    s->timeout < PFTM_MAX &&
2487			    s->pfsync_time <= sc->sc_ureq_received) {
2488				if (pfsync_update_state_req(s)) {
2489					/* We've filled a packet. */
2490					sc->sc_bulk_hashid = i;
2491					sc->sc_bulk_stateid = s->id;
2492					sc->sc_bulk_creatorid = s->creatorid;
2493					PF_HASHROW_UNLOCK(ih);
2494					callout_reset(&sc->sc_bulk_tmo, 1,
2495					    pfsync_bulk_update, sc);
2496					goto full;
2497				}
2498			}
2499		}
2500		PF_HASHROW_UNLOCK(ih);
2501	}
2502
2503	/* We're done. */
2504	pfsync_bulk_status(PFSYNC_BUS_END);
2505full:
2506	CURVNET_RESTORE();
2507}
2508
2509static void
2510pfsync_bulk_status(u_int8_t status)
2511{
2512	struct {
2513		struct pfsync_subheader subh;
2514		struct pfsync_bus bus;
2515	} __packed r;
2516
2517	struct pfsync_softc *sc = V_pfsyncif;
2518
2519	bzero(&r, sizeof(r));
2520
2521	r.subh.action = PFSYNC_ACT_BUS;
2522	r.subh.count = htons(1);
2523	V_pfsyncstats.pfsyncs_oacts[PFSYNC_ACT_BUS]++;
2524
2525	r.bus.creatorid = V_pf_status.hostid;
2526	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
2527	r.bus.status = status;
2528
2529	pfsync_send_plus(&r, sizeof(r));
2530}
2531
2532static void
2533pfsync_bulk_fail(void *arg)
2534{
2535	struct pfsync_softc *sc = arg;
2536	struct pfsync_bucket *b = &sc->sc_buckets[0];
2537
2538	CURVNET_SET(sc->sc_ifp->if_vnet);
2539
2540	PFSYNC_BLOCK_ASSERT(sc);
2541
2542	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
2543		/* Try again */
2544		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
2545		    pfsync_bulk_fail, V_pfsyncif);
2546		PFSYNC_BUCKET_LOCK(b);
2547		pfsync_request_update(0, 0);
2548		PFSYNC_BUCKET_UNLOCK(b);
2549	} else {
2550		/* Pretend like the transfer was ok. */
2551		sc->sc_ureq_sent = 0;
2552		sc->sc_bulk_tries = 0;
2553		PFSYNC_LOCK(sc);
2554		if (!(sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
2555			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
2556			    "pfsync bulk fail");
2557		sc->sc_flags |= PFSYNCF_OK;
2558		PFSYNC_UNLOCK(sc);
2559		if (V_pf_status.debug >= PF_DEBUG_MISC)
2560			printf("pfsync: failed to receive bulk update\n");
2561	}
2562
2563	CURVNET_RESTORE();
2564}
2565
2566static void
2567pfsync_send_plus(void *plus, size_t pluslen)
2568{
2569	struct pfsync_softc *sc = V_pfsyncif;
2570	struct pfsync_bucket *b = &sc->sc_buckets[0];
2571	uint8_t *newplus;
2572
2573	PFSYNC_BUCKET_LOCK(b);
2574
2575	if (b->b_len + pluslen > sc->sc_ifp->if_mtu)
2576		pfsync_sendout(1, b->b_id);
2577
2578	newplus = malloc(pluslen + b->b_pluslen, M_PFSYNC, M_NOWAIT);
2579	if (newplus == NULL)
2580		goto out;
2581
2582	if (b->b_plus != NULL) {
2583		memcpy(newplus, b->b_plus, b->b_pluslen);
2584		free(b->b_plus, M_PFSYNC);
2585	} else {
2586		MPASS(b->b_pluslen == 0);
2587	}
2588	memcpy(newplus + b->b_pluslen, plus, pluslen);
2589
2590	b->b_plus = newplus;
2591	b->b_pluslen += pluslen;
2592	b->b_len += pluslen;
2593
2594	pfsync_sendout(1, b->b_id);
2595
2596out:
2597	PFSYNC_BUCKET_UNLOCK(b);
2598}
2599
2600static void
2601pfsync_timeout(void *arg)
2602{
2603	struct pfsync_bucket *b = arg;
2604
2605	CURVNET_SET(b->b_sc->sc_ifp->if_vnet);
2606	PFSYNC_BUCKET_LOCK(b);
2607	pfsync_push(b);
2608	PFSYNC_BUCKET_UNLOCK(b);
2609	CURVNET_RESTORE();
2610}
2611
2612static void
2613pfsync_push(struct pfsync_bucket *b)
2614{
2615
2616	PFSYNC_BUCKET_LOCK_ASSERT(b);
2617
2618	b->b_flags |= PFSYNCF_BUCKET_PUSH;
2619	swi_sched(V_pfsync_swi_cookie, 0);
2620}
2621
2622static void
2623pfsync_push_all(struct pfsync_softc *sc)
2624{
2625	int c;
2626	struct pfsync_bucket *b;
2627
2628	for (c = 0; c < pfsync_buckets; c++) {
2629		b = &sc->sc_buckets[c];
2630
2631		PFSYNC_BUCKET_LOCK(b);
2632		pfsync_push(b);
2633		PFSYNC_BUCKET_UNLOCK(b);
2634	}
2635}
2636
2637static void
2638pfsync_tx(struct pfsync_softc *sc, struct mbuf *m)
2639{
2640	struct ip *ip;
2641	int af, error = 0;
2642
2643	ip = mtod(m, struct ip *);
2644	MPASS(ip->ip_v == IPVERSION || ip->ip_v == (IPV6_VERSION >> 4));
2645
2646	af = ip->ip_v == IPVERSION ? AF_INET : AF_INET6;
2647
2648	/*
2649	 * We distinguish between a deferral packet and our
2650	 * own pfsync packet based on M_SKIP_FIREWALL
2651	 * flag. This is XXX.
2652	 */
2653	switch (af) {
2654#ifdef INET
2655	case AF_INET:
2656		if (m->m_flags & M_SKIP_FIREWALL) {
2657			error = ip_output(m, NULL, NULL, 0,
2658			    NULL, NULL);
2659		} else {
2660			error = ip_output(m, NULL, NULL,
2661			    IP_RAWOUTPUT, &sc->sc_imo, NULL);
2662		}
2663		break;
2664#endif
2665#ifdef INET6
2666	case AF_INET6:
2667		if (m->m_flags & M_SKIP_FIREWALL) {
2668			error = ip6_output(m, NULL, NULL, 0,
2669			    NULL, NULL, NULL);
2670		} else {
2671			error = ip6_output(m, NULL, NULL, 0,
2672				&sc->sc_im6o, NULL, NULL);
2673		}
2674		break;
2675#endif
2676	}
2677
2678	if (error == 0)
2679		V_pfsyncstats.pfsyncs_opackets++;
2680	else
2681		V_pfsyncstats.pfsyncs_oerrors++;
2682
2683}
2684
2685static void
2686pfsyncintr(void *arg)
2687{
2688	struct epoch_tracker et;
2689	struct pfsync_softc *sc = arg;
2690	struct pfsync_bucket *b;
2691	struct mbuf *m, *n;
2692	int c;
2693
2694	NET_EPOCH_ENTER(et);
2695	CURVNET_SET(sc->sc_ifp->if_vnet);
2696
2697	for (c = 0; c < pfsync_buckets; c++) {
2698		b = &sc->sc_buckets[c];
2699
2700		PFSYNC_BUCKET_LOCK(b);
2701		if ((b->b_flags & PFSYNCF_BUCKET_PUSH) && b->b_len > PFSYNC_MINPKT) {
2702			pfsync_sendout(0, b->b_id);
2703			b->b_flags &= ~PFSYNCF_BUCKET_PUSH;
2704		}
2705		_IF_DEQUEUE_ALL(&b->b_snd, m);
2706		PFSYNC_BUCKET_UNLOCK(b);
2707
2708		for (; m != NULL; m = n) {
2709			n = m->m_nextpkt;
2710			m->m_nextpkt = NULL;
2711
2712			pfsync_tx(sc, m);
2713		}
2714	}
2715	CURVNET_RESTORE();
2716	NET_EPOCH_EXIT(et);
2717}
2718
2719static int
2720pfsync_multicast_setup(struct pfsync_softc *sc, struct ifnet *ifp,
2721    struct in_mfilter* imf, struct in6_mfilter* im6f)
2722{
2723#ifdef  INET
2724	struct ip_moptions *imo = &sc->sc_imo;
2725#endif
2726#ifdef INET6
2727	struct ip6_moptions *im6o = &sc->sc_im6o;
2728	struct sockaddr_in6 *syncpeer_sa6 = NULL;
2729#endif
2730
2731	if (!(ifp->if_flags & IFF_MULTICAST))
2732		return (EADDRNOTAVAIL);
2733
2734	switch (sc->sc_sync_peer.ss_family) {
2735#ifdef INET
2736	case AF_INET:
2737	{
2738		int error;
2739
2740		ip_mfilter_init(&imo->imo_head);
2741		imo->imo_multicast_vif = -1;
2742		if ((error = in_joingroup(ifp,
2743		    &((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr, NULL,
2744		    &imf->imf_inm)) != 0)
2745			return (error);
2746
2747		ip_mfilter_insert(&imo->imo_head, imf);
2748		imo->imo_multicast_ifp = ifp;
2749		imo->imo_multicast_ttl = PFSYNC_DFLTTL;
2750		imo->imo_multicast_loop = 0;
2751		break;
2752	}
2753#endif
2754#ifdef INET6
2755	case AF_INET6:
2756	{
2757		int error;
2758
2759		syncpeer_sa6 = (struct sockaddr_in6 *)&sc->sc_sync_peer;
2760		if ((error = in6_setscope(&syncpeer_sa6->sin6_addr, ifp, NULL)))
2761			return (error);
2762
2763		ip6_mfilter_init(&im6o->im6o_head);
2764		if ((error = in6_joingroup(ifp, &syncpeer_sa6->sin6_addr, NULL,
2765		    &(im6f->im6f_in6m), 0)) != 0)
2766			return (error);
2767
2768		ip6_mfilter_insert(&im6o->im6o_head, im6f);
2769		im6o->im6o_multicast_ifp = ifp;
2770		im6o->im6o_multicast_hlim = PFSYNC_DFLTTL;
2771		im6o->im6o_multicast_loop = 0;
2772		break;
2773	}
2774#endif
2775	}
2776
2777	return (0);
2778}
2779
2780static void
2781pfsync_multicast_cleanup(struct pfsync_softc *sc)
2782{
2783#ifdef INET
2784	struct ip_moptions *imo = &sc->sc_imo;
2785	struct in_mfilter *imf;
2786
2787	while ((imf = ip_mfilter_first(&imo->imo_head)) != NULL) {
2788		ip_mfilter_remove(&imo->imo_head, imf);
2789		in_leavegroup(imf->imf_inm, NULL);
2790		ip_mfilter_free(imf);
2791	}
2792	imo->imo_multicast_ifp = NULL;
2793#endif
2794
2795#ifdef INET6
2796	struct ip6_moptions *im6o = &sc->sc_im6o;
2797	struct in6_mfilter *im6f;
2798
2799	while ((im6f = ip6_mfilter_first(&im6o->im6o_head)) != NULL) {
2800		ip6_mfilter_remove(&im6o->im6o_head, im6f);
2801		in6_leavegroup(im6f->im6f_in6m, NULL);
2802		ip6_mfilter_free(im6f);
2803	}
2804	im6o->im6o_multicast_ifp = NULL;
2805#endif
2806}
2807
2808void
2809pfsync_detach_ifnet(struct ifnet *ifp)
2810{
2811	struct pfsync_softc *sc = V_pfsyncif;
2812
2813	if (sc == NULL)
2814		return;
2815
2816	PFSYNC_LOCK(sc);
2817
2818	if (sc->sc_sync_if == ifp) {
2819		/* We don't need mutlicast cleanup here, because the interface
2820		 * is going away. We do need to ensure we don't try to do
2821		 * cleanup later.
2822		 */
2823		ip_mfilter_init(&sc->sc_imo.imo_head);
2824		sc->sc_imo.imo_multicast_ifp = NULL;
2825		sc->sc_im6o.im6o_multicast_ifp = NULL;
2826		sc->sc_sync_if = NULL;
2827	}
2828
2829	PFSYNC_UNLOCK(sc);
2830}
2831
2832static int
2833pfsync_pfsyncreq_to_kstatus(struct pfsyncreq *pfsyncr, struct pfsync_kstatus *status)
2834{
2835	struct sockaddr_storage sa;
2836	status->maxupdates = pfsyncr->pfsyncr_maxupdates;
2837	status->flags = pfsyncr->pfsyncr_defer;
2838
2839	strlcpy(status->syncdev, pfsyncr->pfsyncr_syncdev, IFNAMSIZ);
2840
2841	memset(&sa, 0, sizeof(sa));
2842	if (pfsyncr->pfsyncr_syncpeer.s_addr != 0) {
2843		struct sockaddr_in *in = (struct sockaddr_in *)&sa;
2844		in->sin_family = AF_INET;
2845		in->sin_len = sizeof(*in);
2846		in->sin_addr.s_addr = pfsyncr->pfsyncr_syncpeer.s_addr;
2847	}
2848	status->syncpeer = sa;
2849
2850	return 0;
2851}
2852
2853static int
2854pfsync_kstatus_to_softc(struct pfsync_kstatus *status, struct pfsync_softc *sc)
2855{
2856	struct ifnet *sifp;
2857	struct in_mfilter *imf = NULL;
2858	struct in6_mfilter *im6f = NULL;
2859	int error;
2860	int c;
2861
2862	if ((status->maxupdates < 0) || (status->maxupdates > 255))
2863		return (EINVAL);
2864
2865	if (status->syncdev[0] == '\0')
2866		sifp = NULL;
2867	else if ((sifp = ifunit_ref(status->syncdev)) == NULL)
2868		return (EINVAL);
2869
2870	switch (status->syncpeer.ss_family) {
2871#ifdef INET
2872	case AF_UNSPEC:
2873	case AF_INET: {
2874		struct sockaddr_in *status_sin;
2875		status_sin = (struct sockaddr_in *)&(status->syncpeer);
2876		if (sifp != NULL) {
2877			if (status_sin->sin_addr.s_addr == 0 ||
2878			    status_sin->sin_addr.s_addr ==
2879			    htonl(INADDR_PFSYNC_GROUP)) {
2880				status_sin->sin_family = AF_INET;
2881				status_sin->sin_len = sizeof(*status_sin);
2882				status_sin->sin_addr.s_addr =
2883				    htonl(INADDR_PFSYNC_GROUP);
2884			}
2885
2886			if (IN_MULTICAST(ntohl(status_sin->sin_addr.s_addr))) {
2887				imf = ip_mfilter_alloc(M_WAITOK, 0, 0);
2888			}
2889		}
2890		break;
2891	}
2892#endif
2893#ifdef INET6
2894	case AF_INET6: {
2895		struct sockaddr_in6 *status_sin6;
2896		status_sin6 = (struct sockaddr_in6*)&(status->syncpeer);
2897		if (sifp != NULL) {
2898			if (IN6_IS_ADDR_UNSPECIFIED(&status_sin6->sin6_addr) ||
2899			    IN6_ARE_ADDR_EQUAL(&status_sin6->sin6_addr,
2900				&in6addr_linklocal_pfsync_group)) {
2901				status_sin6->sin6_family = AF_INET6;
2902				status_sin6->sin6_len = sizeof(*status_sin6);
2903				status_sin6->sin6_addr =
2904				    in6addr_linklocal_pfsync_group;
2905			}
2906
2907			if (IN6_IS_ADDR_MULTICAST(&status_sin6->sin6_addr)) {
2908				im6f = ip6_mfilter_alloc(M_WAITOK, 0, 0);
2909			}
2910		}
2911		break;
2912	}
2913#endif
2914	}
2915
2916	PFSYNC_LOCK(sc);
2917
2918	switch (status->version) {
2919		case PFSYNC_MSG_VERSION_UNSPECIFIED:
2920			sc->sc_version = PFSYNC_MSG_VERSION_DEFAULT;
2921			break;
2922		case PFSYNC_MSG_VERSION_1301:
2923		case PFSYNC_MSG_VERSION_1400:
2924			sc->sc_version = status->version;
2925			break;
2926		default:
2927			PFSYNC_UNLOCK(sc);
2928			return (EINVAL);
2929	}
2930
2931	switch (status->syncpeer.ss_family) {
2932	case AF_INET: {
2933		struct sockaddr_in *status_sin = (struct sockaddr_in *)&(status->syncpeer);
2934		struct sockaddr_in *sc_sin = (struct sockaddr_in *)&sc->sc_sync_peer;
2935		sc_sin->sin_family = AF_INET;
2936		sc_sin->sin_len = sizeof(*sc_sin);
2937		if (status_sin->sin_addr.s_addr == 0) {
2938			sc_sin->sin_addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
2939		} else {
2940			sc_sin->sin_addr.s_addr = status_sin->sin_addr.s_addr;
2941		}
2942		break;
2943	}
2944	case AF_INET6: {
2945		struct sockaddr_in6 *status_sin = (struct sockaddr_in6 *)&(status->syncpeer);
2946		struct sockaddr_in6 *sc_sin = (struct sockaddr_in6 *)&sc->sc_sync_peer;
2947		sc_sin->sin6_family = AF_INET6;
2948		sc_sin->sin6_len = sizeof(*sc_sin);
2949		if(IN6_IS_ADDR_UNSPECIFIED(&status_sin->sin6_addr)) {
2950			sc_sin->sin6_addr = in6addr_linklocal_pfsync_group;
2951		} else {
2952			sc_sin->sin6_addr = status_sin->sin6_addr;
2953		}
2954		break;
2955	}
2956	}
2957
2958	sc->sc_maxupdates = status->maxupdates;
2959	if (status->flags & PFSYNCF_DEFER) {
2960		sc->sc_flags |= PFSYNCF_DEFER;
2961		V_pfsync_defer_ptr = pfsync_defer;
2962	} else {
2963		sc->sc_flags &= ~PFSYNCF_DEFER;
2964		V_pfsync_defer_ptr = NULL;
2965	}
2966
2967	if (sifp == NULL) {
2968		if (sc->sc_sync_if)
2969			if_rele(sc->sc_sync_if);
2970		sc->sc_sync_if = NULL;
2971		pfsync_multicast_cleanup(sc);
2972		PFSYNC_UNLOCK(sc);
2973		return (0);
2974	}
2975
2976	for (c = 0; c < pfsync_buckets; c++) {
2977		PFSYNC_BUCKET_LOCK(&sc->sc_buckets[c]);
2978		if (sc->sc_buckets[c].b_len > PFSYNC_MINPKT &&
2979		    (sifp->if_mtu < sc->sc_ifp->if_mtu ||
2980			(sc->sc_sync_if != NULL &&
2981			    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
2982			sifp->if_mtu < MCLBYTES - sizeof(struct ip)))
2983			pfsync_sendout(1, c);
2984		PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[c]);
2985	}
2986
2987	pfsync_multicast_cleanup(sc);
2988
2989	if (((sc->sc_sync_peer.ss_family == AF_INET) &&
2990	    IN_MULTICAST(ntohl(((struct sockaddr_in *)
2991	        &sc->sc_sync_peer)->sin_addr.s_addr))) ||
2992	    ((sc->sc_sync_peer.ss_family == AF_INET6) &&
2993	    IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6*)
2994	        &sc->sc_sync_peer)->sin6_addr))) {
2995		error = pfsync_multicast_setup(sc, sifp, imf, im6f);
2996		if (error) {
2997			if_rele(sifp);
2998			PFSYNC_UNLOCK(sc);
2999#ifdef INET
3000			if (imf != NULL)
3001				ip_mfilter_free(imf);
3002#endif
3003#ifdef INET6
3004			if (im6f != NULL)
3005				ip6_mfilter_free(im6f);
3006#endif
3007			return (error);
3008		}
3009	}
3010	if (sc->sc_sync_if)
3011		if_rele(sc->sc_sync_if);
3012	sc->sc_sync_if = sifp;
3013
3014	switch (sc->sc_sync_peer.ss_family) {
3015#ifdef INET
3016	case AF_INET: {
3017		struct ip *ip;
3018		ip = &sc->sc_template.ipv4;
3019		bzero(ip, sizeof(*ip));
3020		ip->ip_v = IPVERSION;
3021		ip->ip_hl = sizeof(sc->sc_template.ipv4) >> 2;
3022		ip->ip_tos = IPTOS_LOWDELAY;
3023		/* len and id are set later. */
3024		ip->ip_off = htons(IP_DF);
3025		ip->ip_ttl = PFSYNC_DFLTTL;
3026		ip->ip_p = IPPROTO_PFSYNC;
3027		ip->ip_src.s_addr = INADDR_ANY;
3028		ip->ip_dst = ((struct sockaddr_in *)&sc->sc_sync_peer)->sin_addr;
3029		break;
3030	}
3031#endif
3032#ifdef INET6
3033	case AF_INET6: {
3034		struct ip6_hdr *ip6;
3035		ip6 = &sc->sc_template.ipv6;
3036		bzero(ip6, sizeof(*ip6));
3037		ip6->ip6_vfc = IPV6_VERSION;
3038		ip6->ip6_hlim = PFSYNC_DFLTTL;
3039		ip6->ip6_nxt = IPPROTO_PFSYNC;
3040		ip6->ip6_dst = ((struct sockaddr_in6 *)&sc->sc_sync_peer)->sin6_addr;
3041
3042		struct epoch_tracker et;
3043		NET_EPOCH_ENTER(et);
3044		in6_selectsrc_addr(if_getfib(sc->sc_sync_if), &ip6->ip6_dst, 0,
3045		    sc->sc_sync_if, &ip6->ip6_src, NULL);
3046		NET_EPOCH_EXIT(et);
3047		break;
3048	}
3049#endif
3050	}
3051
3052	/* Request a full state table update. */
3053	if ((sc->sc_flags & PFSYNCF_OK) && carp_demote_adj_p)
3054		(*carp_demote_adj_p)(V_pfsync_carp_adj,
3055		    "pfsync bulk start");
3056	sc->sc_flags &= ~PFSYNCF_OK;
3057	if (V_pf_status.debug >= PF_DEBUG_MISC)
3058		printf("pfsync: requesting bulk update\n");
3059	PFSYNC_UNLOCK(sc);
3060	PFSYNC_BUCKET_LOCK(&sc->sc_buckets[0]);
3061	pfsync_request_update(0, 0);
3062	PFSYNC_BUCKET_UNLOCK(&sc->sc_buckets[0]);
3063	PFSYNC_BLOCK(sc);
3064	sc->sc_ureq_sent = time_uptime;
3065	callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail, sc);
3066	PFSYNC_BUNLOCK(sc);
3067	return (0);
3068}
3069
3070static void
3071pfsync_pointers_init(void)
3072{
3073
3074	PF_RULES_WLOCK();
3075	V_pfsync_state_import_ptr = pfsync_state_import;
3076	V_pfsync_insert_state_ptr = pfsync_insert_state;
3077	V_pfsync_update_state_ptr = pfsync_update_state;
3078	V_pfsync_delete_state_ptr = pfsync_delete_state;
3079	V_pfsync_clear_states_ptr = pfsync_clear_states;
3080	V_pfsync_defer_ptr = pfsync_defer;
3081	PF_RULES_WUNLOCK();
3082}
3083
3084static void
3085pfsync_pointers_uninit(void)
3086{
3087
3088	PF_RULES_WLOCK();
3089	V_pfsync_state_import_ptr = NULL;
3090	V_pfsync_insert_state_ptr = NULL;
3091	V_pfsync_update_state_ptr = NULL;
3092	V_pfsync_delete_state_ptr = NULL;
3093	V_pfsync_clear_states_ptr = NULL;
3094	V_pfsync_defer_ptr = NULL;
3095	PF_RULES_WUNLOCK();
3096}
3097
3098static void
3099vnet_pfsync_init(const void *unused __unused)
3100{
3101	int error;
3102
3103	V_pfsync_cloner = if_clone_simple(pfsyncname,
3104	    pfsync_clone_create, pfsync_clone_destroy, 1);
3105	error = swi_add(&V_pfsync_swi_ie, pfsyncname, pfsyncintr, V_pfsyncif,
3106	    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
3107	if (error) {
3108		if_clone_detach(V_pfsync_cloner);
3109		log(LOG_INFO, "swi_add() failed in %s\n", __func__);
3110	}
3111
3112	pfsync_pointers_init();
3113}
3114VNET_SYSINIT(vnet_pfsync_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY,
3115    vnet_pfsync_init, NULL);
3116
3117static void
3118vnet_pfsync_uninit(const void *unused __unused)
3119{
3120	int ret __diagused;
3121
3122	pfsync_pointers_uninit();
3123
3124	if_clone_detach(V_pfsync_cloner);
3125	ret = swi_remove(V_pfsync_swi_cookie);
3126	MPASS(ret == 0);
3127	ret = intr_event_destroy(V_pfsync_swi_ie);
3128	MPASS(ret == 0);
3129}
3130
3131VNET_SYSUNINIT(vnet_pfsync_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_FOURTH,
3132    vnet_pfsync_uninit, NULL);
3133
3134static int
3135pfsync_init(void)
3136{
3137	int error;
3138
3139	pfsync_detach_ifnet_ptr = pfsync_detach_ifnet;
3140
3141#ifdef INET
3142	error = ipproto_register(IPPROTO_PFSYNC, pfsync_input, NULL);
3143	if (error)
3144		return (error);
3145#endif
3146#ifdef INET6
3147	error = ip6proto_register(IPPROTO_PFSYNC, pfsync6_input, NULL);
3148	if (error) {
3149		ipproto_unregister(IPPROTO_PFSYNC);
3150		return (error);
3151	}
3152#endif
3153
3154	return (0);
3155}
3156
3157static void
3158pfsync_uninit(void)
3159{
3160	pfsync_detach_ifnet_ptr = NULL;
3161
3162#ifdef INET
3163	ipproto_unregister(IPPROTO_PFSYNC);
3164#endif
3165#ifdef INET6
3166	ip6proto_unregister(IPPROTO_PFSYNC);
3167#endif
3168}
3169
3170static int
3171pfsync_modevent(module_t mod, int type, void *data)
3172{
3173	int error = 0;
3174
3175	switch (type) {
3176	case MOD_LOAD:
3177		error = pfsync_init();
3178		break;
3179	case MOD_UNLOAD:
3180		pfsync_uninit();
3181		break;
3182	default:
3183		error = EINVAL;
3184		break;
3185	}
3186
3187	return (error);
3188}
3189
3190static moduledata_t pfsync_mod = {
3191	pfsyncname,
3192	pfsync_modevent,
3193	0
3194};
3195
3196#define PFSYNC_MODVER 1
3197
3198/* Stay on FIREWALL as we depend on pf being initialized and on inetdomain. */
3199DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY);
3200MODULE_VERSION(pfsync, PFSYNC_MODVER);
3201MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
3202