1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2008 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 *    - Redistributions of source code must retain the above copyright
14 *      notice, this list of conditions and the following disclaimer.
15 *    - Redistributions in binary form must reproduce the above
16 *      copyright notice, this list of conditions and the following
17 *      disclaimer in the documentation and/or other materials provided
18 *      with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
38 */
39
40#include <sys/cdefs.h>
41#include "opt_bpf.h"
42#include "opt_inet.h"
43#include "opt_inet6.h"
44#include "opt_pf.h"
45#include "opt_sctp.h"
46
47#include <sys/param.h>
48#include <sys/bus.h>
49#include <sys/endian.h>
50#include <sys/gsb_crc32.h>
51#include <sys/hash.h>
52#include <sys/interrupt.h>
53#include <sys/kernel.h>
54#include <sys/kthread.h>
55#include <sys/limits.h>
56#include <sys/mbuf.h>
57#include <sys/md5.h>
58#include <sys/random.h>
59#include <sys/refcount.h>
60#include <sys/sdt.h>
61#include <sys/socket.h>
62#include <sys/sysctl.h>
63#include <sys/taskqueue.h>
64#include <sys/ucred.h>
65
66#include <net/if.h>
67#include <net/if_var.h>
68#include <net/if_private.h>
69#include <net/if_types.h>
70#include <net/if_vlan_var.h>
71#include <net/route.h>
72#include <net/route/nhop.h>
73#include <net/vnet.h>
74
75#include <net/pfil.h>
76#include <net/pfvar.h>
77#include <net/if_pflog.h>
78#include <net/if_pfsync.h>
79
80#include <netinet/in_pcb.h>
81#include <netinet/in_var.h>
82#include <netinet/in_fib.h>
83#include <netinet/ip.h>
84#include <netinet/ip_fw.h>
85#include <netinet/ip_icmp.h>
86#include <netinet/icmp_var.h>
87#include <netinet/ip_var.h>
88#include <netinet/tcp.h>
89#include <netinet/tcp_fsm.h>
90#include <netinet/tcp_seq.h>
91#include <netinet/tcp_timer.h>
92#include <netinet/tcp_var.h>
93#include <netinet/udp.h>
94#include <netinet/udp_var.h>
95
96/* dummynet */
97#include <netinet/ip_dummynet.h>
98#include <netinet/ip_fw.h>
99#include <netpfil/ipfw/dn_heap.h>
100#include <netpfil/ipfw/ip_fw_private.h>
101#include <netpfil/ipfw/ip_dn_private.h>
102
103#ifdef INET6
104#include <netinet/ip6.h>
105#include <netinet/icmp6.h>
106#include <netinet6/nd6.h>
107#include <netinet6/ip6_var.h>
108#include <netinet6/in6_pcb.h>
109#include <netinet6/in6_fib.h>
110#include <netinet6/scope6_var.h>
111#endif /* INET6 */
112
113#include <netinet/sctp_header.h>
114#include <netinet/sctp_crc32.h>
115
116#include <machine/in_cksum.h>
117#include <security/mac/mac_framework.h>
118
119#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
120
121SDT_PROVIDER_DEFINE(pf);
122SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *",
123    "struct pf_kstate *");
124SDT_PROBE_DEFINE4(pf, ip, test6, done, "int", "int", "struct pf_krule *",
125    "struct pf_kstate *");
126SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *",
127    "struct pf_state_key_cmp *", "int", "struct pf_pdesc *",
128    "struct pf_kstate *");
129SDT_PROBE_DEFINE2(pf, ip, , bound_iface, "struct pf_kstate *",
130    "struct pfi_kkif *");
131SDT_PROBE_DEFINE4(pf, sctp, multihome, test, "struct pfi_kkif *",
132    "struct pf_krule *", "struct mbuf *", "int");
133
134SDT_PROBE_DEFINE3(pf, eth, test_rule, entry, "int", "struct ifnet *",
135    "struct mbuf *");
136SDT_PROBE_DEFINE2(pf, eth, test_rule, test, "int", "struct pf_keth_rule *");
137SDT_PROBE_DEFINE3(pf, eth, test_rule, mismatch,
138    "int", "struct pf_keth_rule *", "char *");
139SDT_PROBE_DEFINE2(pf, eth, test_rule, match, "int", "struct pf_keth_rule *");
140SDT_PROBE_DEFINE2(pf, eth, test_rule, final_match,
141    "int", "struct pf_keth_rule *");
142SDT_PROBE_DEFINE2(pf, purge, state, rowcount, "int", "size_t");
143
144/*
145 * Global variables
146 */
147
148/* state tables */
149VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[4]);
150VNET_DEFINE(struct pf_kpalist,		 pf_pabuf);
151VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
152VNET_DEFINE(struct pf_altqqueue *,	 pf_altq_ifs_active);
153VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
154VNET_DEFINE(struct pf_altqqueue *,	 pf_altq_ifs_inactive);
155VNET_DEFINE(struct pf_kstatus,		 pf_status);
156
157VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
158VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
159VNET_DEFINE(int,			 altqs_inactive_open);
160VNET_DEFINE(u_int32_t,			 ticket_pabuf);
161
162VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
163#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
164VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
165#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
166VNET_DEFINE(int,			 pf_tcp_secret_init);
167#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
168VNET_DEFINE(int,			 pf_tcp_iss_off);
169#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)
170VNET_DECLARE(int,			 pf_vnet_active);
171#define	V_pf_vnet_active		 VNET(pf_vnet_active)
172
173VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
174#define V_pf_purge_idx	VNET(pf_purge_idx)
175
176#ifdef PF_WANT_32_TO_64_COUNTER
177VNET_DEFINE_STATIC(uint32_t, pf_counter_periodic_iter);
178#define	V_pf_counter_periodic_iter	VNET(pf_counter_periodic_iter)
179
180VNET_DEFINE(struct allrulelist_head, pf_allrulelist);
181VNET_DEFINE(size_t, pf_allrulecount);
182VNET_DEFINE(struct pf_krule *, pf_rulemarker);
183#endif
184
185struct pf_sctp_endpoint;
186RB_HEAD(pf_sctp_endpoints, pf_sctp_endpoint);
187struct pf_sctp_source {
188	sa_family_t			af;
189	struct pf_addr			addr;
190	TAILQ_ENTRY(pf_sctp_source)	entry;
191};
192TAILQ_HEAD(pf_sctp_sources, pf_sctp_source);
193struct pf_sctp_endpoint
194{
195	uint32_t		 v_tag;
196	struct pf_sctp_sources	 sources;
197	RB_ENTRY(pf_sctp_endpoint)	entry;
198};
199static int
200pf_sctp_endpoint_compare(struct pf_sctp_endpoint *a, struct pf_sctp_endpoint *b)
201{
202	return (a->v_tag - b->v_tag);
203}
204RB_PROTOTYPE(pf_sctp_endpoints, pf_sctp_endpoint, entry, pf_sctp_endpoint_compare);
205RB_GENERATE(pf_sctp_endpoints, pf_sctp_endpoint, entry, pf_sctp_endpoint_compare);
206VNET_DEFINE_STATIC(struct pf_sctp_endpoints, pf_sctp_endpoints);
207#define V_pf_sctp_endpoints	VNET(pf_sctp_endpoints)
208static struct mtx_padalign pf_sctp_endpoints_mtx;
209MTX_SYSINIT(pf_sctp_endpoints_mtx, &pf_sctp_endpoints_mtx, "SCTP endpoints", MTX_DEF);
210#define	PF_SCTP_ENDPOINTS_LOCK()	mtx_lock(&pf_sctp_endpoints_mtx)
211#define	PF_SCTP_ENDPOINTS_UNLOCK()	mtx_unlock(&pf_sctp_endpoints_mtx)
212
213/*
214 * Queue for pf_intr() sends.
215 */
216static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
217struct pf_send_entry {
218	STAILQ_ENTRY(pf_send_entry)	pfse_next;
219	struct mbuf			*pfse_m;
220	enum {
221		PFSE_IP,
222		PFSE_IP6,
223		PFSE_ICMP,
224		PFSE_ICMP6,
225	}				pfse_type;
226	struct {
227		int		type;
228		int		code;
229		int		mtu;
230	} icmpopts;
231};
232
233STAILQ_HEAD(pf_send_head, pf_send_entry);
234VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
235#define	V_pf_sendqueue	VNET(pf_sendqueue)
236
237static struct mtx_padalign pf_sendqueue_mtx;
238MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
239#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
240#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
241
242/*
243 * Queue for pf_overload_task() tasks.
244 */
245struct pf_overload_entry {
246	SLIST_ENTRY(pf_overload_entry)	next;
247	struct pf_addr  		addr;
248	sa_family_t			af;
249	uint8_t				dir;
250	struct pf_krule  		*rule;
251};
252
253SLIST_HEAD(pf_overload_head, pf_overload_entry);
254VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
255#define V_pf_overloadqueue	VNET(pf_overloadqueue)
256VNET_DEFINE_STATIC(struct task, pf_overloadtask);
257#define	V_pf_overloadtask	VNET(pf_overloadtask)
258
259static struct mtx_padalign pf_overloadqueue_mtx;
260MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
261    "pf overload/flush queue", MTX_DEF);
262#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
263#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
264
265VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules);
266struct mtx_padalign pf_unlnkdrules_mtx;
267MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
268    MTX_DEF);
269
270struct sx pf_config_lock;
271SX_SYSINIT(pf_config_lock, &pf_config_lock, "pf config");
272
273struct mtx_padalign pf_table_stats_lock;
274MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats",
275    MTX_DEF);
276
277VNET_DEFINE_STATIC(uma_zone_t,	pf_sources_z);
278#define	V_pf_sources_z	VNET(pf_sources_z)
279uma_zone_t		pf_mtag_z;
280VNET_DEFINE(uma_zone_t,	 pf_state_z);
281VNET_DEFINE(uma_zone_t,	 pf_state_key_z);
282
283VNET_DEFINE(struct unrhdr64, pf_stateid);
284
285static void		 pf_src_tree_remove_state(struct pf_kstate *);
286static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
287			    u_int32_t);
288static void		 pf_add_threshold(struct pf_threshold *);
289static int		 pf_check_threshold(struct pf_threshold *);
290
291static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
292			    u_int16_t *, u_int16_t *, struct pf_addr *,
293			    u_int16_t, u_int8_t, sa_family_t);
294static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
295			    struct tcphdr *, struct pf_state_peer *);
296static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
297			    struct pf_addr *, struct pf_addr *, u_int16_t,
298			    u_int16_t *, u_int16_t *, u_int16_t *,
299			    u_int16_t *, u_int8_t, sa_family_t);
300static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
301			    sa_family_t, struct pf_krule *, int);
302static void		 pf_detach_state(struct pf_kstate *);
303static int		 pf_state_key_attach(struct pf_state_key *,
304			    struct pf_state_key *, struct pf_kstate *);
305static void		 pf_state_key_detach(struct pf_kstate *, int);
306static int		 pf_state_key_ctor(void *, int, void *, int);
307static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
308static __inline void	 pf_dummynet_flag_remove(struct mbuf *m,
309			    struct pf_mtag *pf_mtag);
310static int		 pf_dummynet(struct pf_pdesc *, struct pf_kstate *,
311			    struct pf_krule *, struct mbuf **);
312static int		 pf_dummynet_route(struct pf_pdesc *,
313			    struct pf_kstate *, struct pf_krule *,
314			    struct ifnet *, struct sockaddr *, struct mbuf **);
315static int		 pf_test_eth_rule(int, struct pfi_kkif *,
316			    struct mbuf **);
317static int		 pf_test_rule(struct pf_krule **, struct pf_kstate **,
318			    struct pfi_kkif *, struct mbuf *, int,
319			    struct pf_pdesc *, struct pf_krule **,
320			    struct pf_kruleset **, struct inpcb *);
321static int		 pf_create_state(struct pf_krule *, struct pf_krule *,
322			    struct pf_krule *, struct pf_pdesc *,
323			    struct pf_ksrc_node *, struct pf_state_key *,
324			    struct pf_state_key *, struct mbuf *, int,
325			    u_int16_t, u_int16_t, int *, struct pfi_kkif *,
326			    struct pf_kstate **, int, u_int16_t, u_int16_t,
327			    int, struct pf_krule_slist *);
328static int		 pf_test_fragment(struct pf_krule **, struct pfi_kkif *,
329			    struct mbuf *, void *, struct pf_pdesc *,
330			    struct pf_krule **, struct pf_kruleset **);
331static int		 pf_tcp_track_full(struct pf_kstate **,
332			    struct pfi_kkif *, struct mbuf *, int,
333			    struct pf_pdesc *, u_short *, int *);
334static int		 pf_tcp_track_sloppy(struct pf_kstate **,
335			    struct pf_pdesc *, u_short *);
336static int		 pf_test_state_tcp(struct pf_kstate **,
337			    struct pfi_kkif *, struct mbuf *, int,
338			    void *, struct pf_pdesc *, u_short *);
339static int		 pf_test_state_udp(struct pf_kstate **,
340			    struct pfi_kkif *, struct mbuf *, int,
341			    void *, struct pf_pdesc *);
342static int		 pf_test_state_icmp(struct pf_kstate **,
343			    struct pfi_kkif *, struct mbuf *, int,
344			    void *, struct pf_pdesc *, u_short *);
345static void		 pf_sctp_multihome_detach_addr(const struct pf_kstate *);
346static void		 pf_sctp_multihome_delayed(struct pf_pdesc *, int,
347			    struct pfi_kkif *, struct pf_kstate *, int);
348static int		 pf_test_state_sctp(struct pf_kstate **,
349			    struct pfi_kkif *, struct mbuf *, int,
350			    void *, struct pf_pdesc *, u_short *);
351static int		 pf_test_state_other(struct pf_kstate **,
352			    struct pfi_kkif *, struct mbuf *, struct pf_pdesc *);
353static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
354				int, u_int16_t);
355static int		 pf_check_proto_cksum(struct mbuf *, int, int,
356			    u_int8_t, sa_family_t);
357static void		 pf_print_state_parts(struct pf_kstate *,
358			    struct pf_state_key *, struct pf_state_key *);
359static void		 pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t,
360			    bool, u_int8_t);
361static struct pf_kstate	*pf_find_state(struct pfi_kkif *,
362			    struct pf_state_key_cmp *, u_int);
363static int		 pf_src_connlimit(struct pf_kstate **);
364static void		 pf_overload_task(void *v, int pending);
365static u_short		 pf_insert_src_node(struct pf_ksrc_node **,
366			    struct pf_krule *, struct pf_addr *, sa_family_t);
367static u_int		 pf_purge_expired_states(u_int, int);
368static void		 pf_purge_unlinked_rules(void);
369static int		 pf_mtag_uminit(void *, int, int);
370static void		 pf_mtag_free(struct m_tag *);
371static void		 pf_packet_rework_nat(struct mbuf *, struct pf_pdesc *,
372			    int, struct pf_state_key *);
373#ifdef INET
374static void		 pf_route(struct mbuf **, struct pf_krule *,
375			    struct ifnet *, struct pf_kstate *,
376			    struct pf_pdesc *, struct inpcb *);
377#endif /* INET */
378#ifdef INET6
379static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
380			    struct pf_addr *, u_int8_t);
381static void		 pf_route6(struct mbuf **, struct pf_krule *,
382			    struct ifnet *, struct pf_kstate *,
383			    struct pf_pdesc *, struct inpcb *);
384#endif /* INET6 */
385static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t);
386
387int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
388
389extern int pf_end_threads;
390extern struct proc *pf_purge_proc;
391
392VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
393
394#define	PACKET_UNDO_NAT(_m, _pd, _off, _s)		\
395	do {								\
396		struct pf_state_key *nk;				\
397		if ((pd->dir) == PF_OUT)					\
398			nk = (_s)->key[PF_SK_STACK];			\
399		else							\
400			nk = (_s)->key[PF_SK_WIRE];			\
401		pf_packet_rework_nat(_m, _pd, _off, nk);		\
402	} while (0)
403
404#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
405				 (pd)->pf_mtag->flags & PF_MTAG_FLAG_PACKET_LOOPED)
406
407#define	STATE_LOOKUP(i, k, s, pd)					\
408	do {								\
409		(s) = pf_find_state((i), (k), (pd->dir));			\
410		SDT_PROBE5(pf, ip, state, lookup, i, k, (pd->dir), pd, (s));	\
411		if ((s) == NULL)					\
412			return (PF_DROP);				\
413		if (PACKET_LOOPED(pd))					\
414			return (PF_PASS);				\
415	} while (0)
416
417static struct pfi_kkif *
418BOUND_IFACE(struct pf_kstate *st, struct pfi_kkif *k)
419{
420	SDT_PROBE2(pf, ip, , bound_iface, st, k);
421
422	/* Floating unless otherwise specified. */
423	if (! (st->rule.ptr->rule_flag & PFRULE_IFBOUND))
424		return (V_pfi_all);
425
426	/*
427	 * Initially set to all, because we don't know what interface we'll be
428	 * sending this out when we create the state.
429	 */
430	if (st->rule.ptr->rt == PF_REPLYTO)
431		return (V_pfi_all);
432
433	/* Don't overrule the interface for states created on incoming packets. */
434	if (st->direction == PF_IN)
435		return (k);
436
437	/* No route-to, so don't overrule. */
438	if (st->rt != PF_ROUTETO)
439		return (k);
440
441	/* Bind to the route-to interface. */
442	return (st->rt_kif);
443}
444
445#define	STATE_INC_COUNTERS(s)						\
446	do {								\
447		struct pf_krule_item *mrm;				\
448		counter_u64_add(s->rule.ptr->states_cur, 1);		\
449		counter_u64_add(s->rule.ptr->states_tot, 1);		\
450		if (s->anchor.ptr != NULL) {				\
451			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
452			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
453		}							\
454		if (s->nat_rule.ptr != NULL) {				\
455			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
456			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
457		}							\
458		SLIST_FOREACH(mrm, &s->match_rules, entry) {		\
459			counter_u64_add(mrm->r->states_cur, 1);		\
460			counter_u64_add(mrm->r->states_tot, 1);		\
461		}							\
462	} while (0)
463
464#define	STATE_DEC_COUNTERS(s)						\
465	do {								\
466		struct pf_krule_item *mrm;				\
467		if (s->nat_rule.ptr != NULL)				\
468			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
469		if (s->anchor.ptr != NULL)				\
470			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
471		counter_u64_add(s->rule.ptr->states_cur, -1);		\
472		SLIST_FOREACH(mrm, &s->match_rules, entry)		\
473			counter_u64_add(mrm->r->states_cur, -1);	\
474	} while (0)
475
476MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
477MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items");
478VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
479VNET_DEFINE(struct pf_idhash *, pf_idhash);
480VNET_DEFINE(struct pf_srchash *, pf_srchash);
481
482SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
483    "pf(4)");
484
485u_long	pf_hashmask;
486u_long	pf_srchashmask;
487static u_long	pf_hashsize;
488static u_long	pf_srchashsize;
489u_long	pf_ioctl_maxcount = 65535;
490
491SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
492    &pf_hashsize, 0, "Size of pf(4) states hashtable");
493SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
494    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
495SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
496    &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call");
497
498VNET_DEFINE(void *, pf_swi_cookie);
499VNET_DEFINE(struct intr_event *, pf_swi_ie);
500
501VNET_DEFINE(uint32_t, pf_hashseed);
502#define	V_pf_hashseed	VNET(pf_hashseed)
503
504static void
505pf_sctp_checksum(struct mbuf *m, int off)
506{
507	uint32_t sum = 0;
508
509	/* Zero out the checksum, to enable recalculation. */
510	m_copyback(m, off + offsetof(struct sctphdr, checksum),
511	    sizeof(sum), (caddr_t)&sum);
512
513	sum = sctp_calculate_cksum(m, off);
514
515	m_copyback(m, off + offsetof(struct sctphdr, checksum),
516	    sizeof(sum), (caddr_t)&sum);
517}
518
519int
520pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
521{
522
523	switch (af) {
524#ifdef INET
525	case AF_INET:
526		if (a->addr32[0] > b->addr32[0])
527			return (1);
528		if (a->addr32[0] < b->addr32[0])
529			return (-1);
530		break;
531#endif /* INET */
532#ifdef INET6
533	case AF_INET6:
534		if (a->addr32[3] > b->addr32[3])
535			return (1);
536		if (a->addr32[3] < b->addr32[3])
537			return (-1);
538		if (a->addr32[2] > b->addr32[2])
539			return (1);
540		if (a->addr32[2] < b->addr32[2])
541			return (-1);
542		if (a->addr32[1] > b->addr32[1])
543			return (1);
544		if (a->addr32[1] < b->addr32[1])
545			return (-1);
546		if (a->addr32[0] > b->addr32[0])
547			return (1);
548		if (a->addr32[0] < b->addr32[0])
549			return (-1);
550		break;
551#endif /* INET6 */
552	default:
553		panic("%s: unknown address family %u", __func__, af);
554	}
555	return (0);
556}
557
558static void
559pf_packet_rework_nat(struct mbuf *m, struct pf_pdesc *pd, int off,
560	struct pf_state_key *nk)
561{
562
563	switch (pd->proto) {
564	case IPPROTO_TCP: {
565		struct tcphdr *th = &pd->hdr.tcp;
566
567		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
568			pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum,
569			    &th->th_sum, &nk->addr[pd->sidx],
570			    nk->port[pd->sidx], 0, pd->af);
571		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
572			pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum,
573			    &th->th_sum, &nk->addr[pd->didx],
574			    nk->port[pd->didx], 0, pd->af);
575		m_copyback(m, off, sizeof(*th), (caddr_t)th);
576		break;
577	}
578	case IPPROTO_UDP: {
579		struct udphdr *uh = &pd->hdr.udp;
580
581		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
582			pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
583			    &uh->uh_sum, &nk->addr[pd->sidx],
584			    nk->port[pd->sidx], 1, pd->af);
585		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
586			pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
587			    &uh->uh_sum, &nk->addr[pd->didx],
588			    nk->port[pd->didx], 1, pd->af);
589		m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
590		break;
591	}
592	case IPPROTO_SCTP: {
593		struct sctphdr *sh = &pd->hdr.sctp;
594		uint16_t checksum = 0;
595
596		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
597			pf_change_ap(m, pd->src, &sh->src_port, pd->ip_sum,
598			    &checksum, &nk->addr[pd->sidx],
599			    nk->port[pd->sidx], 1, pd->af);
600		}
601		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
602			pf_change_ap(m, pd->dst, &sh->dest_port, pd->ip_sum,
603			    &checksum, &nk->addr[pd->didx],
604			    nk->port[pd->didx], 1, pd->af);
605		}
606
607		break;
608	}
609	case IPPROTO_ICMP: {
610		struct icmp *ih = &pd->hdr.icmp;
611
612		if (nk->port[pd->sidx] != ih->icmp_id) {
613			pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
614			    ih->icmp_cksum, ih->icmp_id,
615			    nk->port[pd->sidx], 0);
616			ih->icmp_id = nk->port[pd->sidx];
617			pd->sport = &ih->icmp_id;
618
619			m_copyback(m, off, ICMP_MINLEN, (caddr_t)ih);
620		}
621		/* FALLTHROUGH */
622	}
623	default:
624		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
625			switch (pd->af) {
626			case AF_INET:
627				pf_change_a(&pd->src->v4.s_addr,
628				    pd->ip_sum, nk->addr[pd->sidx].v4.s_addr,
629				    0);
630				break;
631			case AF_INET6:
632				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
633				break;
634			}
635		}
636		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
637			switch (pd->af) {
638			case AF_INET:
639				pf_change_a(&pd->dst->v4.s_addr,
640				    pd->ip_sum, nk->addr[pd->didx].v4.s_addr,
641				    0);
642				break;
643			case AF_INET6:
644				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
645				break;
646			}
647		}
648		break;
649	}
650}
651
652static __inline uint32_t
653pf_hashkey(struct pf_state_key *sk)
654{
655	uint32_t h;
656
657	h = murmur3_32_hash32((uint32_t *)sk,
658	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
659	    V_pf_hashseed);
660
661	return (h & pf_hashmask);
662}
663
664static __inline uint32_t
665pf_hashsrc(struct pf_addr *addr, sa_family_t af)
666{
667	uint32_t h;
668
669	switch (af) {
670	case AF_INET:
671		h = murmur3_32_hash32((uint32_t *)&addr->v4,
672		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
673		break;
674	case AF_INET6:
675		h = murmur3_32_hash32((uint32_t *)&addr->v6,
676		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
677		break;
678	default:
679		panic("%s: unknown address family %u", __func__, af);
680	}
681
682	return (h & pf_srchashmask);
683}
684
685#ifdef ALTQ
686static int
687pf_state_hash(struct pf_kstate *s)
688{
689	u_int32_t hv = (intptr_t)s / sizeof(*s);
690
691	hv ^= crc32(&s->src, sizeof(s->src));
692	hv ^= crc32(&s->dst, sizeof(s->dst));
693	if (hv == 0)
694		hv = 1;
695	return (hv);
696}
697#endif
698
699static __inline void
700pf_set_protostate(struct pf_kstate *s, int which, u_int8_t newstate)
701{
702	if (which == PF_PEER_DST || which == PF_PEER_BOTH)
703		s->dst.state = newstate;
704	if (which == PF_PEER_DST)
705		return;
706	if (s->src.state == newstate)
707		return;
708	if (s->creatorid == V_pf_status.hostid &&
709	    s->key[PF_SK_STACK] != NULL &&
710	    s->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
711	    !(TCPS_HAVEESTABLISHED(s->src.state) ||
712	    s->src.state == TCPS_CLOSED) &&
713	    (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED))
714		atomic_add_32(&V_pf_status.states_halfopen, -1);
715
716	s->src.state = newstate;
717}
718
719#ifdef INET6
720void
721pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
722{
723	switch (af) {
724#ifdef INET
725	case AF_INET:
726		dst->addr32[0] = src->addr32[0];
727		break;
728#endif /* INET */
729	case AF_INET6:
730		dst->addr32[0] = src->addr32[0];
731		dst->addr32[1] = src->addr32[1];
732		dst->addr32[2] = src->addr32[2];
733		dst->addr32[3] = src->addr32[3];
734		break;
735	}
736}
737#endif /* INET6 */
738
739static void
740pf_init_threshold(struct pf_threshold *threshold,
741    u_int32_t limit, u_int32_t seconds)
742{
743	threshold->limit = limit * PF_THRESHOLD_MULT;
744	threshold->seconds = seconds;
745	threshold->count = 0;
746	threshold->last = time_uptime;
747}
748
749static void
750pf_add_threshold(struct pf_threshold *threshold)
751{
752	u_int32_t t = time_uptime, diff = t - threshold->last;
753
754	if (diff >= threshold->seconds)
755		threshold->count = 0;
756	else
757		threshold->count -= threshold->count * diff /
758		    threshold->seconds;
759	threshold->count += PF_THRESHOLD_MULT;
760	threshold->last = t;
761}
762
763static int
764pf_check_threshold(struct pf_threshold *threshold)
765{
766	return (threshold->count > threshold->limit);
767}
768
769static int
770pf_src_connlimit(struct pf_kstate **state)
771{
772	struct pf_overload_entry *pfoe;
773	int bad = 0;
774
775	PF_STATE_LOCK_ASSERT(*state);
776	/*
777	 * XXXKS: The src node is accessed unlocked!
778	 * PF_SRC_NODE_LOCK_ASSERT((*state)->src_node);
779	 */
780
781	(*state)->src_node->conn++;
782	(*state)->src.tcp_est = 1;
783	pf_add_threshold(&(*state)->src_node->conn_rate);
784
785	if ((*state)->rule.ptr->max_src_conn &&
786	    (*state)->rule.ptr->max_src_conn <
787	    (*state)->src_node->conn) {
788		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
789		bad++;
790	}
791
792	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
793	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
794		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
795		bad++;
796	}
797
798	if (!bad)
799		return (0);
800
801	/* Kill this state. */
802	(*state)->timeout = PFTM_PURGE;
803	pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
804
805	if ((*state)->rule.ptr->overload_tbl == NULL)
806		return (1);
807
808	/* Schedule overloading and flushing task. */
809	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
810	if (pfoe == NULL)
811		return (1);	/* too bad :( */
812
813	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
814	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
815	pfoe->rule = (*state)->rule.ptr;
816	pfoe->dir = (*state)->direction;
817	PF_OVERLOADQ_LOCK();
818	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
819	PF_OVERLOADQ_UNLOCK();
820	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);
821
822	return (1);
823}
824
825static void
826pf_overload_task(void *v, int pending)
827{
828	struct pf_overload_head queue;
829	struct pfr_addr p;
830	struct pf_overload_entry *pfoe, *pfoe1;
831	uint32_t killed = 0;
832
833	CURVNET_SET((struct vnet *)v);
834
835	PF_OVERLOADQ_LOCK();
836	queue = V_pf_overloadqueue;
837	SLIST_INIT(&V_pf_overloadqueue);
838	PF_OVERLOADQ_UNLOCK();
839
840	bzero(&p, sizeof(p));
841	SLIST_FOREACH(pfoe, &queue, next) {
842		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
843		if (V_pf_status.debug >= PF_DEBUG_MISC) {
844			printf("%s: blocking address ", __func__);
845			pf_print_host(&pfoe->addr, 0, pfoe->af);
846			printf("\n");
847		}
848
849		p.pfra_af = pfoe->af;
850		switch (pfoe->af) {
851#ifdef INET
852		case AF_INET:
853			p.pfra_net = 32;
854			p.pfra_ip4addr = pfoe->addr.v4;
855			break;
856#endif
857#ifdef INET6
858		case AF_INET6:
859			p.pfra_net = 128;
860			p.pfra_ip6addr = pfoe->addr.v6;
861			break;
862#endif
863		}
864
865		PF_RULES_WLOCK();
866		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
867		PF_RULES_WUNLOCK();
868	}
869
870	/*
871	 * Remove those entries, that don't need flushing.
872	 */
873	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
874		if (pfoe->rule->flush == 0) {
875			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
876			free(pfoe, M_PFTEMP);
877		} else
878			counter_u64_add(
879			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);
880
881	/* If nothing to flush, return. */
882	if (SLIST_EMPTY(&queue)) {
883		CURVNET_RESTORE();
884		return;
885	}
886
887	for (int i = 0; i <= pf_hashmask; i++) {
888		struct pf_idhash *ih = &V_pf_idhash[i];
889		struct pf_state_key *sk;
890		struct pf_kstate *s;
891
892		PF_HASHROW_LOCK(ih);
893		LIST_FOREACH(s, &ih->states, entry) {
894		    sk = s->key[PF_SK_WIRE];
895		    SLIST_FOREACH(pfoe, &queue, next)
896			if (sk->af == pfoe->af &&
897			    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
898			    pfoe->rule == s->rule.ptr) &&
899			    ((pfoe->dir == PF_OUT &&
900			    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
901			    (pfoe->dir == PF_IN &&
902			    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
903				s->timeout = PFTM_PURGE;
904				pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
905				killed++;
906			}
907		}
908		PF_HASHROW_UNLOCK(ih);
909	}
910	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
911		free(pfoe, M_PFTEMP);
912	if (V_pf_status.debug >= PF_DEBUG_MISC)
913		printf("%s: %u states killed", __func__, killed);
914
915	CURVNET_RESTORE();
916}
917
918/*
919 * Can return locked on failure, so that we can consistently
920 * allocate and insert a new one.
921 */
922struct pf_ksrc_node *
923pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af,
924	struct pf_srchash **sh, bool returnlocked)
925{
926	struct pf_ksrc_node *n;
927
928	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);
929
930	*sh = &V_pf_srchash[pf_hashsrc(src, af)];
931	PF_HASHROW_LOCK(*sh);
932	LIST_FOREACH(n, &(*sh)->nodes, entry)
933		if (n->rule.ptr == rule && n->af == af &&
934		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
935		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
936			break;
937
938	if (n != NULL) {
939		n->states++;
940		PF_HASHROW_UNLOCK(*sh);
941	} else if (returnlocked == false)
942		PF_HASHROW_UNLOCK(*sh);
943
944	return (n);
945}
946
947static void
948pf_free_src_node(struct pf_ksrc_node *sn)
949{
950
951	for (int i = 0; i < 2; i++) {
952		counter_u64_free(sn->bytes[i]);
953		counter_u64_free(sn->packets[i]);
954	}
955	uma_zfree(V_pf_sources_z, sn);
956}
957
958static u_short
959pf_insert_src_node(struct pf_ksrc_node **sn, struct pf_krule *rule,
960    struct pf_addr *src, sa_family_t af)
961{
962	u_short			 reason = 0;
963	struct pf_srchash	*sh = NULL;
964
965	KASSERT((rule->rule_flag & PFRULE_SRCTRACK ||
966	    rule->rpool.opts & PF_POOL_STICKYADDR),
967	    ("%s for non-tracking rule %p", __func__, rule));
968
969	if (*sn == NULL)
970		*sn = pf_find_src_node(src, rule, af, &sh, true);
971
972	if (*sn == NULL) {
973		PF_HASHROW_ASSERT(sh);
974
975		if (rule->max_src_nodes &&
976		    counter_u64_fetch(rule->src_nodes) >= rule->max_src_nodes) {
977			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES], 1);
978			PF_HASHROW_UNLOCK(sh);
979			reason = PFRES_SRCLIMIT;
980			goto done;
981		}
982
983		(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
984		if ((*sn) == NULL) {
985			PF_HASHROW_UNLOCK(sh);
986			reason = PFRES_MEMORY;
987			goto done;
988		}
989
990		for (int i = 0; i < 2; i++) {
991			(*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT);
992			(*sn)->packets[i] = counter_u64_alloc(M_NOWAIT);
993
994			if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) {
995				pf_free_src_node(*sn);
996				PF_HASHROW_UNLOCK(sh);
997				reason = PFRES_MEMORY;
998				goto done;
999			}
1000		}
1001
1002		pf_init_threshold(&(*sn)->conn_rate,
1003		    rule->max_src_conn_rate.limit,
1004		    rule->max_src_conn_rate.seconds);
1005
1006		MPASS((*sn)->lock == NULL);
1007		(*sn)->lock = &sh->lock;
1008
1009		(*sn)->af = af;
1010		(*sn)->rule.ptr = rule;
1011		PF_ACPY(&(*sn)->addr, src, af);
1012		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
1013		(*sn)->creation = time_uptime;
1014		(*sn)->ruletype = rule->action;
1015		(*sn)->states = 1;
1016		if ((*sn)->rule.ptr != NULL)
1017			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
1018		PF_HASHROW_UNLOCK(sh);
1019		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
1020	} else {
1021		if (rule->max_src_states &&
1022		    (*sn)->states >= rule->max_src_states) {
1023			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
1024			    1);
1025			reason = PFRES_SRCLIMIT;
1026			goto done;
1027		}
1028	}
1029done:
1030	return (reason);
1031}
1032
1033void
1034pf_unlink_src_node(struct pf_ksrc_node *src)
1035{
1036	PF_SRC_NODE_LOCK_ASSERT(src);
1037
1038	LIST_REMOVE(src, entry);
1039	if (src->rule.ptr)
1040		counter_u64_add(src->rule.ptr->src_nodes, -1);
1041}
1042
1043u_int
1044pf_free_src_nodes(struct pf_ksrc_node_list *head)
1045{
1046	struct pf_ksrc_node *sn, *tmp;
1047	u_int count = 0;
1048
1049	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
1050		pf_free_src_node(sn);
1051		count++;
1052	}
1053
1054	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);
1055
1056	return (count);
1057}
1058
1059void
1060pf_mtag_initialize(void)
1061{
1062
1063	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
1064	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
1065	    UMA_ALIGN_PTR, 0);
1066}
1067
1068/* Per-vnet data storage structures initialization. */
1069void
1070pf_initialize(void)
1071{
1072	struct pf_keyhash	*kh;
1073	struct pf_idhash	*ih;
1074	struct pf_srchash	*sh;
1075	u_int i;
1076
1077	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
1078		pf_hashsize = PF_HASHSIZ;
1079	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
1080		pf_srchashsize = PF_SRCHASHSIZ;
1081
1082	V_pf_hashseed = arc4random();
1083
1084	/* States and state keys storage. */
1085	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate),
1086	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1087	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
1088	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
1089	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");
1090
1091	V_pf_state_key_z = uma_zcreate("pf state keys",
1092	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
1093	    UMA_ALIGN_PTR, 0);
1094
1095	V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
1096	    M_PFHASH, M_NOWAIT | M_ZERO);
1097	V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
1098	    M_PFHASH, M_NOWAIT | M_ZERO);
1099	if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
1100		printf("pf: Unable to allocate memory for "
1101		    "state_hashsize %lu.\n", pf_hashsize);
1102
1103		free(V_pf_keyhash, M_PFHASH);
1104		free(V_pf_idhash, M_PFHASH);
1105
1106		pf_hashsize = PF_HASHSIZ;
1107		V_pf_keyhash = mallocarray(pf_hashsize,
1108		    sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
1109		V_pf_idhash = mallocarray(pf_hashsize,
1110		    sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
1111	}
1112
1113	pf_hashmask = pf_hashsize - 1;
1114	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
1115	    i++, kh++, ih++) {
1116		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
1117		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
1118	}
1119
1120	/* Source nodes. */
1121	V_pf_sources_z = uma_zcreate("pf source nodes",
1122	    sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1123	    0);
1124	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
1125	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
1126	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
1127
1128	V_pf_srchash = mallocarray(pf_srchashsize,
1129	    sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
1130	if (V_pf_srchash == NULL) {
1131		printf("pf: Unable to allocate memory for "
1132		    "source_hashsize %lu.\n", pf_srchashsize);
1133
1134		pf_srchashsize = PF_SRCHASHSIZ;
1135		V_pf_srchash = mallocarray(pf_srchashsize,
1136		    sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
1137	}
1138
1139	pf_srchashmask = pf_srchashsize - 1;
1140	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
1141		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
1142
1143	/* ALTQ */
1144	TAILQ_INIT(&V_pf_altqs[0]);
1145	TAILQ_INIT(&V_pf_altqs[1]);
1146	TAILQ_INIT(&V_pf_altqs[2]);
1147	TAILQ_INIT(&V_pf_altqs[3]);
1148	TAILQ_INIT(&V_pf_pabuf);
1149	V_pf_altqs_active = &V_pf_altqs[0];
1150	V_pf_altq_ifs_active = &V_pf_altqs[1];
1151	V_pf_altqs_inactive = &V_pf_altqs[2];
1152	V_pf_altq_ifs_inactive = &V_pf_altqs[3];
1153
1154	/* Send & overload+flush queues. */
1155	STAILQ_INIT(&V_pf_sendqueue);
1156	SLIST_INIT(&V_pf_overloadqueue);
1157	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
1158
1159	/* Unlinked, but may be referenced rules. */
1160	TAILQ_INIT(&V_pf_unlinked_rules);
1161}
1162
1163void
1164pf_mtag_cleanup(void)
1165{
1166
1167	uma_zdestroy(pf_mtag_z);
1168}
1169
1170void
1171pf_cleanup(void)
1172{
1173	struct pf_keyhash	*kh;
1174	struct pf_idhash	*ih;
1175	struct pf_srchash	*sh;
1176	struct pf_send_entry	*pfse, *next;
1177	u_int i;
1178
1179	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
1180	    i++, kh++, ih++) {
1181		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
1182		    __func__));
1183		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
1184		    __func__));
1185		mtx_destroy(&kh->lock);
1186		mtx_destroy(&ih->lock);
1187	}
1188	free(V_pf_keyhash, M_PFHASH);
1189	free(V_pf_idhash, M_PFHASH);
1190
1191	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
1192		KASSERT(LIST_EMPTY(&sh->nodes),
1193		    ("%s: source node hash not empty", __func__));
1194		mtx_destroy(&sh->lock);
1195	}
1196	free(V_pf_srchash, M_PFHASH);
1197
1198	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
1199		m_freem(pfse->pfse_m);
1200		free(pfse, M_PFTEMP);
1201	}
1202	MPASS(RB_EMPTY(&V_pf_sctp_endpoints));
1203
1204	uma_zdestroy(V_pf_sources_z);
1205	uma_zdestroy(V_pf_state_z);
1206	uma_zdestroy(V_pf_state_key_z);
1207}
1208
1209static int
1210pf_mtag_uminit(void *mem, int size, int how)
1211{
1212	struct m_tag *t;
1213
1214	t = (struct m_tag *)mem;
1215	t->m_tag_cookie = MTAG_ABI_COMPAT;
1216	t->m_tag_id = PACKET_TAG_PF;
1217	t->m_tag_len = sizeof(struct pf_mtag);
1218	t->m_tag_free = pf_mtag_free;
1219
1220	return (0);
1221}
1222
1223static void
1224pf_mtag_free(struct m_tag *t)
1225{
1226
1227	uma_zfree(pf_mtag_z, t);
1228}
1229
1230struct pf_mtag *
1231pf_get_mtag(struct mbuf *m)
1232{
1233	struct m_tag *mtag;
1234
1235	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
1236		return ((struct pf_mtag *)(mtag + 1));
1237
1238	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
1239	if (mtag == NULL)
1240		return (NULL);
1241	bzero(mtag + 1, sizeof(struct pf_mtag));
1242	m_tag_prepend(m, mtag);
1243
1244	return ((struct pf_mtag *)(mtag + 1));
1245}
1246
1247static int
1248pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
1249    struct pf_kstate *s)
1250{
1251	struct pf_keyhash	*khs, *khw, *kh;
1252	struct pf_state_key	*sk, *cur;
1253	struct pf_kstate	*si, *olds = NULL;
1254	int idx;
1255
1256	NET_EPOCH_ASSERT();
1257	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1258	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
1259	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));
1260
1261	/*
1262	 * We need to lock hash slots of both keys. To avoid deadlock
1263	 * we always lock the slot with lower address first. Unlock order
1264	 * isn't important.
1265	 *
1266	 * We also need to lock ID hash slot before dropping key
1267	 * locks. On success we return with ID hash slot locked.
1268	 */
1269
1270	if (skw == sks) {
1271		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
1272		PF_HASHROW_LOCK(khs);
1273	} else {
1274		khs = &V_pf_keyhash[pf_hashkey(sks)];
1275		khw = &V_pf_keyhash[pf_hashkey(skw)];
1276		if (khs == khw) {
1277			PF_HASHROW_LOCK(khs);
1278		} else if (khs < khw) {
1279			PF_HASHROW_LOCK(khs);
1280			PF_HASHROW_LOCK(khw);
1281		} else {
1282			PF_HASHROW_LOCK(khw);
1283			PF_HASHROW_LOCK(khs);
1284		}
1285	}
1286
1287#define	KEYS_UNLOCK()	do {			\
1288	if (khs != khw) {			\
1289		PF_HASHROW_UNLOCK(khs);		\
1290		PF_HASHROW_UNLOCK(khw);		\
1291	} else					\
1292		PF_HASHROW_UNLOCK(khs);		\
1293} while (0)
1294
1295	/*
1296	 * First run: start with wire key.
1297	 */
1298	sk = skw;
1299	kh = khw;
1300	idx = PF_SK_WIRE;
1301
1302	MPASS(s->lock == NULL);
1303	s->lock = &V_pf_idhash[PF_IDHASH(s)].lock;
1304
1305keyattach:
1306	LIST_FOREACH(cur, &kh->keys, entry)
1307		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
1308			break;
1309
1310	if (cur != NULL) {
1311		/* Key exists. Check for same kif, if none, add to key. */
1312		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
1313			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];
1314
1315			PF_HASHROW_LOCK(ih);
1316			if (si->kif == s->kif &&
1317			    si->direction == s->direction) {
1318				if (sk->proto == IPPROTO_TCP &&
1319				    si->src.state >= TCPS_FIN_WAIT_2 &&
1320				    si->dst.state >= TCPS_FIN_WAIT_2) {
1321					/*
1322					 * New state matches an old >FIN_WAIT_2
1323					 * state. We can't drop key hash locks,
1324					 * thus we can't unlink it properly.
1325					 *
1326					 * As a workaround we drop it into
1327					 * TCPS_CLOSED state, schedule purge
1328					 * ASAP and push it into the very end
1329					 * of the slot TAILQ, so that it won't
1330					 * conflict with our new state.
1331					 */
1332					pf_set_protostate(si, PF_PEER_BOTH,
1333					    TCPS_CLOSED);
1334					si->timeout = PFTM_PURGE;
1335					olds = si;
1336				} else {
1337					if (V_pf_status.debug >= PF_DEBUG_MISC) {
1338						printf("pf: %s key attach "
1339						    "failed on %s: ",
1340						    (idx == PF_SK_WIRE) ?
1341						    "wire" : "stack",
1342						    s->kif->pfik_name);
1343						pf_print_state_parts(s,
1344						    (idx == PF_SK_WIRE) ?
1345						    sk : NULL,
1346						    (idx == PF_SK_STACK) ?
1347						    sk : NULL);
1348						printf(", existing: ");
1349						pf_print_state_parts(si,
1350						    (idx == PF_SK_WIRE) ?
1351						    sk : NULL,
1352						    (idx == PF_SK_STACK) ?
1353						    sk : NULL);
1354						printf("\n");
1355					}
1356					s->timeout = PFTM_UNLINKED;
1357					PF_HASHROW_UNLOCK(ih);
1358					KEYS_UNLOCK();
1359					uma_zfree(V_pf_state_key_z, sk);
1360					if (idx == PF_SK_STACK)
1361						pf_detach_state(s);
1362					return (EEXIST); /* collision! */
1363				}
1364			}
1365			PF_HASHROW_UNLOCK(ih);
1366		}
1367		uma_zfree(V_pf_state_key_z, sk);
1368		s->key[idx] = cur;
1369	} else {
1370		LIST_INSERT_HEAD(&kh->keys, sk, entry);
1371		s->key[idx] = sk;
1372	}
1373
1374stateattach:
1375	/* List is sorted, if-bound states before floating. */
1376	if (s->kif == V_pfi_all)
1377		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
1378	else
1379		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);
1380
1381	if (olds) {
1382		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
1383		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
1384		    key_list[idx]);
1385		olds = NULL;
1386	}
1387
1388	/*
1389	 * Attach done. See how should we (or should not?)
1390	 * attach a second key.
1391	 */
1392	if (sks == skw) {
1393		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1394		idx = PF_SK_STACK;
1395		sks = NULL;
1396		goto stateattach;
1397	} else if (sks != NULL) {
1398		/*
1399		 * Continue attaching with stack key.
1400		 */
1401		sk = sks;
1402		kh = khs;
1403		idx = PF_SK_STACK;
1404		sks = NULL;
1405		goto keyattach;
1406	}
1407
1408	PF_STATE_LOCK(s);
1409	KEYS_UNLOCK();
1410
1411	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
1412	    ("%s failure", __func__));
1413
1414	return (0);
1415#undef	KEYS_UNLOCK
1416}
1417
1418static void
1419pf_detach_state(struct pf_kstate *s)
1420{
1421	struct pf_state_key *sks = s->key[PF_SK_STACK];
1422	struct pf_keyhash *kh;
1423
1424	NET_EPOCH_ASSERT();
1425	MPASS(s->timeout >= PFTM_MAX);
1426
1427	pf_sctp_multihome_detach_addr(s);
1428
1429	if ((s->state_flags & PFSTATE_PFLOW) && V_pflow_export_state_ptr)
1430		V_pflow_export_state_ptr(s);
1431
1432	if (sks != NULL) {
1433		kh = &V_pf_keyhash[pf_hashkey(sks)];
1434		PF_HASHROW_LOCK(kh);
1435		if (s->key[PF_SK_STACK] != NULL)
1436			pf_state_key_detach(s, PF_SK_STACK);
1437		/*
1438		 * If both point to same key, then we are done.
1439		 */
1440		if (sks == s->key[PF_SK_WIRE]) {
1441			pf_state_key_detach(s, PF_SK_WIRE);
1442			PF_HASHROW_UNLOCK(kh);
1443			return;
1444		}
1445		PF_HASHROW_UNLOCK(kh);
1446	}
1447
1448	if (s->key[PF_SK_WIRE] != NULL) {
1449		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
1450		PF_HASHROW_LOCK(kh);
1451		if (s->key[PF_SK_WIRE] != NULL)
1452			pf_state_key_detach(s, PF_SK_WIRE);
1453		PF_HASHROW_UNLOCK(kh);
1454	}
1455}
1456
1457static void
1458pf_state_key_detach(struct pf_kstate *s, int idx)
1459{
1460	struct pf_state_key *sk = s->key[idx];
1461#ifdef INVARIANTS
1462	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];
1463
1464	PF_HASHROW_ASSERT(kh);
1465#endif
1466	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
1467	s->key[idx] = NULL;
1468
1469	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
1470		LIST_REMOVE(sk, entry);
1471		uma_zfree(V_pf_state_key_z, sk);
1472	}
1473}
1474
1475static int
1476pf_state_key_ctor(void *mem, int size, void *arg, int flags)
1477{
1478	struct pf_state_key *sk = mem;
1479
1480	bzero(sk, sizeof(struct pf_state_key_cmp));
1481	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
1482	TAILQ_INIT(&sk->states[PF_SK_STACK]);
1483
1484	return (0);
1485}
1486
1487struct pf_state_key *
1488pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
1489	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
1490{
1491	struct pf_state_key *sk;
1492
1493	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1494	if (sk == NULL)
1495		return (NULL);
1496
1497	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
1498	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
1499	sk->port[pd->sidx] = sport;
1500	sk->port[pd->didx] = dport;
1501	sk->proto = pd->proto;
1502	sk->af = pd->af;
1503
1504	return (sk);
1505}
1506
1507struct pf_state_key *
1508pf_state_key_clone(struct pf_state_key *orig)
1509{
1510	struct pf_state_key *sk;
1511
1512	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1513	if (sk == NULL)
1514		return (NULL);
1515
1516	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));
1517
1518	return (sk);
1519}
1520
1521int
1522pf_state_insert(struct pfi_kkif *kif, struct pfi_kkif *orig_kif,
1523    struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s)
1524{
1525	struct pf_idhash *ih;
1526	struct pf_kstate *cur;
1527	int error;
1528
1529	NET_EPOCH_ASSERT();
1530
1531	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
1532	    ("%s: sks not pristine", __func__));
1533	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
1534	    ("%s: skw not pristine", __func__));
1535	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1536
1537	s->kif = kif;
1538	s->orig_kif = orig_kif;
1539
1540	if (s->id == 0 && s->creatorid == 0) {
1541		s->id = alloc_unr64(&V_pf_stateid);
1542		s->id = htobe64(s->id);
1543		s->creatorid = V_pf_status.hostid;
1544	}
1545
1546	/* Returns with ID locked on success. */
1547	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
1548		return (error);
1549
1550	ih = &V_pf_idhash[PF_IDHASH(s)];
1551	PF_HASHROW_ASSERT(ih);
1552	LIST_FOREACH(cur, &ih->states, entry)
1553		if (cur->id == s->id && cur->creatorid == s->creatorid)
1554			break;
1555
1556	if (cur != NULL) {
1557		s->timeout = PFTM_UNLINKED;
1558		PF_HASHROW_UNLOCK(ih);
1559		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1560			printf("pf: state ID collision: "
1561			    "id: %016llx creatorid: %08x\n",
1562			    (unsigned long long)be64toh(s->id),
1563			    ntohl(s->creatorid));
1564		}
1565		pf_detach_state(s);
1566		return (EEXIST);
1567	}
1568	LIST_INSERT_HEAD(&ih->states, s, entry);
1569	/* One for keys, one for ID hash. */
1570	refcount_init(&s->refs, 2);
1571
1572	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
1573	if (V_pfsync_insert_state_ptr != NULL)
1574		V_pfsync_insert_state_ptr(s);
1575
1576	/* Returns locked. */
1577	return (0);
1578}
1579
1580/*
1581 * Find state by ID: returns with locked row on success.
1582 */
1583struct pf_kstate *
1584pf_find_state_byid(uint64_t id, uint32_t creatorid)
1585{
1586	struct pf_idhash *ih;
1587	struct pf_kstate *s;
1588
1589	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1590
1591	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];
1592
1593	PF_HASHROW_LOCK(ih);
1594	LIST_FOREACH(s, &ih->states, entry)
1595		if (s->id == id && s->creatorid == creatorid)
1596			break;
1597
1598	if (s == NULL)
1599		PF_HASHROW_UNLOCK(ih);
1600
1601	return (s);
1602}
1603
1604/*
1605 * Find state by key.
1606 * Returns with ID hash slot locked on success.
1607 */
1608static struct pf_kstate *
1609pf_find_state(struct pfi_kkif *kif, struct pf_state_key_cmp *key, u_int dir)
1610{
1611	struct pf_keyhash	*kh;
1612	struct pf_state_key	*sk;
1613	struct pf_kstate	*s;
1614	int idx;
1615
1616	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1617
1618	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1619
1620	PF_HASHROW_LOCK(kh);
1621	LIST_FOREACH(sk, &kh->keys, entry)
1622		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1623			break;
1624	if (sk == NULL) {
1625		PF_HASHROW_UNLOCK(kh);
1626		return (NULL);
1627	}
1628
1629	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);
1630
1631	/* List is sorted, if-bound states before floating ones. */
1632	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
1633		if (s->kif == V_pfi_all || s->kif == kif || s->orig_kif == kif) {
1634			PF_STATE_LOCK(s);
1635			PF_HASHROW_UNLOCK(kh);
1636			if (__predict_false(s->timeout >= PFTM_MAX)) {
1637				/*
1638				 * State is either being processed by
1639				 * pf_unlink_state() in an other thread, or
1640				 * is scheduled for immediate expiry.
1641				 */
1642				PF_STATE_UNLOCK(s);
1643				return (NULL);
1644			}
1645			return (s);
1646		}
1647	PF_HASHROW_UNLOCK(kh);
1648
1649	return (NULL);
1650}
1651
1652/*
1653 * Returns with ID hash slot locked on success.
1654 */
1655struct pf_kstate *
1656pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1657{
1658	struct pf_keyhash	*kh;
1659	struct pf_state_key	*sk;
1660	struct pf_kstate	*s, *ret = NULL;
1661	int			 idx, inout = 0;
1662
1663	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1664
1665	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1666
1667	PF_HASHROW_LOCK(kh);
1668	LIST_FOREACH(sk, &kh->keys, entry)
1669		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1670			break;
1671	if (sk == NULL) {
1672		PF_HASHROW_UNLOCK(kh);
1673		return (NULL);
1674	}
1675	switch (dir) {
1676	case PF_IN:
1677		idx = PF_SK_WIRE;
1678		break;
1679	case PF_OUT:
1680		idx = PF_SK_STACK;
1681		break;
1682	case PF_INOUT:
1683		idx = PF_SK_WIRE;
1684		inout = 1;
1685		break;
1686	default:
1687		panic("%s: dir %u", __func__, dir);
1688	}
1689second_run:
1690	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
1691		if (more == NULL) {
1692			PF_STATE_LOCK(s);
1693			PF_HASHROW_UNLOCK(kh);
1694			return (s);
1695		}
1696
1697		if (ret)
1698			(*more)++;
1699		else {
1700			ret = s;
1701			PF_STATE_LOCK(s);
1702		}
1703	}
1704	if (inout == 1) {
1705		inout = 0;
1706		idx = PF_SK_STACK;
1707		goto second_run;
1708	}
1709	PF_HASHROW_UNLOCK(kh);
1710
1711	return (ret);
1712}
1713
1714/*
1715 * FIXME
1716 * This routine is inefficient -- locks the state only to unlock immediately on
1717 * return.
1718 * It is racy -- after the state is unlocked nothing stops other threads from
1719 * removing it.
1720 */
1721bool
1722pf_find_state_all_exists(struct pf_state_key_cmp *key, u_int dir)
1723{
1724	struct pf_kstate *s;
1725
1726	s = pf_find_state_all(key, dir, NULL);
1727	if (s != NULL) {
1728		PF_STATE_UNLOCK(s);
1729		return (true);
1730	}
1731	return (false);
1732}
1733
1734/* END state table stuff */
1735
1736static void
1737pf_send(struct pf_send_entry *pfse)
1738{
1739
1740	PF_SENDQ_LOCK();
1741	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
1742	PF_SENDQ_UNLOCK();
1743	swi_sched(V_pf_swi_cookie, 0);
1744}
1745
1746static bool
1747pf_isforlocal(struct mbuf *m, int af)
1748{
1749	switch (af) {
1750#ifdef INET
1751	case AF_INET: {
1752		struct ip *ip = mtod(m, struct ip *);
1753
1754		return (in_localip(ip->ip_dst));
1755	}
1756#endif
1757#ifdef INET6
1758	case AF_INET6: {
1759		struct ip6_hdr *ip6;
1760		struct in6_ifaddr *ia;
1761		ip6 = mtod(m, struct ip6_hdr *);
1762		ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
1763		if (ia == NULL)
1764			return (false);
1765		return (! (ia->ia6_flags & IN6_IFF_NOTREADY));
1766	}
1767#endif
1768	default:
1769		panic("Unsupported af %d", af);
1770	}
1771
1772	return (false);
1773}
1774
1775void
1776pf_intr(void *v)
1777{
1778	struct epoch_tracker et;
1779	struct pf_send_head queue;
1780	struct pf_send_entry *pfse, *next;
1781
1782	CURVNET_SET((struct vnet *)v);
1783
1784	PF_SENDQ_LOCK();
1785	queue = V_pf_sendqueue;
1786	STAILQ_INIT(&V_pf_sendqueue);
1787	PF_SENDQ_UNLOCK();
1788
1789	NET_EPOCH_ENTER(et);
1790
1791	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
1792		switch (pfse->pfse_type) {
1793#ifdef INET
1794		case PFSE_IP: {
1795			if (pf_isforlocal(pfse->pfse_m, AF_INET)) {
1796				pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
1797				pfse->pfse_m->m_pkthdr.csum_flags |=
1798				    CSUM_IP_VALID | CSUM_IP_CHECKED;
1799				ip_input(pfse->pfse_m);
1800			} else {
1801				ip_output(pfse->pfse_m, NULL, NULL, 0, NULL,
1802				    NULL);
1803			}
1804			break;
1805		}
1806		case PFSE_ICMP:
1807			icmp_error(pfse->pfse_m, pfse->icmpopts.type,
1808			    pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
1809			break;
1810#endif /* INET */
1811#ifdef INET6
1812		case PFSE_IP6:
1813			if (pf_isforlocal(pfse->pfse_m, AF_INET6)) {
1814				pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
1815				ip6_input(pfse->pfse_m);
1816			} else {
1817				ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL,
1818				    NULL, NULL);
1819			}
1820			break;
1821		case PFSE_ICMP6:
1822			icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
1823			    pfse->icmpopts.code, pfse->icmpopts.mtu);
1824			break;
1825#endif /* INET6 */
1826		default:
1827			panic("%s: unknown type", __func__);
1828		}
1829		free(pfse, M_PFTEMP);
1830	}
1831	NET_EPOCH_EXIT(et);
1832	CURVNET_RESTORE();
1833}
1834
1835#define	pf_purge_thread_period	(hz / 10)
1836
1837#ifdef PF_WANT_32_TO_64_COUNTER
1838static void
1839pf_status_counter_u64_periodic(void)
1840{
1841
1842	PF_RULES_RASSERT();
1843
1844	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 60)) != 0) {
1845		return;
1846	}
1847
1848	for (int i = 0; i < FCNT_MAX; i++) {
1849		pf_counter_u64_periodic(&V_pf_status.fcounters[i]);
1850	}
1851}
1852
1853static void
1854pf_kif_counter_u64_periodic(void)
1855{
1856	struct pfi_kkif *kif;
1857	size_t r, run;
1858
1859	PF_RULES_RASSERT();
1860
1861	if (__predict_false(V_pf_allkifcount == 0)) {
1862		return;
1863	}
1864
1865	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
1866		return;
1867	}
1868
1869	run = V_pf_allkifcount / 10;
1870	if (run < 5)
1871		run = 5;
1872
1873	for (r = 0; r < run; r++) {
1874		kif = LIST_NEXT(V_pf_kifmarker, pfik_allkiflist);
1875		if (kif == NULL) {
1876			LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
1877			LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
1878			break;
1879		}
1880
1881		LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
1882		LIST_INSERT_AFTER(kif, V_pf_kifmarker, pfik_allkiflist);
1883
1884		for (int i = 0; i < 2; i++) {
1885			for (int j = 0; j < 2; j++) {
1886				for (int k = 0; k < 2; k++) {
1887					pf_counter_u64_periodic(&kif->pfik_packets[i][j][k]);
1888					pf_counter_u64_periodic(&kif->pfik_bytes[i][j][k]);
1889				}
1890			}
1891		}
1892	}
1893}
1894
1895static void
1896pf_rule_counter_u64_periodic(void)
1897{
1898	struct pf_krule *rule;
1899	size_t r, run;
1900
1901	PF_RULES_RASSERT();
1902
1903	if (__predict_false(V_pf_allrulecount == 0)) {
1904		return;
1905	}
1906
1907	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
1908		return;
1909	}
1910
1911	run = V_pf_allrulecount / 10;
1912	if (run < 5)
1913		run = 5;
1914
1915	for (r = 0; r < run; r++) {
1916		rule = LIST_NEXT(V_pf_rulemarker, allrulelist);
1917		if (rule == NULL) {
1918			LIST_REMOVE(V_pf_rulemarker, allrulelist);
1919			LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
1920			break;
1921		}
1922
1923		LIST_REMOVE(V_pf_rulemarker, allrulelist);
1924		LIST_INSERT_AFTER(rule, V_pf_rulemarker, allrulelist);
1925
1926		pf_counter_u64_periodic(&rule->evaluations);
1927		for (int i = 0; i < 2; i++) {
1928			pf_counter_u64_periodic(&rule->packets[i]);
1929			pf_counter_u64_periodic(&rule->bytes[i]);
1930		}
1931	}
1932}
1933
1934static void
1935pf_counter_u64_periodic_main(void)
1936{
1937	PF_RULES_RLOCK_TRACKER;
1938
1939	V_pf_counter_periodic_iter++;
1940
1941	PF_RULES_RLOCK();
1942	pf_counter_u64_critical_enter();
1943	pf_status_counter_u64_periodic();
1944	pf_kif_counter_u64_periodic();
1945	pf_rule_counter_u64_periodic();
1946	pf_counter_u64_critical_exit();
1947	PF_RULES_RUNLOCK();
1948}
1949#else
1950#define	pf_counter_u64_periodic_main()	do { } while (0)
1951#endif
1952
1953void
1954pf_purge_thread(void *unused __unused)
1955{
1956	struct epoch_tracker	 et;
1957
1958	VNET_ITERATOR_DECL(vnet_iter);
1959
1960	sx_xlock(&pf_end_lock);
1961	while (pf_end_threads == 0) {
1962		sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", pf_purge_thread_period);
1963
1964		VNET_LIST_RLOCK();
1965		NET_EPOCH_ENTER(et);
1966		VNET_FOREACH(vnet_iter) {
1967			CURVNET_SET(vnet_iter);
1968
1969			/* Wait until V_pf_default_rule is initialized. */
1970			if (V_pf_vnet_active == 0) {
1971				CURVNET_RESTORE();
1972				continue;
1973			}
1974
1975			pf_counter_u64_periodic_main();
1976
1977			/*
1978			 *  Process 1/interval fraction of the state
1979			 * table every run.
1980			 */
1981			V_pf_purge_idx =
1982			    pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
1983			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
1984
1985			/*
1986			 * Purge other expired types every
1987			 * PFTM_INTERVAL seconds.
1988			 */
1989			if (V_pf_purge_idx == 0) {
1990				/*
1991				 * Order is important:
1992				 * - states and src nodes reference rules
1993				 * - states and rules reference kifs
1994				 */
1995				pf_purge_expired_fragments();
1996				pf_purge_expired_src_nodes();
1997				pf_purge_unlinked_rules();
1998				pfi_kkif_purge();
1999			}
2000			CURVNET_RESTORE();
2001		}
2002		NET_EPOCH_EXIT(et);
2003		VNET_LIST_RUNLOCK();
2004	}
2005
2006	pf_end_threads++;
2007	sx_xunlock(&pf_end_lock);
2008	kproc_exit(0);
2009}
2010
2011void
2012pf_unload_vnet_purge(void)
2013{
2014
2015	/*
2016	 * To cleanse up all kifs and rules we need
2017	 * two runs: first one clears reference flags,
2018	 * then pf_purge_expired_states() doesn't
2019	 * raise them, and then second run frees.
2020	 */
2021	pf_purge_unlinked_rules();
2022	pfi_kkif_purge();
2023
2024	/*
2025	 * Now purge everything.
2026	 */
2027	pf_purge_expired_states(0, pf_hashmask);
2028	pf_purge_fragments(UINT_MAX);
2029	pf_purge_expired_src_nodes();
2030
2031	/*
2032	 * Now all kifs & rules should be unreferenced,
2033	 * thus should be successfully freed.
2034	 */
2035	pf_purge_unlinked_rules();
2036	pfi_kkif_purge();
2037}
2038
2039u_int32_t
2040pf_state_expires(const struct pf_kstate *state)
2041{
2042	u_int32_t	timeout;
2043	u_int32_t	start;
2044	u_int32_t	end;
2045	u_int32_t	states;
2046
2047	/* handle all PFTM_* > PFTM_MAX here */
2048	if (state->timeout == PFTM_PURGE)
2049		return (time_uptime);
2050	KASSERT(state->timeout != PFTM_UNLINKED,
2051	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
2052	KASSERT((state->timeout < PFTM_MAX),
2053	    ("pf_state_expires: timeout > PFTM_MAX"));
2054	timeout = state->rule.ptr->timeout[state->timeout];
2055	if (!timeout)
2056		timeout = V_pf_default_rule.timeout[state->timeout];
2057	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
2058	if (start && state->rule.ptr != &V_pf_default_rule) {
2059		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
2060		states = counter_u64_fetch(state->rule.ptr->states_cur);
2061	} else {
2062		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
2063		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
2064		states = V_pf_status.states;
2065	}
2066	if (end && states > start && start < end) {
2067		if (states < end) {
2068			timeout = (u_int64_t)timeout * (end - states) /
2069			    (end - start);
2070			return ((state->expire / 1000) + timeout);
2071		}
2072		else
2073			return (time_uptime);
2074	}
2075	return ((state->expire / 1000) + timeout);
2076}
2077
2078void
2079pf_purge_expired_src_nodes(void)
2080{
2081	struct pf_ksrc_node_list	 freelist;
2082	struct pf_srchash	*sh;
2083	struct pf_ksrc_node	*cur, *next;
2084	int i;
2085
2086	LIST_INIT(&freelist);
2087	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
2088	    PF_HASHROW_LOCK(sh);
2089	    LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
2090		if (cur->states == 0 && cur->expire <= time_uptime) {
2091			pf_unlink_src_node(cur);
2092			LIST_INSERT_HEAD(&freelist, cur, entry);
2093		} else if (cur->rule.ptr != NULL)
2094			cur->rule.ptr->rule_ref |= PFRULE_REFS;
2095	    PF_HASHROW_UNLOCK(sh);
2096	}
2097
2098	pf_free_src_nodes(&freelist);
2099
2100	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
2101}
2102
2103static void
2104pf_src_tree_remove_state(struct pf_kstate *s)
2105{
2106	struct pf_ksrc_node *sn;
2107	uint32_t timeout;
2108
2109	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
2110	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
2111	    V_pf_default_rule.timeout[PFTM_SRC_NODE];
2112
2113	if (s->src_node != NULL) {
2114		sn = s->src_node;
2115		PF_SRC_NODE_LOCK(sn);
2116		if (s->src.tcp_est)
2117			--sn->conn;
2118		if (--sn->states == 0)
2119			sn->expire = time_uptime + timeout;
2120		PF_SRC_NODE_UNLOCK(sn);
2121	}
2122	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
2123		sn = s->nat_src_node;
2124		PF_SRC_NODE_LOCK(sn);
2125		if (--sn->states == 0)
2126			sn->expire = time_uptime + timeout;
2127		PF_SRC_NODE_UNLOCK(sn);
2128	}
2129	s->src_node = s->nat_src_node = NULL;
2130}
2131
2132/*
2133 * Unlink and potentilly free a state. Function may be
2134 * called with ID hash row locked, but always returns
2135 * unlocked, since it needs to go through key hash locking.
2136 */
2137int
2138pf_unlink_state(struct pf_kstate *s)
2139{
2140	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];
2141
2142	NET_EPOCH_ASSERT();
2143	PF_HASHROW_ASSERT(ih);
2144
2145	if (s->timeout == PFTM_UNLINKED) {
2146		/*
2147		 * State is being processed
2148		 * by pf_unlink_state() in
2149		 * an other thread.
2150		 */
2151		PF_HASHROW_UNLOCK(ih);
2152		return (0);	/* XXXGL: undefined actually */
2153	}
2154
2155	if (s->src.state == PF_TCPS_PROXY_DST) {
2156		/* XXX wire key the right one? */
2157		pf_send_tcp(s->rule.ptr, s->key[PF_SK_WIRE]->af,
2158		    &s->key[PF_SK_WIRE]->addr[1],
2159		    &s->key[PF_SK_WIRE]->addr[0],
2160		    s->key[PF_SK_WIRE]->port[1],
2161		    s->key[PF_SK_WIRE]->port[0],
2162		    s->src.seqhi, s->src.seqlo + 1,
2163		    TH_RST|TH_ACK, 0, 0, 0, true, s->tag, 0, s->act.rtableid);
2164	}
2165
2166	LIST_REMOVE(s, entry);
2167	pf_src_tree_remove_state(s);
2168
2169	if (V_pfsync_delete_state_ptr != NULL)
2170		V_pfsync_delete_state_ptr(s);
2171
2172	STATE_DEC_COUNTERS(s);
2173
2174	s->timeout = PFTM_UNLINKED;
2175
2176	/* Ensure we remove it from the list of halfopen states, if needed. */
2177	if (s->key[PF_SK_STACK] != NULL &&
2178	    s->key[PF_SK_STACK]->proto == IPPROTO_TCP)
2179		pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
2180
2181	PF_HASHROW_UNLOCK(ih);
2182
2183	pf_detach_state(s);
2184	/* pf_state_insert() initialises refs to 2 */
2185	return (pf_release_staten(s, 2));
2186}
2187
2188struct pf_kstate *
2189pf_alloc_state(int flags)
2190{
2191
2192	return (uma_zalloc(V_pf_state_z, flags | M_ZERO));
2193}
2194
2195void
2196pf_free_state(struct pf_kstate *cur)
2197{
2198	struct pf_krule_item *ri;
2199
2200	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
2201	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
2202	    cur->timeout));
2203
2204	while ((ri = SLIST_FIRST(&cur->match_rules))) {
2205		SLIST_REMOVE_HEAD(&cur->match_rules, entry);
2206		free(ri, M_PF_RULE_ITEM);
2207	}
2208
2209	pf_normalize_tcp_cleanup(cur);
2210	uma_zfree(V_pf_state_z, cur);
2211	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
2212}
2213
2214/*
2215 * Called only from pf_purge_thread(), thus serialized.
2216 */
2217static u_int
2218pf_purge_expired_states(u_int i, int maxcheck)
2219{
2220	struct pf_idhash *ih;
2221	struct pf_kstate *s;
2222	struct pf_krule_item *mrm;
2223	size_t count __unused;
2224
2225	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2226
2227	/*
2228	 * Go through hash and unlink states that expire now.
2229	 */
2230	while (maxcheck > 0) {
2231		count = 0;
2232		ih = &V_pf_idhash[i];
2233
2234		/* only take the lock if we expect to do work */
2235		if (!LIST_EMPTY(&ih->states)) {
2236relock:
2237			PF_HASHROW_LOCK(ih);
2238			LIST_FOREACH(s, &ih->states, entry) {
2239				if (pf_state_expires(s) <= time_uptime) {
2240					V_pf_status.states -=
2241					    pf_unlink_state(s);
2242					goto relock;
2243				}
2244				s->rule.ptr->rule_ref |= PFRULE_REFS;
2245				if (s->nat_rule.ptr != NULL)
2246					s->nat_rule.ptr->rule_ref |= PFRULE_REFS;
2247				if (s->anchor.ptr != NULL)
2248					s->anchor.ptr->rule_ref |= PFRULE_REFS;
2249				s->kif->pfik_flags |= PFI_IFLAG_REFS;
2250				SLIST_FOREACH(mrm, &s->match_rules, entry)
2251					mrm->r->rule_ref |= PFRULE_REFS;
2252				if (s->rt_kif)
2253					s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
2254				count++;
2255			}
2256			PF_HASHROW_UNLOCK(ih);
2257		}
2258
2259		SDT_PROBE2(pf, purge, state, rowcount, i, count);
2260
2261		/* Return when we hit end of hash. */
2262		if (++i > pf_hashmask) {
2263			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2264			return (0);
2265		}
2266
2267		maxcheck--;
2268	}
2269
2270	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2271
2272	return (i);
2273}
2274
2275static void
2276pf_purge_unlinked_rules(void)
2277{
2278	struct pf_krulequeue tmpq;
2279	struct pf_krule *r, *r1;
2280
2281	/*
2282	 * If we have overloading task pending, then we'd
2283	 * better skip purging this time. There is a tiny
2284	 * probability that overloading task references
2285	 * an already unlinked rule.
2286	 */
2287	PF_OVERLOADQ_LOCK();
2288	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
2289		PF_OVERLOADQ_UNLOCK();
2290		return;
2291	}
2292	PF_OVERLOADQ_UNLOCK();
2293
2294	/*
2295	 * Do naive mark-and-sweep garbage collecting of old rules.
2296	 * Reference flag is raised by pf_purge_expired_states()
2297	 * and pf_purge_expired_src_nodes().
2298	 *
2299	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
2300	 * use a temporary queue.
2301	 */
2302	TAILQ_INIT(&tmpq);
2303	PF_UNLNKDRULES_LOCK();
2304	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
2305		if (!(r->rule_ref & PFRULE_REFS)) {
2306			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
2307			TAILQ_INSERT_TAIL(&tmpq, r, entries);
2308		} else
2309			r->rule_ref &= ~PFRULE_REFS;
2310	}
2311	PF_UNLNKDRULES_UNLOCK();
2312
2313	if (!TAILQ_EMPTY(&tmpq)) {
2314		PF_CONFIG_LOCK();
2315		PF_RULES_WLOCK();
2316		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
2317			TAILQ_REMOVE(&tmpq, r, entries);
2318			pf_free_rule(r);
2319		}
2320		PF_RULES_WUNLOCK();
2321		PF_CONFIG_UNLOCK();
2322	}
2323}
2324
2325void
2326pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2327{
2328	switch (af) {
2329#ifdef INET
2330	case AF_INET: {
2331		u_int32_t a = ntohl(addr->addr32[0]);
2332		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2333		    (a>>8)&255, a&255);
2334		if (p) {
2335			p = ntohs(p);
2336			printf(":%u", p);
2337		}
2338		break;
2339	}
2340#endif /* INET */
2341#ifdef INET6
2342	case AF_INET6: {
2343		u_int16_t b;
2344		u_int8_t i, curstart, curend, maxstart, maxend;
2345		curstart = curend = maxstart = maxend = 255;
2346		for (i = 0; i < 8; i++) {
2347			if (!addr->addr16[i]) {
2348				if (curstart == 255)
2349					curstart = i;
2350				curend = i;
2351			} else {
2352				if ((curend - curstart) >
2353				    (maxend - maxstart)) {
2354					maxstart = curstart;
2355					maxend = curend;
2356				}
2357				curstart = curend = 255;
2358			}
2359		}
2360		if ((curend - curstart) >
2361		    (maxend - maxstart)) {
2362			maxstart = curstart;
2363			maxend = curend;
2364		}
2365		for (i = 0; i < 8; i++) {
2366			if (i >= maxstart && i <= maxend) {
2367				if (i == 0)
2368					printf(":");
2369				if (i == maxend)
2370					printf(":");
2371			} else {
2372				b = ntohs(addr->addr16[i]);
2373				printf("%x", b);
2374				if (i < 7)
2375					printf(":");
2376			}
2377		}
2378		if (p) {
2379			p = ntohs(p);
2380			printf("[%u]", p);
2381		}
2382		break;
2383	}
2384#endif /* INET6 */
2385	}
2386}
2387
2388void
2389pf_print_state(struct pf_kstate *s)
2390{
2391	pf_print_state_parts(s, NULL, NULL);
2392}
2393
2394static void
2395pf_print_state_parts(struct pf_kstate *s,
2396    struct pf_state_key *skwp, struct pf_state_key *sksp)
2397{
2398	struct pf_state_key *skw, *sks;
2399	u_int8_t proto, dir;
2400
2401	/* Do our best to fill these, but they're skipped if NULL */
2402	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
2403	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
2404	proto = skw ? skw->proto : (sks ? sks->proto : 0);
2405	dir = s ? s->direction : 0;
2406
2407	switch (proto) {
2408	case IPPROTO_IPV4:
2409		printf("IPv4");
2410		break;
2411	case IPPROTO_IPV6:
2412		printf("IPv6");
2413		break;
2414	case IPPROTO_TCP:
2415		printf("TCP");
2416		break;
2417	case IPPROTO_UDP:
2418		printf("UDP");
2419		break;
2420	case IPPROTO_ICMP:
2421		printf("ICMP");
2422		break;
2423	case IPPROTO_ICMPV6:
2424		printf("ICMPv6");
2425		break;
2426	default:
2427		printf("%u", proto);
2428		break;
2429	}
2430	switch (dir) {
2431	case PF_IN:
2432		printf(" in");
2433		break;
2434	case PF_OUT:
2435		printf(" out");
2436		break;
2437	}
2438	if (skw) {
2439		printf(" wire: ");
2440		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
2441		printf(" ");
2442		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
2443	}
2444	if (sks) {
2445		printf(" stack: ");
2446		if (sks != skw) {
2447			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
2448			printf(" ");
2449			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
2450		} else
2451			printf("-");
2452	}
2453	if (s) {
2454		if (proto == IPPROTO_TCP) {
2455			printf(" [lo=%u high=%u win=%u modulator=%u",
2456			    s->src.seqlo, s->src.seqhi,
2457			    s->src.max_win, s->src.seqdiff);
2458			if (s->src.wscale && s->dst.wscale)
2459				printf(" wscale=%u",
2460				    s->src.wscale & PF_WSCALE_MASK);
2461			printf("]");
2462			printf(" [lo=%u high=%u win=%u modulator=%u",
2463			    s->dst.seqlo, s->dst.seqhi,
2464			    s->dst.max_win, s->dst.seqdiff);
2465			if (s->src.wscale && s->dst.wscale)
2466				printf(" wscale=%u",
2467				s->dst.wscale & PF_WSCALE_MASK);
2468			printf("]");
2469		}
2470		printf(" %u:%u", s->src.state, s->dst.state);
2471	}
2472}
2473
2474void
2475pf_print_flags(u_int8_t f)
2476{
2477	if (f)
2478		printf(" ");
2479	if (f & TH_FIN)
2480		printf("F");
2481	if (f & TH_SYN)
2482		printf("S");
2483	if (f & TH_RST)
2484		printf("R");
2485	if (f & TH_PUSH)
2486		printf("P");
2487	if (f & TH_ACK)
2488		printf("A");
2489	if (f & TH_URG)
2490		printf("U");
2491	if (f & TH_ECE)
2492		printf("E");
2493	if (f & TH_CWR)
2494		printf("W");
2495}
2496
2497#define	PF_SET_SKIP_STEPS(i)					\
2498	do {							\
2499		while (head[i] != cur) {			\
2500			head[i]->skip[i].ptr = cur;		\
2501			head[i] = TAILQ_NEXT(head[i], entries);	\
2502		}						\
2503	} while (0)
2504
2505void
2506pf_calc_skip_steps(struct pf_krulequeue *rules)
2507{
2508	struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT];
2509	int i;
2510
2511	cur = TAILQ_FIRST(rules);
2512	prev = cur;
2513	for (i = 0; i < PF_SKIP_COUNT; ++i)
2514		head[i] = cur;
2515	while (cur != NULL) {
2516		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
2517			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
2518		if (cur->direction != prev->direction)
2519			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
2520		if (cur->af != prev->af)
2521			PF_SET_SKIP_STEPS(PF_SKIP_AF);
2522		if (cur->proto != prev->proto)
2523			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
2524		if (cur->src.neg != prev->src.neg ||
2525		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
2526			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
2527		if (cur->src.port[0] != prev->src.port[0] ||
2528		    cur->src.port[1] != prev->src.port[1] ||
2529		    cur->src.port_op != prev->src.port_op)
2530			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2531		if (cur->dst.neg != prev->dst.neg ||
2532		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
2533			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
2534		if (cur->dst.port[0] != prev->dst.port[0] ||
2535		    cur->dst.port[1] != prev->dst.port[1] ||
2536		    cur->dst.port_op != prev->dst.port_op)
2537			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2538
2539		prev = cur;
2540		cur = TAILQ_NEXT(cur, entries);
2541	}
2542	for (i = 0; i < PF_SKIP_COUNT; ++i)
2543		PF_SET_SKIP_STEPS(i);
2544}
2545
2546int
2547pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2548{
2549	if (aw1->type != aw2->type)
2550		return (1);
2551	switch (aw1->type) {
2552	case PF_ADDR_ADDRMASK:
2553	case PF_ADDR_RANGE:
2554		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
2555			return (1);
2556		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
2557			return (1);
2558		return (0);
2559	case PF_ADDR_DYNIFTL:
2560		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
2561	case PF_ADDR_NOROUTE:
2562	case PF_ADDR_URPFFAILED:
2563		return (0);
2564	case PF_ADDR_TABLE:
2565		return (aw1->p.tbl != aw2->p.tbl);
2566	default:
2567		printf("invalid address type: %d\n", aw1->type);
2568		return (1);
2569	}
2570}
2571
2572/**
2573 * Checksum updates are a little complicated because the checksum in the TCP/UDP
2574 * header isn't always a full checksum. In some cases (i.e. output) it's a
2575 * pseudo-header checksum, which is a partial checksum over src/dst IP
2576 * addresses, protocol number and length.
2577 *
2578 * That means we have the following cases:
2579 *  * Input or forwarding: we don't have TSO, the checksum fields are full
2580 *  	checksums, we need to update the checksum whenever we change anything.
2581 *  * Output (i.e. the checksum is a pseudo-header checksum):
2582 *  	x The field being updated is src/dst address or affects the length of
2583 *  	the packet. We need to update the pseudo-header checksum (note that this
2584 *  	checksum is not ones' complement).
2585 *  	x Some other field is being modified (e.g. src/dst port numbers): We
2586 *  	don't have to update anything.
2587 **/
2588u_int16_t
2589pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2590{
2591	u_int32_t x;
2592
2593	x = cksum + old - new;
2594	x = (x + (x >> 16)) & 0xffff;
2595
2596	/* optimise: eliminate a branch when not udp */
2597	if (udp && cksum == 0x0000)
2598		return cksum;
2599	if (udp && x == 0x0000)
2600		x = 0xffff;
2601
2602	return (u_int16_t)(x);
2603}
2604
2605static void
2606pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi,
2607    u_int8_t udp)
2608{
2609	u_int16_t old = htons(hi ? (*f << 8) : *f);
2610	u_int16_t new = htons(hi ? ( v << 8) :  v);
2611
2612	if (*f == v)
2613		return;
2614
2615	*f = v;
2616
2617	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2618		return;
2619
2620	*cksum = pf_cksum_fixup(*cksum, old, new, udp);
2621}
2622
2623void
2624pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v,
2625    bool hi, u_int8_t udp)
2626{
2627	u_int8_t *fb = (u_int8_t *)f;
2628	u_int8_t *vb = (u_int8_t *)&v;
2629
2630	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2631	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2632}
2633
2634void
2635pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v,
2636    bool hi, u_int8_t udp)
2637{
2638	u_int8_t *fb = (u_int8_t *)f;
2639	u_int8_t *vb = (u_int8_t *)&v;
2640
2641	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2642	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2643	pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2644	pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2645}
2646
2647u_int16_t
2648pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
2649        u_int16_t new, u_int8_t udp)
2650{
2651	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2652		return (cksum);
2653
2654	return (pf_cksum_fixup(cksum, old, new, udp));
2655}
2656
2657static void
2658pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
2659        u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
2660        sa_family_t af)
2661{
2662	struct pf_addr	ao;
2663	u_int16_t	po = *p;
2664
2665	PF_ACPY(&ao, a, af);
2666	PF_ACPY(a, an, af);
2667
2668	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2669		*pc = ~*pc;
2670
2671	*p = pn;
2672
2673	switch (af) {
2674#ifdef INET
2675	case AF_INET:
2676		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2677		    ao.addr16[0], an->addr16[0], 0),
2678		    ao.addr16[1], an->addr16[1], 0);
2679		*p = pn;
2680
2681		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
2682		    ao.addr16[0], an->addr16[0], u),
2683		    ao.addr16[1], an->addr16[1], u);
2684
2685		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2686		break;
2687#endif /* INET */
2688#ifdef INET6
2689	case AF_INET6:
2690		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2691		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2692		    pf_cksum_fixup(pf_cksum_fixup(*pc,
2693		    ao.addr16[0], an->addr16[0], u),
2694		    ao.addr16[1], an->addr16[1], u),
2695		    ao.addr16[2], an->addr16[2], u),
2696		    ao.addr16[3], an->addr16[3], u),
2697		    ao.addr16[4], an->addr16[4], u),
2698		    ao.addr16[5], an->addr16[5], u),
2699		    ao.addr16[6], an->addr16[6], u),
2700		    ao.addr16[7], an->addr16[7], u);
2701
2702		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2703		break;
2704#endif /* INET6 */
2705	}
2706
2707	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
2708	    CSUM_DELAY_DATA_IPV6)) {
2709		*pc = ~*pc;
2710		if (! *pc)
2711			*pc = 0xffff;
2712	}
2713}
2714
2715/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
2716void
2717pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2718{
2719	u_int32_t	ao;
2720
2721	memcpy(&ao, a, sizeof(ao));
2722	memcpy(a, &an, sizeof(u_int32_t));
2723	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2724	    ao % 65536, an % 65536, u);
2725}
2726
2727void
2728pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
2729{
2730	u_int32_t	ao;
2731
2732	memcpy(&ao, a, sizeof(ao));
2733	memcpy(a, &an, sizeof(u_int32_t));
2734
2735	*c = pf_proto_cksum_fixup(m,
2736	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
2737	    ao % 65536, an % 65536, udp);
2738}
2739
2740#ifdef INET6
2741static void
2742pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2743{
2744	struct pf_addr	ao;
2745
2746	PF_ACPY(&ao, a, AF_INET6);
2747	PF_ACPY(a, an, AF_INET6);
2748
2749	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2750	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2751	    pf_cksum_fixup(pf_cksum_fixup(*c,
2752	    ao.addr16[0], an->addr16[0], u),
2753	    ao.addr16[1], an->addr16[1], u),
2754	    ao.addr16[2], an->addr16[2], u),
2755	    ao.addr16[3], an->addr16[3], u),
2756	    ao.addr16[4], an->addr16[4], u),
2757	    ao.addr16[5], an->addr16[5], u),
2758	    ao.addr16[6], an->addr16[6], u),
2759	    ao.addr16[7], an->addr16[7], u);
2760}
2761#endif /* INET6 */
2762
2763static void
2764pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2765    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2766    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2767{
2768	struct pf_addr	oia, ooa;
2769
2770	PF_ACPY(&oia, ia, af);
2771	if (oa)
2772		PF_ACPY(&ooa, oa, af);
2773
2774	/* Change inner protocol port, fix inner protocol checksum. */
2775	if (ip != NULL) {
2776		u_int16_t	oip = *ip;
2777		u_int32_t	opc;
2778
2779		if (pc != NULL)
2780			opc = *pc;
2781		*ip = np;
2782		if (pc != NULL)
2783			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
2784		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2785		if (pc != NULL)
2786			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2787	}
2788	/* Change inner ip address, fix inner ip and icmp checksums. */
2789	PF_ACPY(ia, na, af);
2790	switch (af) {
2791#ifdef INET
2792	case AF_INET: {
2793		u_int32_t	 oh2c = *h2c;
2794
2795		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2796		    oia.addr16[0], ia->addr16[0], 0),
2797		    oia.addr16[1], ia->addr16[1], 0);
2798		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2799		    oia.addr16[0], ia->addr16[0], 0),
2800		    oia.addr16[1], ia->addr16[1], 0);
2801		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2802		break;
2803	}
2804#endif /* INET */
2805#ifdef INET6
2806	case AF_INET6:
2807		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2808		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2809		    pf_cksum_fixup(pf_cksum_fixup(*ic,
2810		    oia.addr16[0], ia->addr16[0], u),
2811		    oia.addr16[1], ia->addr16[1], u),
2812		    oia.addr16[2], ia->addr16[2], u),
2813		    oia.addr16[3], ia->addr16[3], u),
2814		    oia.addr16[4], ia->addr16[4], u),
2815		    oia.addr16[5], ia->addr16[5], u),
2816		    oia.addr16[6], ia->addr16[6], u),
2817		    oia.addr16[7], ia->addr16[7], u);
2818		break;
2819#endif /* INET6 */
2820	}
2821	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2822	if (oa) {
2823		PF_ACPY(oa, na, af);
2824		switch (af) {
2825#ifdef INET
2826		case AF_INET:
2827			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2828			    ooa.addr16[0], oa->addr16[0], 0),
2829			    ooa.addr16[1], oa->addr16[1], 0);
2830			break;
2831#endif /* INET */
2832#ifdef INET6
2833		case AF_INET6:
2834			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2835			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2836			    pf_cksum_fixup(pf_cksum_fixup(*ic,
2837			    ooa.addr16[0], oa->addr16[0], u),
2838			    ooa.addr16[1], oa->addr16[1], u),
2839			    ooa.addr16[2], oa->addr16[2], u),
2840			    ooa.addr16[3], oa->addr16[3], u),
2841			    ooa.addr16[4], oa->addr16[4], u),
2842			    ooa.addr16[5], oa->addr16[5], u),
2843			    ooa.addr16[6], oa->addr16[6], u),
2844			    ooa.addr16[7], oa->addr16[7], u);
2845			break;
2846#endif /* INET6 */
2847		}
2848	}
2849}
2850
2851/*
2852 * Need to modulate the sequence numbers in the TCP SACK option
2853 * (credits to Krzysztof Pfaff for report and patch)
2854 */
2855static int
2856pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2857    struct tcphdr *th, struct pf_state_peer *dst)
2858{
2859	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2860	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2861	int copyback = 0, i, olen;
2862	struct sackblk sack;
2863
2864#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
2865	if (hlen < TCPOLEN_SACKLEN ||
2866	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2867		return 0;
2868
2869	while (hlen >= TCPOLEN_SACKLEN) {
2870		size_t startoff = opt - opts;
2871		olen = opt[1];
2872		switch (*opt) {
2873		case TCPOPT_EOL:	/* FALLTHROUGH */
2874		case TCPOPT_NOP:
2875			opt++;
2876			hlen--;
2877			break;
2878		case TCPOPT_SACK:
2879			if (olen > hlen)
2880				olen = hlen;
2881			if (olen >= TCPOLEN_SACKLEN) {
2882				for (i = 2; i + TCPOLEN_SACK <= olen;
2883				    i += TCPOLEN_SACK) {
2884					memcpy(&sack, &opt[i], sizeof(sack));
2885					pf_patch_32_unaligned(m,
2886					    &th->th_sum, &sack.start,
2887					    htonl(ntohl(sack.start) - dst->seqdiff),
2888					    PF_ALGNMNT(startoff),
2889					    0);
2890					pf_patch_32_unaligned(m, &th->th_sum,
2891					    &sack.end,
2892					    htonl(ntohl(sack.end) - dst->seqdiff),
2893					    PF_ALGNMNT(startoff),
2894					    0);
2895					memcpy(&opt[i], &sack, sizeof(sack));
2896				}
2897				copyback = 1;
2898			}
2899			/* FALLTHROUGH */
2900		default:
2901			if (olen < 2)
2902				olen = 2;
2903			hlen -= olen;
2904			opt += olen;
2905		}
2906	}
2907
2908	if (copyback)
2909		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2910	return (copyback);
2911}
2912
2913struct mbuf *
2914pf_build_tcp(const struct pf_krule *r, sa_family_t af,
2915    const struct pf_addr *saddr, const struct pf_addr *daddr,
2916    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2917    u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
2918    bool skip_firewall, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid)
2919{
2920	struct mbuf	*m;
2921	int		 len, tlen;
2922#ifdef INET
2923	struct ip	*h = NULL;
2924#endif /* INET */
2925#ifdef INET6
2926	struct ip6_hdr	*h6 = NULL;
2927#endif /* INET6 */
2928	struct tcphdr	*th;
2929	char		*opt;
2930	struct pf_mtag  *pf_mtag;
2931
2932	len = 0;
2933	th = NULL;
2934
2935	/* maximum segment size tcp option */
2936	tlen = sizeof(struct tcphdr);
2937	if (mss)
2938		tlen += 4;
2939
2940	switch (af) {
2941#ifdef INET
2942	case AF_INET:
2943		len = sizeof(struct ip) + tlen;
2944		break;
2945#endif /* INET */
2946#ifdef INET6
2947	case AF_INET6:
2948		len = sizeof(struct ip6_hdr) + tlen;
2949		break;
2950#endif /* INET6 */
2951	default:
2952		panic("%s: unsupported af %d", __func__, af);
2953	}
2954
2955	m = m_gethdr(M_NOWAIT, MT_DATA);
2956	if (m == NULL)
2957		return (NULL);
2958
2959#ifdef MAC
2960	mac_netinet_firewall_send(m);
2961#endif
2962	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2963		m_freem(m);
2964		return (NULL);
2965	}
2966	if (skip_firewall)
2967		m->m_flags |= M_SKIP_FIREWALL;
2968	pf_mtag->tag = mtag_tag;
2969	pf_mtag->flags = mtag_flags;
2970
2971	if (rtableid >= 0)
2972		M_SETFIB(m, rtableid);
2973
2974#ifdef ALTQ
2975	if (r != NULL && r->qid) {
2976		pf_mtag->qid = r->qid;
2977
2978		/* add hints for ecn */
2979		pf_mtag->hdr = mtod(m, struct ip *);
2980	}
2981#endif /* ALTQ */
2982	m->m_data += max_linkhdr;
2983	m->m_pkthdr.len = m->m_len = len;
2984	/* The rest of the stack assumes a rcvif, so provide one.
2985	 * This is a locally generated packet, so .. close enough. */
2986	m->m_pkthdr.rcvif = V_loif;
2987	bzero(m->m_data, len);
2988	switch (af) {
2989#ifdef INET
2990	case AF_INET:
2991		h = mtod(m, struct ip *);
2992
2993		/* IP header fields included in the TCP checksum */
2994		h->ip_p = IPPROTO_TCP;
2995		h->ip_len = htons(tlen);
2996		h->ip_src.s_addr = saddr->v4.s_addr;
2997		h->ip_dst.s_addr = daddr->v4.s_addr;
2998
2999		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
3000		break;
3001#endif /* INET */
3002#ifdef INET6
3003	case AF_INET6:
3004		h6 = mtod(m, struct ip6_hdr *);
3005
3006		/* IP header fields included in the TCP checksum */
3007		h6->ip6_nxt = IPPROTO_TCP;
3008		h6->ip6_plen = htons(tlen);
3009		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
3010		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
3011
3012		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
3013		break;
3014#endif /* INET6 */
3015	}
3016
3017	/* TCP header */
3018	th->th_sport = sport;
3019	th->th_dport = dport;
3020	th->th_seq = htonl(seq);
3021	th->th_ack = htonl(ack);
3022	th->th_off = tlen >> 2;
3023	th->th_flags = tcp_flags;
3024	th->th_win = htons(win);
3025
3026	if (mss) {
3027		opt = (char *)(th + 1);
3028		opt[0] = TCPOPT_MAXSEG;
3029		opt[1] = 4;
3030		HTONS(mss);
3031		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
3032	}
3033
3034	switch (af) {
3035#ifdef INET
3036	case AF_INET:
3037		/* TCP checksum */
3038		th->th_sum = in_cksum(m, len);
3039
3040		/* Finish the IP header */
3041		h->ip_v = 4;
3042		h->ip_hl = sizeof(*h) >> 2;
3043		h->ip_tos = IPTOS_LOWDELAY;
3044		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
3045		h->ip_len = htons(len);
3046		h->ip_ttl = ttl ? ttl : V_ip_defttl;
3047		h->ip_sum = 0;
3048		break;
3049#endif /* INET */
3050#ifdef INET6
3051	case AF_INET6:
3052		/* TCP checksum */
3053		th->th_sum = in6_cksum(m, IPPROTO_TCP,
3054		    sizeof(struct ip6_hdr), tlen);
3055
3056		h6->ip6_vfc |= IPV6_VERSION;
3057		h6->ip6_hlim = IPV6_DEFHLIM;
3058		break;
3059#endif /* INET6 */
3060	}
3061
3062	return (m);
3063}
3064
3065static void
3066pf_send_sctp_abort(sa_family_t af, struct pf_pdesc *pd,
3067    uint8_t ttl, int rtableid)
3068{
3069	struct mbuf		*m;
3070#ifdef INET
3071	struct ip		*h = NULL;
3072#endif /* INET */
3073#ifdef INET6
3074	struct ip6_hdr		*h6 = NULL;
3075#endif /* INET6 */
3076	struct sctphdr		*hdr;
3077	struct sctp_chunkhdr	*chunk;
3078	struct pf_send_entry	*pfse;
3079	int			 off = 0;
3080
3081	MPASS(af == pd->af);
3082
3083	m = m_gethdr(M_NOWAIT, MT_DATA);
3084	if (m == NULL)
3085		return;
3086
3087	m->m_data += max_linkhdr;
3088	m->m_flags |= M_SKIP_FIREWALL;
3089	/* The rest of the stack assumes a rcvif, so provide one.
3090	 * This is a locally generated packet, so .. close enough. */
3091	m->m_pkthdr.rcvif = V_loif;
3092
3093	/* IPv4|6 header */
3094	switch (af) {
3095#ifdef INET
3096	case AF_INET:
3097		bzero(m->m_data, sizeof(struct ip) + sizeof(*hdr) + sizeof(*chunk));
3098
3099		h = mtod(m, struct ip *);
3100
3101		/* IP header fields included in the TCP checksum */
3102
3103		h->ip_p = IPPROTO_SCTP;
3104		h->ip_len = htons(sizeof(*h) + sizeof(*hdr) + sizeof(*chunk));
3105		h->ip_ttl = ttl ? ttl : V_ip_defttl;
3106		h->ip_src = pd->dst->v4;
3107		h->ip_dst = pd->src->v4;
3108
3109		off += sizeof(struct ip);
3110		break;
3111#endif /* INET */
3112#ifdef INET6
3113	case AF_INET6:
3114		bzero(m->m_data, sizeof(struct ip6_hdr) + sizeof(*hdr) + sizeof(*chunk));
3115
3116		h6 = mtod(m, struct ip6_hdr *);
3117
3118		/* IP header fields included in the TCP checksum */
3119		h6->ip6_vfc |= IPV6_VERSION;
3120		h6->ip6_nxt = IPPROTO_SCTP;
3121		h6->ip6_plen = htons(sizeof(*h6) + sizeof(*hdr) + sizeof(*chunk));
3122		h6->ip6_hlim = ttl ? ttl : V_ip6_defhlim;
3123		memcpy(&h6->ip6_src, &pd->dst->v6, sizeof(struct in6_addr));
3124		memcpy(&h6->ip6_dst, &pd->src->v6, sizeof(struct in6_addr));
3125
3126		off += sizeof(struct ip6_hdr);
3127		break;
3128#endif /* INET6 */
3129	}
3130
3131	/* SCTP header */
3132	hdr = mtodo(m, off);
3133
3134	hdr->src_port = pd->hdr.sctp.dest_port;
3135	hdr->dest_port = pd->hdr.sctp.src_port;
3136	hdr->v_tag = pd->sctp_initiate_tag;
3137	hdr->checksum = 0;
3138
3139	/* Abort chunk. */
3140	off += sizeof(struct sctphdr);
3141	chunk = mtodo(m, off);
3142
3143	chunk->chunk_type = SCTP_ABORT_ASSOCIATION;
3144	chunk->chunk_length = htons(sizeof(*chunk));
3145
3146	/* SCTP checksum */
3147	off += sizeof(*chunk);
3148	m->m_pkthdr.len = m->m_len = off;
3149
3150	pf_sctp_checksum(m, off - sizeof(*hdr) - sizeof(*chunk));
3151
3152	if (rtableid >= 0)
3153		M_SETFIB(m, rtableid);
3154
3155	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
3156	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
3157	if (pfse == NULL) {
3158		m_freem(m);
3159		return;
3160	}
3161
3162	switch (af) {
3163#ifdef INET
3164	case AF_INET:
3165		pfse->pfse_type = PFSE_IP;
3166		break;
3167#endif /* INET */
3168#ifdef INET6
3169	case AF_INET6:
3170		pfse->pfse_type = PFSE_IP6;
3171		break;
3172#endif /* INET6 */
3173	}
3174
3175	pfse->pfse_m = m;
3176	pf_send(pfse);
3177}
3178
3179void
3180pf_send_tcp(const struct pf_krule *r, sa_family_t af,
3181    const struct pf_addr *saddr, const struct pf_addr *daddr,
3182    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
3183    u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
3184    bool skip_firewall, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid)
3185{
3186	struct pf_send_entry *pfse;
3187	struct mbuf	*m;
3188
3189	m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, tcp_flags,
3190	    win, mss, ttl, skip_firewall, mtag_tag, mtag_flags, rtableid);
3191	if (m == NULL)
3192		return;
3193
3194	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
3195	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
3196	if (pfse == NULL) {
3197		m_freem(m);
3198		return;
3199	}
3200
3201	switch (af) {
3202#ifdef INET
3203	case AF_INET:
3204		pfse->pfse_type = PFSE_IP;
3205		break;
3206#endif /* INET */
3207#ifdef INET6
3208	case AF_INET6:
3209		pfse->pfse_type = PFSE_IP6;
3210		break;
3211#endif /* INET6 */
3212	}
3213
3214	pfse->pfse_m = m;
3215	pf_send(pfse);
3216}
3217
3218static void
3219pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
3220    struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
3221    struct pfi_kkif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
3222    u_short *reason, int rtableid)
3223{
3224	struct pf_addr	* const saddr = pd->src;
3225	struct pf_addr	* const daddr = pd->dst;
3226	sa_family_t	 af = pd->af;
3227
3228	/* undo NAT changes, if they have taken place */
3229	if (nr != NULL) {
3230		PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3231		PF_ACPY(daddr, &sk->addr[pd->didx], af);
3232		if (pd->sport)
3233			*pd->sport = sk->port[pd->sidx];
3234		if (pd->dport)
3235			*pd->dport = sk->port[pd->didx];
3236		if (pd->proto_sum)
3237			*pd->proto_sum = bproto_sum;
3238		if (pd->ip_sum)
3239			*pd->ip_sum = bip_sum;
3240		m_copyback(m, off, hdrlen, pd->hdr.any);
3241	}
3242	if (pd->proto == IPPROTO_TCP &&
3243	    ((r->rule_flag & PFRULE_RETURNRST) ||
3244	    (r->rule_flag & PFRULE_RETURN)) &&
3245	    !(th->th_flags & TH_RST)) {
3246		u_int32_t	 ack = ntohl(th->th_seq) + pd->p_len;
3247		int		 len = 0;
3248#ifdef INET
3249		struct ip	*h4;
3250#endif
3251#ifdef INET6
3252		struct ip6_hdr	*h6;
3253#endif
3254
3255		switch (af) {
3256#ifdef INET
3257		case AF_INET:
3258			h4 = mtod(m, struct ip *);
3259			len = ntohs(h4->ip_len) - off;
3260			break;
3261#endif
3262#ifdef INET6
3263		case AF_INET6:
3264			h6 = mtod(m, struct ip6_hdr *);
3265			len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3266			break;
3267#endif
3268		}
3269
3270		if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3271			REASON_SET(reason, PFRES_PROTCKSUM);
3272		else {
3273			if (th->th_flags & TH_SYN)
3274				ack++;
3275			if (th->th_flags & TH_FIN)
3276				ack++;
3277			pf_send_tcp(r, af, pd->dst,
3278				pd->src, th->th_dport, th->th_sport,
3279				ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3280				r->return_ttl, true, 0, 0, rtableid);
3281		}
3282	} else if (pd->proto == IPPROTO_SCTP &&
3283	    (r->rule_flag & PFRULE_RETURN)) {
3284		pf_send_sctp_abort(af, pd, r->return_ttl, rtableid);
3285	} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3286		r->return_icmp)
3287		pf_send_icmp(m, r->return_icmp >> 8,
3288			r->return_icmp & 255, af, r, rtableid);
3289	else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3290		r->return_icmp6)
3291		pf_send_icmp(m, r->return_icmp6 >> 8,
3292			r->return_icmp6 & 255, af, r, rtableid);
3293}
3294
3295static int
3296pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
3297{
3298	struct m_tag *mtag;
3299	u_int8_t mpcp;
3300
3301	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
3302	if (mtag == NULL)
3303		return (0);
3304
3305	if (prio == PF_PRIO_ZERO)
3306		prio = 0;
3307
3308	mpcp = *(uint8_t *)(mtag + 1);
3309
3310	return (mpcp == prio);
3311}
3312
3313static int
3314pf_icmp_to_bandlim(uint8_t type)
3315{
3316	switch (type) {
3317		case ICMP_ECHO:
3318		case ICMP_ECHOREPLY:
3319			return (BANDLIM_ICMP_ECHO);
3320		case ICMP_TSTAMP:
3321		case ICMP_TSTAMPREPLY:
3322			return (BANDLIM_ICMP_TSTAMP);
3323		case ICMP_UNREACH:
3324		default:
3325			return (BANDLIM_ICMP_UNREACH);
3326	}
3327}
3328
3329static void
3330pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
3331    struct pf_krule *r, int rtableid)
3332{
3333	struct pf_send_entry *pfse;
3334	struct mbuf *m0;
3335	struct pf_mtag *pf_mtag;
3336
3337	/* ICMP packet rate limitation. */
3338#ifdef INET6
3339	if (af == AF_INET6) {
3340		if (icmp6_ratelimit(NULL, type, code))
3341			return;
3342	}
3343#endif
3344#ifdef INET
3345	if (af == AF_INET) {
3346		if (badport_bandlim(pf_icmp_to_bandlim(type)) != 0)
3347			return;
3348	}
3349#endif
3350
3351	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
3352	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
3353	if (pfse == NULL)
3354		return;
3355
3356	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
3357		free(pfse, M_PFTEMP);
3358		return;
3359	}
3360
3361	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
3362		free(pfse, M_PFTEMP);
3363		return;
3364	}
3365	/* XXX: revisit */
3366	m0->m_flags |= M_SKIP_FIREWALL;
3367
3368	if (rtableid >= 0)
3369		M_SETFIB(m0, rtableid);
3370
3371#ifdef ALTQ
3372	if (r->qid) {
3373		pf_mtag->qid = r->qid;
3374		/* add hints for ecn */
3375		pf_mtag->hdr = mtod(m0, struct ip *);
3376	}
3377#endif /* ALTQ */
3378
3379	switch (af) {
3380#ifdef INET
3381	case AF_INET:
3382		pfse->pfse_type = PFSE_ICMP;
3383		break;
3384#endif /* INET */
3385#ifdef INET6
3386	case AF_INET6:
3387		pfse->pfse_type = PFSE_ICMP6;
3388		break;
3389#endif /* INET6 */
3390	}
3391	pfse->pfse_m = m0;
3392	pfse->icmpopts.type = type;
3393	pfse->icmpopts.code = code;
3394	pf_send(pfse);
3395}
3396
3397/*
3398 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
3399 * If n is 0, they match if they are equal. If n is != 0, they match if they
3400 * are different.
3401 */
3402int
3403pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
3404    struct pf_addr *b, sa_family_t af)
3405{
3406	int	match = 0;
3407
3408	switch (af) {
3409#ifdef INET
3410	case AF_INET:
3411		if ((a->addr32[0] & m->addr32[0]) ==
3412		    (b->addr32[0] & m->addr32[0]))
3413			match++;
3414		break;
3415#endif /* INET */
3416#ifdef INET6
3417	case AF_INET6:
3418		if (((a->addr32[0] & m->addr32[0]) ==
3419		     (b->addr32[0] & m->addr32[0])) &&
3420		    ((a->addr32[1] & m->addr32[1]) ==
3421		     (b->addr32[1] & m->addr32[1])) &&
3422		    ((a->addr32[2] & m->addr32[2]) ==
3423		     (b->addr32[2] & m->addr32[2])) &&
3424		    ((a->addr32[3] & m->addr32[3]) ==
3425		     (b->addr32[3] & m->addr32[3])))
3426			match++;
3427		break;
3428#endif /* INET6 */
3429	}
3430	if (match) {
3431		if (n)
3432			return (0);
3433		else
3434			return (1);
3435	} else {
3436		if (n)
3437			return (1);
3438		else
3439			return (0);
3440	}
3441}
3442
3443/*
3444 * Return 1 if b <= a <= e, otherwise return 0.
3445 */
3446int
3447pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
3448    struct pf_addr *a, sa_family_t af)
3449{
3450	switch (af) {
3451#ifdef INET
3452	case AF_INET:
3453		if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
3454		    (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
3455			return (0);
3456		break;
3457#endif /* INET */
3458#ifdef INET6
3459	case AF_INET6: {
3460		int	i;
3461
3462		/* check a >= b */
3463		for (i = 0; i < 4; ++i)
3464			if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
3465				break;
3466			else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
3467				return (0);
3468		/* check a <= e */
3469		for (i = 0; i < 4; ++i)
3470			if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
3471				break;
3472			else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
3473				return (0);
3474		break;
3475	}
3476#endif /* INET6 */
3477	}
3478	return (1);
3479}
3480
3481static int
3482pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
3483{
3484	switch (op) {
3485	case PF_OP_IRG:
3486		return ((p > a1) && (p < a2));
3487	case PF_OP_XRG:
3488		return ((p < a1) || (p > a2));
3489	case PF_OP_RRG:
3490		return ((p >= a1) && (p <= a2));
3491	case PF_OP_EQ:
3492		return (p == a1);
3493	case PF_OP_NE:
3494		return (p != a1);
3495	case PF_OP_LT:
3496		return (p < a1);
3497	case PF_OP_LE:
3498		return (p <= a1);
3499	case PF_OP_GT:
3500		return (p > a1);
3501	case PF_OP_GE:
3502		return (p >= a1);
3503	}
3504	return (0); /* never reached */
3505}
3506
3507int
3508pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
3509{
3510	NTOHS(a1);
3511	NTOHS(a2);
3512	NTOHS(p);
3513	return (pf_match(op, a1, a2, p));
3514}
3515
3516static int
3517pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
3518{
3519	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3520		return (0);
3521	return (pf_match(op, a1, a2, u));
3522}
3523
3524static int
3525pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
3526{
3527	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3528		return (0);
3529	return (pf_match(op, a1, a2, g));
3530}
3531
3532int
3533pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag)
3534{
3535	if (*tag == -1)
3536		*tag = mtag;
3537
3538	return ((!r->match_tag_not && r->match_tag == *tag) ||
3539	    (r->match_tag_not && r->match_tag != *tag));
3540}
3541
3542int
3543pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
3544{
3545
3546	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
3547
3548	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
3549		return (ENOMEM);
3550
3551	pd->pf_mtag->tag = tag;
3552
3553	return (0);
3554}
3555
3556#define	PF_ANCHOR_STACKSIZE	32
3557struct pf_kanchor_stackframe {
3558	struct pf_kruleset	*rs;
3559	struct pf_krule		*r;	/* XXX: + match bit */
3560	struct pf_kanchor	*child;
3561};
3562
3563/*
3564 * XXX: We rely on malloc(9) returning pointer aligned addresses.
3565 */
3566#define	PF_ANCHORSTACK_MATCH	0x00000001
3567#define	PF_ANCHORSTACK_MASK	(PF_ANCHORSTACK_MATCH)
3568
3569#define	PF_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
3570#define	PF_ANCHOR_RULE(f)	(struct pf_krule *)			\
3571				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
3572#define	PF_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *) 			\
3573				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
3574} while (0)
3575
3576void
3577pf_step_into_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3578    struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3579    int *match)
3580{
3581	struct pf_kanchor_stackframe	*f;
3582
3583	PF_RULES_RASSERT();
3584
3585	if (match)
3586		*match = 0;
3587	if (*depth >= PF_ANCHOR_STACKSIZE) {
3588		printf("%s: anchor stack overflow on %s\n",
3589		    __func__, (*r)->anchor->name);
3590		*r = TAILQ_NEXT(*r, entries);
3591		return;
3592	} else if (*depth == 0 && a != NULL)
3593		*a = *r;
3594	f = stack + (*depth)++;
3595	f->rs = *rs;
3596	f->r = *r;
3597	if ((*r)->anchor_wildcard) {
3598		struct pf_kanchor_node *parent = &(*r)->anchor->children;
3599
3600		if ((f->child = RB_MIN(pf_kanchor_node, parent)) == NULL) {
3601			*r = NULL;
3602			return;
3603		}
3604		*rs = &f->child->ruleset;
3605	} else {
3606		f->child = NULL;
3607		*rs = &(*r)->anchor->ruleset;
3608	}
3609	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3610}
3611
3612int
3613pf_step_out_of_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3614    struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3615    int *match)
3616{
3617	struct pf_kanchor_stackframe	*f;
3618	struct pf_krule *fr;
3619	int quick = 0;
3620
3621	PF_RULES_RASSERT();
3622
3623	do {
3624		if (*depth <= 0)
3625			break;
3626		f = stack + *depth - 1;
3627		fr = PF_ANCHOR_RULE(f);
3628		if (f->child != NULL) {
3629			/*
3630			 * This block traverses through
3631			 * a wildcard anchor.
3632			 */
3633			if (match != NULL && *match) {
3634				/*
3635				 * If any of "*" matched, then
3636				 * "foo/ *" matched, mark frame
3637				 * appropriately.
3638				 */
3639				PF_ANCHOR_SET_MATCH(f);
3640				*match = 0;
3641			}
3642			f->child = RB_NEXT(pf_kanchor_node,
3643			    &fr->anchor->children, f->child);
3644			if (f->child != NULL) {
3645				*rs = &f->child->ruleset;
3646				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3647				if (*r == NULL)
3648					continue;
3649				else
3650					break;
3651			}
3652		}
3653		(*depth)--;
3654		if (*depth == 0 && a != NULL)
3655			*a = NULL;
3656		*rs = f->rs;
3657		if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
3658			quick = fr->quick;
3659		*r = TAILQ_NEXT(fr, entries);
3660	} while (*r == NULL);
3661
3662	return (quick);
3663}
3664
3665struct pf_keth_anchor_stackframe {
3666	struct pf_keth_ruleset	*rs;
3667	struct pf_keth_rule	*r;	/* XXX: + match bit */
3668	struct pf_keth_anchor	*child;
3669};
3670
3671#define	PF_ETH_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
3672#define	PF_ETH_ANCHOR_RULE(f)	(struct pf_keth_rule *)			\
3673				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
3674#define	PF_ETH_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *) 		\
3675				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
3676} while (0)
3677
3678void
3679pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth,
3680    struct pf_keth_ruleset **rs, struct pf_keth_rule **r,
3681    struct pf_keth_rule **a, int *match)
3682{
3683	struct pf_keth_anchor_stackframe	*f;
3684
3685	NET_EPOCH_ASSERT();
3686
3687	if (match)
3688		*match = 0;
3689	if (*depth >= PF_ANCHOR_STACKSIZE) {
3690		printf("%s: anchor stack overflow on %s\n",
3691		    __func__, (*r)->anchor->name);
3692		*r = TAILQ_NEXT(*r, entries);
3693		return;
3694	} else if (*depth == 0 && a != NULL)
3695		*a = *r;
3696	f = stack + (*depth)++;
3697	f->rs = *rs;
3698	f->r = *r;
3699	if ((*r)->anchor_wildcard) {
3700		struct pf_keth_anchor_node *parent = &(*r)->anchor->children;
3701
3702		if ((f->child = RB_MIN(pf_keth_anchor_node, parent)) == NULL) {
3703			*r = NULL;
3704			return;
3705		}
3706		*rs = &f->child->ruleset;
3707	} else {
3708		f->child = NULL;
3709		*rs = &(*r)->anchor->ruleset;
3710	}
3711	*r = TAILQ_FIRST((*rs)->active.rules);
3712}
3713
3714int
3715pf_step_out_of_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth,
3716    struct pf_keth_ruleset **rs, struct pf_keth_rule **r,
3717    struct pf_keth_rule **a, int *match)
3718{
3719	struct pf_keth_anchor_stackframe	*f;
3720	struct pf_keth_rule *fr;
3721	int quick = 0;
3722
3723	NET_EPOCH_ASSERT();
3724
3725	do {
3726		if (*depth <= 0)
3727			break;
3728		f = stack + *depth - 1;
3729		fr = PF_ETH_ANCHOR_RULE(f);
3730		if (f->child != NULL) {
3731			/*
3732			 * This block traverses through
3733			 * a wildcard anchor.
3734			 */
3735			if (match != NULL && *match) {
3736				/*
3737				 * If any of "*" matched, then
3738				 * "foo/ *" matched, mark frame
3739				 * appropriately.
3740				 */
3741				PF_ETH_ANCHOR_SET_MATCH(f);
3742				*match = 0;
3743			}
3744			f->child = RB_NEXT(pf_keth_anchor_node,
3745			    &fr->anchor->children, f->child);
3746			if (f->child != NULL) {
3747				*rs = &f->child->ruleset;
3748				*r = TAILQ_FIRST((*rs)->active.rules);
3749				if (*r == NULL)
3750					continue;
3751				else
3752					break;
3753			}
3754		}
3755		(*depth)--;
3756		if (*depth == 0 && a != NULL)
3757			*a = NULL;
3758		*rs = f->rs;
3759		if (PF_ETH_ANCHOR_MATCH(f) || (match != NULL && *match))
3760			quick = fr->quick;
3761		*r = TAILQ_NEXT(fr, entries);
3762	} while (*r == NULL);
3763
3764	return (quick);
3765}
3766
3767#ifdef INET6
3768void
3769pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3770    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3771{
3772	switch (af) {
3773#ifdef INET
3774	case AF_INET:
3775		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3776		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3777		break;
3778#endif /* INET */
3779	case AF_INET6:
3780		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3781		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3782		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3783		((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
3784		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3785		((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
3786		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3787		((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
3788		break;
3789	}
3790}
3791
3792void
3793pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3794{
3795	switch (af) {
3796#ifdef INET
3797	case AF_INET:
3798		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3799		break;
3800#endif /* INET */
3801	case AF_INET6:
3802		if (addr->addr32[3] == 0xffffffff) {
3803			addr->addr32[3] = 0;
3804			if (addr->addr32[2] == 0xffffffff) {
3805				addr->addr32[2] = 0;
3806				if (addr->addr32[1] == 0xffffffff) {
3807					addr->addr32[1] = 0;
3808					addr->addr32[0] =
3809					    htonl(ntohl(addr->addr32[0]) + 1);
3810				} else
3811					addr->addr32[1] =
3812					    htonl(ntohl(addr->addr32[1]) + 1);
3813			} else
3814				addr->addr32[2] =
3815				    htonl(ntohl(addr->addr32[2]) + 1);
3816		} else
3817			addr->addr32[3] =
3818			    htonl(ntohl(addr->addr32[3]) + 1);
3819		break;
3820	}
3821}
3822#endif /* INET6 */
3823
3824void
3825pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
3826{
3827	/*
3828	 * Modern rules use the same flags in rules as they do in states.
3829	 */
3830	a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
3831	    PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
3832
3833	/*
3834	 * Old-style scrub rules have different flags which need to be translated.
3835	 */
3836	if (r->rule_flag & PFRULE_RANDOMID)
3837		a->flags |= PFSTATE_RANDOMID;
3838	if (r->scrub_flags & PFSTATE_SETTOS || r->rule_flag & PFRULE_SET_TOS ) {
3839		a->flags |= PFSTATE_SETTOS;
3840		a->set_tos = r->set_tos;
3841	}
3842
3843	if (r->qid)
3844		a->qid = r->qid;
3845	if (r->pqid)
3846		a->pqid = r->pqid;
3847	if (r->rtableid >= 0)
3848		a->rtableid = r->rtableid;
3849	a->log |= r->log;
3850	if (r->min_ttl)
3851		a->min_ttl = r->min_ttl;
3852	if (r->max_mss)
3853		a->max_mss = r->max_mss;
3854	if (r->dnpipe)
3855		a->dnpipe = r->dnpipe;
3856	if (r->dnrpipe)
3857		a->dnrpipe = r->dnrpipe;
3858	if (r->dnpipe || r->dnrpipe) {
3859		if (r->free_flags & PFRULE_DN_IS_PIPE)
3860			a->flags |= PFSTATE_DN_IS_PIPE;
3861		else
3862			a->flags &= ~PFSTATE_DN_IS_PIPE;
3863	}
3864	if (r->scrub_flags & PFSTATE_SETPRIO) {
3865		a->set_prio[0] = r->set_prio[0];
3866		a->set_prio[1] = r->set_prio[1];
3867	}
3868}
3869
3870int
3871pf_socket_lookup(struct pf_pdesc *pd, struct mbuf *m)
3872{
3873	struct pf_addr		*saddr, *daddr;
3874	u_int16_t		 sport, dport;
3875	struct inpcbinfo	*pi;
3876	struct inpcb		*inp;
3877
3878	pd->lookup.uid = UID_MAX;
3879	pd->lookup.gid = GID_MAX;
3880
3881	switch (pd->proto) {
3882	case IPPROTO_TCP:
3883		sport = pd->hdr.tcp.th_sport;
3884		dport = pd->hdr.tcp.th_dport;
3885		pi = &V_tcbinfo;
3886		break;
3887	case IPPROTO_UDP:
3888		sport = pd->hdr.udp.uh_sport;
3889		dport = pd->hdr.udp.uh_dport;
3890		pi = &V_udbinfo;
3891		break;
3892	default:
3893		return (-1);
3894	}
3895	if (pd->dir == PF_IN) {
3896		saddr = pd->src;
3897		daddr = pd->dst;
3898	} else {
3899		u_int16_t	p;
3900
3901		p = sport;
3902		sport = dport;
3903		dport = p;
3904		saddr = pd->dst;
3905		daddr = pd->src;
3906	}
3907	switch (pd->af) {
3908#ifdef INET
3909	case AF_INET:
3910		inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
3911		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
3912		if (inp == NULL) {
3913			inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
3914			   daddr->v4, dport, INPLOOKUP_WILDCARD |
3915			   INPLOOKUP_RLOCKPCB, NULL, m);
3916			if (inp == NULL)
3917				return (-1);
3918		}
3919		break;
3920#endif /* INET */
3921#ifdef INET6
3922	case AF_INET6:
3923		inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
3924		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
3925		if (inp == NULL) {
3926			inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
3927			    &daddr->v6, dport, INPLOOKUP_WILDCARD |
3928			    INPLOOKUP_RLOCKPCB, NULL, m);
3929			if (inp == NULL)
3930				return (-1);
3931		}
3932		break;
3933#endif /* INET6 */
3934
3935	default:
3936		return (-1);
3937	}
3938	INP_RLOCK_ASSERT(inp);
3939	pd->lookup.uid = inp->inp_cred->cr_uid;
3940	pd->lookup.gid = inp->inp_cred->cr_groups[0];
3941	INP_RUNLOCK(inp);
3942
3943	return (1);
3944}
3945
3946u_int8_t
3947pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3948{
3949	int		 hlen;
3950	u_int8_t	 hdr[60];
3951	u_int8_t	*opt, optlen;
3952	u_int8_t	 wscale = 0;
3953
3954	hlen = th_off << 2;		/* hlen <= sizeof(hdr) */
3955	if (hlen <= sizeof(struct tcphdr))
3956		return (0);
3957	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3958		return (0);
3959	opt = hdr + sizeof(struct tcphdr);
3960	hlen -= sizeof(struct tcphdr);
3961	while (hlen >= 3) {
3962		switch (*opt) {
3963		case TCPOPT_EOL:
3964		case TCPOPT_NOP:
3965			++opt;
3966			--hlen;
3967			break;
3968		case TCPOPT_WINDOW:
3969			wscale = opt[2];
3970			if (wscale > TCP_MAX_WINSHIFT)
3971				wscale = TCP_MAX_WINSHIFT;
3972			wscale |= PF_WSCALE_FLAG;
3973			/* FALLTHROUGH */
3974		default:
3975			optlen = opt[1];
3976			if (optlen < 2)
3977				optlen = 2;
3978			hlen -= optlen;
3979			opt += optlen;
3980			break;
3981		}
3982	}
3983	return (wscale);
3984}
3985
3986u_int16_t
3987pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3988{
3989	int		 hlen;
3990	u_int8_t	 hdr[60];
3991	u_int8_t	*opt, optlen;
3992	u_int16_t	 mss = V_tcp_mssdflt;
3993
3994	hlen = th_off << 2;	/* hlen <= sizeof(hdr) */
3995	if (hlen <= sizeof(struct tcphdr))
3996		return (0);
3997	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3998		return (0);
3999	opt = hdr + sizeof(struct tcphdr);
4000	hlen -= sizeof(struct tcphdr);
4001	while (hlen >= TCPOLEN_MAXSEG) {
4002		switch (*opt) {
4003		case TCPOPT_EOL:
4004		case TCPOPT_NOP:
4005			++opt;
4006			--hlen;
4007			break;
4008		case TCPOPT_MAXSEG:
4009			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
4010			NTOHS(mss);
4011			/* FALLTHROUGH */
4012		default:
4013			optlen = opt[1];
4014			if (optlen < 2)
4015				optlen = 2;
4016			hlen -= optlen;
4017			opt += optlen;
4018			break;
4019		}
4020	}
4021	return (mss);
4022}
4023
4024static u_int16_t
4025pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
4026{
4027	struct nhop_object *nh;
4028#ifdef INET6
4029	struct in6_addr		dst6;
4030	uint32_t		scopeid;
4031#endif /* INET6 */
4032	int			 hlen = 0;
4033	uint16_t		 mss = 0;
4034
4035	NET_EPOCH_ASSERT();
4036
4037	switch (af) {
4038#ifdef INET
4039	case AF_INET:
4040		hlen = sizeof(struct ip);
4041		nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0);
4042		if (nh != NULL)
4043			mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
4044		break;
4045#endif /* INET */
4046#ifdef INET6
4047	case AF_INET6:
4048		hlen = sizeof(struct ip6_hdr);
4049		in6_splitscope(&addr->v6, &dst6, &scopeid);
4050		nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0);
4051		if (nh != NULL)
4052			mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
4053		break;
4054#endif /* INET6 */
4055	}
4056
4057	mss = max(V_tcp_mssdflt, mss);
4058	mss = min(mss, offer);
4059	mss = max(mss, 64);		/* sanity - at least max opt space */
4060	return (mss);
4061}
4062
4063static u_int32_t
4064pf_tcp_iss(struct pf_pdesc *pd)
4065{
4066	MD5_CTX ctx;
4067	u_int32_t digest[4];
4068
4069	if (V_pf_tcp_secret_init == 0) {
4070		arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
4071		MD5Init(&V_pf_tcp_secret_ctx);
4072		MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
4073		    sizeof(V_pf_tcp_secret));
4074		V_pf_tcp_secret_init = 1;
4075	}
4076
4077	ctx = V_pf_tcp_secret_ctx;
4078
4079	MD5Update(&ctx, (char *)&pd->hdr.tcp.th_sport, sizeof(u_short));
4080	MD5Update(&ctx, (char *)&pd->hdr.tcp.th_dport, sizeof(u_short));
4081	if (pd->af == AF_INET6) {
4082		MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
4083		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
4084	} else {
4085		MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
4086		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
4087	}
4088	MD5Final((u_char *)digest, &ctx);
4089	V_pf_tcp_iss_off += 4096;
4090#define	ISN_RANDOM_INCREMENT (4096 - 1)
4091	return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
4092	    V_pf_tcp_iss_off);
4093#undef	ISN_RANDOM_INCREMENT
4094}
4095
4096static bool
4097pf_match_eth_addr(const uint8_t *a, const struct pf_keth_rule_addr *r)
4098{
4099	bool match = true;
4100
4101	/* Always matches if not set */
4102	if (! r->isset)
4103		return (!r->neg);
4104
4105	for (int i = 0; i < ETHER_ADDR_LEN; i++) {
4106		if ((a[i] & r->mask[i]) != (r->addr[i] & r->mask[i])) {
4107			match = false;
4108			break;
4109		}
4110	}
4111
4112	return (match ^ r->neg);
4113}
4114
4115static int
4116pf_match_eth_tag(struct mbuf *m, struct pf_keth_rule *r, int *tag, int mtag)
4117{
4118	if (*tag == -1)
4119		*tag = mtag;
4120
4121	return ((!r->match_tag_not && r->match_tag == *tag) ||
4122	    (r->match_tag_not && r->match_tag != *tag));
4123}
4124
4125static void
4126pf_bridge_to(struct ifnet *ifp, struct mbuf *m)
4127{
4128	/* If we don't have the interface drop the packet. */
4129	if (ifp == NULL) {
4130		m_freem(m);
4131		return;
4132	}
4133
4134	switch (ifp->if_type) {
4135	case IFT_ETHER:
4136	case IFT_XETHER:
4137	case IFT_L2VLAN:
4138	case IFT_BRIDGE:
4139	case IFT_IEEE8023ADLAG:
4140		break;
4141	default:
4142		m_freem(m);
4143		return;
4144	}
4145
4146	ifp->if_transmit(ifp, m);
4147}
4148
4149static int
4150pf_test_eth_rule(int dir, struct pfi_kkif *kif, struct mbuf **m0)
4151{
4152#ifdef INET
4153	struct ip ip;
4154#endif
4155#ifdef INET6
4156	struct ip6_hdr ip6;
4157#endif
4158	struct mbuf *m = *m0;
4159	struct ether_header *e;
4160	struct pf_keth_rule *r, *rm, *a = NULL;
4161	struct pf_keth_ruleset *ruleset = NULL;
4162	struct pf_mtag *mtag;
4163	struct pf_keth_ruleq *rules;
4164	struct pf_addr *src = NULL, *dst = NULL;
4165	struct pfi_kkif *bridge_to;
4166	sa_family_t af = 0;
4167	uint16_t proto;
4168	int asd = 0, match = 0;
4169	int tag = -1;
4170	uint8_t action;
4171	struct pf_keth_anchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
4172
4173	MPASS(kif->pfik_ifp->if_vnet == curvnet);
4174	NET_EPOCH_ASSERT();
4175
4176	PF_RULES_RLOCK_TRACKER;
4177
4178	SDT_PROBE3(pf, eth, test_rule, entry, dir, kif->pfik_ifp, m);
4179
4180	mtag = pf_find_mtag(m);
4181	if (mtag != NULL && mtag->flags & PF_MTAG_FLAG_DUMMYNET) {
4182		/* Dummynet re-injects packets after they've
4183		 * completed their delay. We've already
4184		 * processed them, so pass unconditionally. */
4185
4186		/* But only once. We may see the packet multiple times (e.g.
4187		 * PFIL_IN/PFIL_OUT). */
4188		pf_dummynet_flag_remove(m, mtag);
4189
4190		return (PF_PASS);
4191	}
4192
4193	ruleset = V_pf_keth;
4194	rules = ck_pr_load_ptr(&ruleset->active.rules);
4195	r = TAILQ_FIRST(rules);
4196	rm = NULL;
4197
4198	e = mtod(m, struct ether_header *);
4199	proto = ntohs(e->ether_type);
4200
4201	switch (proto) {
4202#ifdef INET
4203	case ETHERTYPE_IP: {
4204		if (m_length(m, NULL) < (sizeof(struct ether_header) +
4205		    sizeof(ip)))
4206			return (PF_DROP);
4207
4208		af = AF_INET;
4209		m_copydata(m, sizeof(struct ether_header), sizeof(ip),
4210		    (caddr_t)&ip);
4211		src = (struct pf_addr *)&ip.ip_src;
4212		dst = (struct pf_addr *)&ip.ip_dst;
4213		break;
4214	}
4215#endif /* INET */
4216#ifdef INET6
4217	case ETHERTYPE_IPV6: {
4218		if (m_length(m, NULL) < (sizeof(struct ether_header) +
4219		    sizeof(ip6)))
4220			return (PF_DROP);
4221
4222		af = AF_INET6;
4223		m_copydata(m, sizeof(struct ether_header), sizeof(ip6),
4224		    (caddr_t)&ip6);
4225		src = (struct pf_addr *)&ip6.ip6_src;
4226		dst = (struct pf_addr *)&ip6.ip6_dst;
4227		break;
4228	}
4229#endif /* INET6 */
4230	}
4231
4232	PF_RULES_RLOCK();
4233
4234	while (r != NULL) {
4235		counter_u64_add(r->evaluations, 1);
4236		SDT_PROBE2(pf, eth, test_rule, test, r->nr, r);
4237
4238		if (pfi_kkif_match(r->kif, kif) == r->ifnot) {
4239			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4240			    "kif");
4241			r = r->skip[PFE_SKIP_IFP].ptr;
4242		}
4243		else if (r->direction && r->direction != dir) {
4244			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4245			    "dir");
4246			r = r->skip[PFE_SKIP_DIR].ptr;
4247		}
4248		else if (r->proto && r->proto != proto) {
4249			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4250			    "proto");
4251			r = r->skip[PFE_SKIP_PROTO].ptr;
4252		}
4253		else if (! pf_match_eth_addr(e->ether_shost, &r->src)) {
4254			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4255			    "src");
4256			r = r->skip[PFE_SKIP_SRC_ADDR].ptr;
4257		}
4258		else if (! pf_match_eth_addr(e->ether_dhost, &r->dst)) {
4259			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4260			    "dst");
4261			r = r->skip[PFE_SKIP_DST_ADDR].ptr;
4262		}
4263		else if (src != NULL && PF_MISMATCHAW(&r->ipsrc.addr, src, af,
4264		    r->ipsrc.neg, kif, M_GETFIB(m))) {
4265			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4266			    "ip_src");
4267			r = r->skip[PFE_SKIP_SRC_IP_ADDR].ptr;
4268		}
4269		else if (dst != NULL && PF_MISMATCHAW(&r->ipdst.addr, dst, af,
4270		    r->ipdst.neg, kif, M_GETFIB(m))) {
4271			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4272			    "ip_dst");
4273			r = r->skip[PFE_SKIP_DST_IP_ADDR].ptr;
4274		}
4275		else if (r->match_tag && !pf_match_eth_tag(m, r, &tag,
4276		    mtag ? mtag->tag : 0)) {
4277			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
4278			    "match_tag");
4279			r = TAILQ_NEXT(r, entries);
4280		}
4281		else {
4282			if (r->tag)
4283				tag = r->tag;
4284			if (r->anchor == NULL) {
4285				/* Rule matches */
4286				rm = r;
4287
4288				SDT_PROBE2(pf, eth, test_rule, match, r->nr, r);
4289
4290				if (r->quick)
4291					break;
4292
4293				r = TAILQ_NEXT(r, entries);
4294			} else {
4295				pf_step_into_keth_anchor(anchor_stack, &asd,
4296				    &ruleset, &r, &a, &match);
4297			}
4298		}
4299		if (r == NULL && pf_step_out_of_keth_anchor(anchor_stack, &asd,
4300		    &ruleset, &r, &a, &match))
4301			break;
4302	}
4303
4304	r = rm;
4305
4306	SDT_PROBE2(pf, eth, test_rule, final_match, (r != NULL ? r->nr : -1), r);
4307
4308	/* Default to pass. */
4309	if (r == NULL) {
4310		PF_RULES_RUNLOCK();
4311		return (PF_PASS);
4312	}
4313
4314	/* Execute action. */
4315	counter_u64_add(r->packets[dir == PF_OUT], 1);
4316	counter_u64_add(r->bytes[dir == PF_OUT], m_length(m, NULL));
4317	pf_update_timestamp(r);
4318
4319	/* Shortcut. Don't tag if we're just going to drop anyway. */
4320	if (r->action == PF_DROP) {
4321		PF_RULES_RUNLOCK();
4322		return (PF_DROP);
4323	}
4324
4325	if (tag > 0) {
4326		if (mtag == NULL)
4327			mtag = pf_get_mtag(m);
4328		if (mtag == NULL) {
4329			PF_RULES_RUNLOCK();
4330			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4331			return (PF_DROP);
4332		}
4333		mtag->tag = tag;
4334	}
4335
4336	if (r->qid != 0) {
4337		if (mtag == NULL)
4338			mtag = pf_get_mtag(m);
4339		if (mtag == NULL) {
4340			PF_RULES_RUNLOCK();
4341			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4342			return (PF_DROP);
4343		}
4344		mtag->qid = r->qid;
4345	}
4346
4347	action = r->action;
4348	bridge_to = r->bridge_to;
4349
4350	/* Dummynet */
4351	if (r->dnpipe) {
4352		struct ip_fw_args dnflow;
4353
4354		/* Drop packet if dummynet is not loaded. */
4355		if (ip_dn_io_ptr == NULL) {
4356			PF_RULES_RUNLOCK();
4357			m_freem(m);
4358			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4359			return (PF_DROP);
4360		}
4361		if (mtag == NULL)
4362			mtag = pf_get_mtag(m);
4363		if (mtag == NULL) {
4364			PF_RULES_RUNLOCK();
4365			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4366			return (PF_DROP);
4367		}
4368
4369		bzero(&dnflow, sizeof(dnflow));
4370
4371		/* We don't have port numbers here, so we set 0.  That means
4372		 * that we'll be somewhat limited in distinguishing flows (i.e.
4373		 * only based on IP addresses, not based on port numbers), but
4374		 * it's better than nothing. */
4375		dnflow.f_id.dst_port = 0;
4376		dnflow.f_id.src_port = 0;
4377		dnflow.f_id.proto = 0;
4378
4379		dnflow.rule.info = r->dnpipe;
4380		dnflow.rule.info |= IPFW_IS_DUMMYNET;
4381		if (r->dnflags & PFRULE_DN_IS_PIPE)
4382			dnflow.rule.info |= IPFW_IS_PIPE;
4383
4384		dnflow.f_id.extra = dnflow.rule.info;
4385
4386		dnflow.flags = dir == PF_IN ? IPFW_ARGS_IN : IPFW_ARGS_OUT;
4387		dnflow.flags |= IPFW_ARGS_ETHER;
4388		dnflow.ifp = kif->pfik_ifp;
4389
4390		switch (af) {
4391		case AF_INET:
4392			dnflow.f_id.addr_type = 4;
4393			dnflow.f_id.src_ip = src->v4.s_addr;
4394			dnflow.f_id.dst_ip = dst->v4.s_addr;
4395			break;
4396		case AF_INET6:
4397			dnflow.flags |= IPFW_ARGS_IP6;
4398			dnflow.f_id.addr_type = 6;
4399			dnflow.f_id.src_ip6 = src->v6;
4400			dnflow.f_id.dst_ip6 = dst->v6;
4401			break;
4402		}
4403
4404		PF_RULES_RUNLOCK();
4405
4406		mtag->flags |= PF_MTAG_FLAG_DUMMYNET;
4407		ip_dn_io_ptr(m0, &dnflow);
4408		if (*m0 != NULL)
4409			pf_dummynet_flag_remove(m, mtag);
4410	} else {
4411		PF_RULES_RUNLOCK();
4412	}
4413
4414	if (action == PF_PASS && bridge_to) {
4415		pf_bridge_to(bridge_to->pfik_ifp, *m0);
4416		*m0 = NULL; /* We've eaten the packet. */
4417	}
4418
4419	return (action);
4420}
4421
4422static int
4423pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, struct pfi_kkif *kif,
4424    struct mbuf *m, int off, struct pf_pdesc *pd, struct pf_krule **am,
4425    struct pf_kruleset **rsm, struct inpcb *inp)
4426{
4427	struct pf_krule		*nr = NULL;
4428	struct pf_addr		* const saddr = pd->src;
4429	struct pf_addr		* const daddr = pd->dst;
4430	sa_family_t		 af = pd->af;
4431	struct pf_krule		*r, *a = NULL;
4432	struct pf_kruleset	*ruleset = NULL;
4433	struct pf_krule_slist	 match_rules;
4434	struct pf_krule_item	*ri;
4435	struct pf_ksrc_node	*nsn = NULL;
4436	struct tcphdr		*th = &pd->hdr.tcp;
4437	struct pf_state_key	*sk = NULL, *nk = NULL;
4438	u_short			 reason;
4439	int			 rewrite = 0, hdrlen = 0;
4440	int			 tag = -1;
4441	int			 asd = 0;
4442	int			 match = 0;
4443	int			 state_icmp = 0;
4444	u_int16_t		 sport = 0, dport = 0;
4445	u_int16_t		 bproto_sum = 0, bip_sum = 0;
4446	u_int8_t		 icmptype = 0, icmpcode = 0;
4447	struct pf_kanchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
4448
4449	PF_RULES_RASSERT();
4450
4451	if (inp != NULL) {
4452		INP_LOCK_ASSERT(inp);
4453		pd->lookup.uid = inp->inp_cred->cr_uid;
4454		pd->lookup.gid = inp->inp_cred->cr_groups[0];
4455		pd->lookup.done = 1;
4456	}
4457
4458	switch (pd->proto) {
4459	case IPPROTO_TCP:
4460		sport = th->th_sport;
4461		dport = th->th_dport;
4462		hdrlen = sizeof(*th);
4463		break;
4464	case IPPROTO_UDP:
4465		sport = pd->hdr.udp.uh_sport;
4466		dport = pd->hdr.udp.uh_dport;
4467		hdrlen = sizeof(pd->hdr.udp);
4468		break;
4469	case IPPROTO_SCTP:
4470		sport = pd->hdr.sctp.src_port;
4471		dport = pd->hdr.sctp.dest_port;
4472		hdrlen = sizeof(pd->hdr.sctp);
4473		break;
4474#ifdef INET
4475	case IPPROTO_ICMP:
4476		if (pd->af != AF_INET)
4477			break;
4478		sport = dport = pd->hdr.icmp.icmp_id;
4479		hdrlen = sizeof(pd->hdr.icmp);
4480		icmptype = pd->hdr.icmp.icmp_type;
4481		icmpcode = pd->hdr.icmp.icmp_code;
4482
4483		if (icmptype == ICMP_UNREACH ||
4484		    icmptype == ICMP_SOURCEQUENCH ||
4485		    icmptype == ICMP_REDIRECT ||
4486		    icmptype == ICMP_TIMXCEED ||
4487		    icmptype == ICMP_PARAMPROB)
4488			state_icmp++;
4489		break;
4490#endif /* INET */
4491#ifdef INET6
4492	case IPPROTO_ICMPV6:
4493		if (af != AF_INET6)
4494			break;
4495		sport = dport = pd->hdr.icmp6.icmp6_id;
4496		hdrlen = sizeof(pd->hdr.icmp6);
4497		icmptype = pd->hdr.icmp6.icmp6_type;
4498		icmpcode = pd->hdr.icmp6.icmp6_code;
4499
4500		if (icmptype == ICMP6_DST_UNREACH ||
4501		    icmptype == ICMP6_PACKET_TOO_BIG ||
4502		    icmptype == ICMP6_TIME_EXCEEDED ||
4503		    icmptype == ICMP6_PARAM_PROB)
4504			state_icmp++;
4505		break;
4506#endif /* INET6 */
4507	default:
4508		sport = dport = hdrlen = 0;
4509		break;
4510	}
4511
4512	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4513
4514	/* check packet for BINAT/NAT/RDR */
4515	if ((nr = pf_get_translation(pd, m, off, kif, &nsn, &sk,
4516	    &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
4517		KASSERT(sk != NULL, ("%s: null sk", __func__));
4518		KASSERT(nk != NULL, ("%s: null nk", __func__));
4519
4520		if (nr->log) {
4521			PFLOG_PACKET(kif, m, af, PF_PASS, PFRES_MATCH, nr, a,
4522			    ruleset, pd, 1);
4523		}
4524
4525		if (pd->ip_sum)
4526			bip_sum = *pd->ip_sum;
4527
4528		switch (pd->proto) {
4529		case IPPROTO_TCP:
4530			bproto_sum = th->th_sum;
4531			pd->proto_sum = &th->th_sum;
4532
4533			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
4534			    nk->port[pd->sidx] != sport) {
4535				pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
4536				    &th->th_sum, &nk->addr[pd->sidx],
4537				    nk->port[pd->sidx], 0, af);
4538				pd->sport = &th->th_sport;
4539				sport = th->th_sport;
4540			}
4541
4542			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
4543			    nk->port[pd->didx] != dport) {
4544				pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
4545				    &th->th_sum, &nk->addr[pd->didx],
4546				    nk->port[pd->didx], 0, af);
4547				dport = th->th_dport;
4548				pd->dport = &th->th_dport;
4549			}
4550			rewrite++;
4551			break;
4552		case IPPROTO_UDP:
4553			bproto_sum = pd->hdr.udp.uh_sum;
4554			pd->proto_sum = &pd->hdr.udp.uh_sum;
4555
4556			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
4557			    nk->port[pd->sidx] != sport) {
4558				pf_change_ap(m, saddr, &pd->hdr.udp.uh_sport,
4559				    pd->ip_sum, &pd->hdr.udp.uh_sum,
4560				    &nk->addr[pd->sidx],
4561				    nk->port[pd->sidx], 1, af);
4562				sport = pd->hdr.udp.uh_sport;
4563				pd->sport = &pd->hdr.udp.uh_sport;
4564			}
4565
4566			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
4567			    nk->port[pd->didx] != dport) {
4568				pf_change_ap(m, daddr, &pd->hdr.udp.uh_dport,
4569				    pd->ip_sum, &pd->hdr.udp.uh_sum,
4570				    &nk->addr[pd->didx],
4571				    nk->port[pd->didx], 1, af);
4572				dport = pd->hdr.udp.uh_dport;
4573				pd->dport = &pd->hdr.udp.uh_dport;
4574			}
4575			rewrite++;
4576			break;
4577		case IPPROTO_SCTP: {
4578			uint16_t checksum = 0;
4579
4580			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
4581			    nk->port[pd->sidx] != sport) {
4582				pf_change_ap(m, saddr, &pd->hdr.sctp.src_port,
4583				    pd->ip_sum, &checksum,
4584				    &nk->addr[pd->sidx],
4585				    nk->port[pd->sidx], 1, af);
4586			}
4587			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
4588			    nk->port[pd->didx] != dport) {
4589				pf_change_ap(m, daddr, &pd->hdr.sctp.dest_port,
4590				    pd->ip_sum, &checksum,
4591				    &nk->addr[pd->didx],
4592				    nk->port[pd->didx], 1, af);
4593			}
4594			break;
4595		}
4596#ifdef INET
4597		case IPPROTO_ICMP:
4598			nk->port[0] = nk->port[1];
4599			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
4600				pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
4601				    nk->addr[pd->sidx].v4.s_addr, 0);
4602
4603			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
4604				pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
4605				    nk->addr[pd->didx].v4.s_addr, 0);
4606
4607			if (nk->port[1] != pd->hdr.icmp.icmp_id) {
4608				pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
4609				    pd->hdr.icmp.icmp_cksum, sport,
4610				    nk->port[1], 0);
4611				pd->hdr.icmp.icmp_id = nk->port[1];
4612				pd->sport = &pd->hdr.icmp.icmp_id;
4613			}
4614			m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
4615			break;
4616#endif /* INET */
4617#ifdef INET6
4618		case IPPROTO_ICMPV6:
4619			nk->port[0] = nk->port[1];
4620			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
4621				pf_change_a6(saddr, &pd->hdr.icmp6.icmp6_cksum,
4622				    &nk->addr[pd->sidx], 0);
4623
4624			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
4625				pf_change_a6(daddr, &pd->hdr.icmp6.icmp6_cksum,
4626				    &nk->addr[pd->didx], 0);
4627			rewrite++;
4628			break;
4629#endif /* INET */
4630		default:
4631			switch (af) {
4632#ifdef INET
4633			case AF_INET:
4634				if (PF_ANEQ(saddr,
4635				    &nk->addr[pd->sidx], AF_INET))
4636					pf_change_a(&saddr->v4.s_addr,
4637					    pd->ip_sum,
4638					    nk->addr[pd->sidx].v4.s_addr, 0);
4639
4640				if (PF_ANEQ(daddr,
4641				    &nk->addr[pd->didx], AF_INET))
4642					pf_change_a(&daddr->v4.s_addr,
4643					    pd->ip_sum,
4644					    nk->addr[pd->didx].v4.s_addr, 0);
4645				break;
4646#endif /* INET */
4647#ifdef INET6
4648			case AF_INET6:
4649				if (PF_ANEQ(saddr,
4650				    &nk->addr[pd->sidx], AF_INET6))
4651					PF_ACPY(saddr, &nk->addr[pd->sidx], af);
4652
4653				if (PF_ANEQ(daddr,
4654				    &nk->addr[pd->didx], AF_INET6))
4655					PF_ACPY(daddr, &nk->addr[pd->didx], af);
4656				break;
4657#endif /* INET */
4658			}
4659			break;
4660		}
4661		if (nr->natpass)
4662			r = NULL;
4663		pd->nat_rule = nr;
4664	}
4665
4666	SLIST_INIT(&match_rules);
4667	while (r != NULL) {
4668		pf_counter_u64_add(&r->evaluations, 1);
4669		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
4670			r = r->skip[PF_SKIP_IFP].ptr;
4671		else if (r->direction && r->direction != pd->dir)
4672			r = r->skip[PF_SKIP_DIR].ptr;
4673		else if (r->af && r->af != af)
4674			r = r->skip[PF_SKIP_AF].ptr;
4675		else if (r->proto && r->proto != pd->proto)
4676			r = r->skip[PF_SKIP_PROTO].ptr;
4677		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
4678		    r->src.neg, kif, M_GETFIB(m)))
4679			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4680		/* tcp/udp only. port_op always 0 in other cases */
4681		else if (r->src.port_op && !pf_match_port(r->src.port_op,
4682		    r->src.port[0], r->src.port[1], sport))
4683			r = r->skip[PF_SKIP_SRC_PORT].ptr;
4684		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
4685		    r->dst.neg, NULL, M_GETFIB(m)))
4686			r = r->skip[PF_SKIP_DST_ADDR].ptr;
4687		/* tcp/udp only. port_op always 0 in other cases */
4688		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
4689		    r->dst.port[0], r->dst.port[1], dport))
4690			r = r->skip[PF_SKIP_DST_PORT].ptr;
4691		/* icmp only. type always 0 in other cases */
4692		else if (r->type && r->type != icmptype + 1)
4693			r = TAILQ_NEXT(r, entries);
4694		/* icmp only. type always 0 in other cases */
4695		else if (r->code && r->code != icmpcode + 1)
4696			r = TAILQ_NEXT(r, entries);
4697		else if (r->tos && !(r->tos == pd->tos))
4698			r = TAILQ_NEXT(r, entries);
4699		else if (r->rule_flag & PFRULE_FRAGMENT)
4700			r = TAILQ_NEXT(r, entries);
4701		else if (pd->proto == IPPROTO_TCP &&
4702		    (r->flagset & th->th_flags) != r->flags)
4703			r = TAILQ_NEXT(r, entries);
4704		/* tcp/udp only. uid.op always 0 in other cases */
4705		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
4706		    pf_socket_lookup(pd, m), 1)) &&
4707		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
4708		    pd->lookup.uid))
4709			r = TAILQ_NEXT(r, entries);
4710		/* tcp/udp only. gid.op always 0 in other cases */
4711		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
4712		    pf_socket_lookup(pd, m), 1)) &&
4713		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
4714		    pd->lookup.gid))
4715			r = TAILQ_NEXT(r, entries);
4716		else if (r->prio &&
4717		    !pf_match_ieee8021q_pcp(r->prio, m))
4718			r = TAILQ_NEXT(r, entries);
4719		else if (r->prob &&
4720		    r->prob <= arc4random())
4721			r = TAILQ_NEXT(r, entries);
4722		else if (r->match_tag && !pf_match_tag(m, r, &tag,
4723		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
4724			r = TAILQ_NEXT(r, entries);
4725		else if (r->os_fingerprint != PF_OSFP_ANY &&
4726		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
4727		    pf_osfp_fingerprint(pd, m, off, th),
4728		    r->os_fingerprint)))
4729			r = TAILQ_NEXT(r, entries);
4730		else {
4731			if (r->tag)
4732				tag = r->tag;
4733			if (r->anchor == NULL) {
4734				if (r->action == PF_MATCH) {
4735					ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO);
4736					if (ri == NULL) {
4737						REASON_SET(&reason, PFRES_MEMORY);
4738						goto cleanup;
4739					}
4740					ri->r = r;
4741					SLIST_INSERT_HEAD(&match_rules, ri, entry);
4742					pf_counter_u64_critical_enter();
4743					pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
4744					pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
4745					pf_counter_u64_critical_exit();
4746					pf_rule_to_actions(r, &pd->act);
4747					if (r->log)
4748						PFLOG_PACKET(kif, m, af,
4749						    r->action, PFRES_MATCH, r,
4750						    a, ruleset, pd, 1);
4751				} else {
4752					match = 1;
4753					*rm = r;
4754					*am = a;
4755					*rsm = ruleset;
4756				}
4757				if ((*rm)->quick)
4758					break;
4759				r = TAILQ_NEXT(r, entries);
4760			} else
4761				pf_step_into_anchor(anchor_stack, &asd,
4762				    &ruleset, PF_RULESET_FILTER, &r, &a,
4763				    &match);
4764		}
4765		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
4766		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
4767			break;
4768	}
4769	r = *rm;
4770	a = *am;
4771	ruleset = *rsm;
4772
4773	REASON_SET(&reason, PFRES_MATCH);
4774
4775	/* apply actions for last matching pass/block rule */
4776	pf_rule_to_actions(r, &pd->act);
4777
4778	if (r->log) {
4779		if (rewrite)
4780			m_copyback(m, off, hdrlen, pd->hdr.any);
4781		PFLOG_PACKET(kif, m, af, r->action, reason, r, a, ruleset, pd, 1);
4782	}
4783
4784	if ((r->action == PF_DROP) &&
4785	    ((r->rule_flag & PFRULE_RETURNRST) ||
4786	    (r->rule_flag & PFRULE_RETURNICMP) ||
4787	    (r->rule_flag & PFRULE_RETURN))) {
4788		pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
4789		    bip_sum, hdrlen, &reason, r->rtableid);
4790	}
4791
4792	if (r->action == PF_DROP)
4793		goto cleanup;
4794
4795	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4796		REASON_SET(&reason, PFRES_MEMORY);
4797		goto cleanup;
4798	}
4799	if (pd->act.rtableid >= 0)
4800		M_SETFIB(m, pd->act.rtableid);
4801
4802	if (!state_icmp && (r->keep_state || nr != NULL ||
4803	    (pd->flags & PFDESC_TCP_NORM))) {
4804		int action;
4805		action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
4806		    sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
4807		    hdrlen, &match_rules);
4808		if (action != PF_PASS) {
4809			if (action == PF_DROP &&
4810			    (r->rule_flag & PFRULE_RETURN))
4811				pf_return(r, nr, pd, sk, off, m, th, kif,
4812				    bproto_sum, bip_sum, hdrlen, &reason,
4813				    pd->act.rtableid);
4814			return (action);
4815		}
4816	} else {
4817		while ((ri = SLIST_FIRST(&match_rules))) {
4818			SLIST_REMOVE_HEAD(&match_rules, entry);
4819			free(ri, M_PF_RULE_ITEM);
4820		}
4821
4822		uma_zfree(V_pf_state_key_z, sk);
4823		uma_zfree(V_pf_state_key_z, nk);
4824	}
4825
4826	/* copy back packet headers if we performed NAT operations */
4827	if (rewrite)
4828		m_copyback(m, off, hdrlen, pd->hdr.any);
4829
4830	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
4831	    pd->dir == PF_OUT &&
4832	    V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, m))
4833		/*
4834		 * We want the state created, but we dont
4835		 * want to send this in case a partner
4836		 * firewall has to know about it to allow
4837		 * replies through it.
4838		 */
4839		return (PF_DEFER);
4840
4841	return (PF_PASS);
4842
4843cleanup:
4844	while ((ri = SLIST_FIRST(&match_rules))) {
4845		SLIST_REMOVE_HEAD(&match_rules, entry);
4846		free(ri, M_PF_RULE_ITEM);
4847	}
4848
4849	uma_zfree(V_pf_state_key_z, sk);
4850	uma_zfree(V_pf_state_key_z, nk);
4851	return (PF_DROP);
4852}
4853
4854static int
4855pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
4856    struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk,
4857    struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
4858    u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm,
4859    int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
4860    struct pf_krule_slist *match_rules)
4861{
4862	struct pf_kstate	*s = NULL;
4863	struct pf_ksrc_node	*sn = NULL;
4864	struct tcphdr		*th = &pd->hdr.tcp;
4865	u_int16_t		 mss = V_tcp_mssdflt;
4866	u_short			 reason, sn_reason;
4867	struct pf_krule_item	*ri;
4868
4869	/* check maximums */
4870	if (r->max_states &&
4871	    (counter_u64_fetch(r->states_cur) >= r->max_states)) {
4872		counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
4873		REASON_SET(&reason, PFRES_MAXSTATES);
4874		goto csfailed;
4875	}
4876	/* src node for filter rule */
4877	if ((r->rule_flag & PFRULE_SRCTRACK ||
4878	    r->rpool.opts & PF_POOL_STICKYADDR) &&
4879	    (sn_reason = pf_insert_src_node(&sn, r, pd->src, pd->af)) != 0) {
4880		REASON_SET(&reason, sn_reason);
4881		goto csfailed;
4882	}
4883	/* src node for translation rule */
4884	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
4885	    (sn_reason = pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx],
4886	    pd->af)) != 0 ) {
4887		REASON_SET(&reason, sn_reason);
4888		goto csfailed;
4889	}
4890	s = pf_alloc_state(M_NOWAIT);
4891	if (s == NULL) {
4892		REASON_SET(&reason, PFRES_MEMORY);
4893		goto csfailed;
4894	}
4895	s->rule.ptr = r;
4896	s->nat_rule.ptr = nr;
4897	s->anchor.ptr = a;
4898	bcopy(match_rules, &s->match_rules, sizeof(s->match_rules));
4899	memcpy(&s->act, &pd->act, sizeof(struct pf_rule_actions));
4900
4901	STATE_INC_COUNTERS(s);
4902	if (r->allow_opts)
4903		s->state_flags |= PFSTATE_ALLOWOPTS;
4904	if (r->rule_flag & PFRULE_STATESLOPPY)
4905		s->state_flags |= PFSTATE_SLOPPY;
4906	if (pd->flags & PFDESC_TCP_NORM) /* Set by old-style scrub rules */
4907		s->state_flags |= PFSTATE_SCRUB_TCP;
4908	if ((r->rule_flag & PFRULE_PFLOW) ||
4909	    (nr != NULL && nr->rule_flag & PFRULE_PFLOW))
4910		s->state_flags |= PFSTATE_PFLOW;
4911
4912	s->act.log = pd->act.log & PF_LOG_ALL;
4913	s->sync_state = PFSYNC_S_NONE;
4914	s->state_flags |= pd->act.flags; /* Only needed for pfsync and state export */
4915
4916	if (nr != NULL)
4917		s->act.log |= nr->log & PF_LOG_ALL;
4918	switch (pd->proto) {
4919	case IPPROTO_TCP:
4920		s->src.seqlo = ntohl(th->th_seq);
4921		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
4922		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
4923		    r->keep_state == PF_STATE_MODULATE) {
4924			/* Generate sequence number modulator */
4925			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
4926			    0)
4927				s->src.seqdiff = 1;
4928			pf_change_proto_a(m, &th->th_seq, &th->th_sum,
4929			    htonl(s->src.seqlo + s->src.seqdiff), 0);
4930			*rewrite = 1;
4931		} else
4932			s->src.seqdiff = 0;
4933		if (th->th_flags & TH_SYN) {
4934			s->src.seqhi++;
4935			s->src.wscale = pf_get_wscale(m, off,
4936			    th->th_off, pd->af);
4937		}
4938		s->src.max_win = MAX(ntohs(th->th_win), 1);
4939		if (s->src.wscale & PF_WSCALE_MASK) {
4940			/* Remove scale factor from initial window */
4941			int win = s->src.max_win;
4942			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
4943			s->src.max_win = (win - 1) >>
4944			    (s->src.wscale & PF_WSCALE_MASK);
4945		}
4946		if (th->th_flags & TH_FIN)
4947			s->src.seqhi++;
4948		s->dst.seqhi = 1;
4949		s->dst.max_win = 1;
4950		pf_set_protostate(s, PF_PEER_SRC, TCPS_SYN_SENT);
4951		pf_set_protostate(s, PF_PEER_DST, TCPS_CLOSED);
4952		s->timeout = PFTM_TCP_FIRST_PACKET;
4953		atomic_add_32(&V_pf_status.states_halfopen, 1);
4954		break;
4955	case IPPROTO_UDP:
4956		pf_set_protostate(s, PF_PEER_SRC, PFUDPS_SINGLE);
4957		pf_set_protostate(s, PF_PEER_DST, PFUDPS_NO_TRAFFIC);
4958		s->timeout = PFTM_UDP_FIRST_PACKET;
4959		break;
4960	case IPPROTO_SCTP:
4961		pf_set_protostate(s, PF_PEER_SRC, SCTP_COOKIE_WAIT);
4962		pf_set_protostate(s, PF_PEER_DST, SCTP_CLOSED);
4963		s->timeout = PFTM_SCTP_FIRST_PACKET;
4964		break;
4965	case IPPROTO_ICMP:
4966#ifdef INET6
4967	case IPPROTO_ICMPV6:
4968#endif
4969		s->timeout = PFTM_ICMP_FIRST_PACKET;
4970		break;
4971	default:
4972		pf_set_protostate(s, PF_PEER_SRC, PFOTHERS_SINGLE);
4973		pf_set_protostate(s, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
4974		s->timeout = PFTM_OTHER_FIRST_PACKET;
4975	}
4976
4977	if (r->rt) {
4978		/* pf_map_addr increases the reason counters */
4979		if ((reason = pf_map_addr(pd->af, r, pd->src, &s->rt_addr,
4980		    &s->rt_kif, NULL, &sn)) != 0)
4981			goto csfailed;
4982		s->rt = r->rt;
4983	}
4984
4985	s->creation = s->expire = pf_get_uptime();
4986
4987	if (sn != NULL)
4988		s->src_node = sn;
4989	if (nsn != NULL) {
4990		/* XXX We only modify one side for now. */
4991		PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
4992		s->nat_src_node = nsn;
4993	}
4994	if (pd->proto == IPPROTO_TCP) {
4995		if (s->state_flags & PFSTATE_SCRUB_TCP &&
4996		    pf_normalize_tcp_init(m, off, pd, th, &s->src, &s->dst)) {
4997			REASON_SET(&reason, PFRES_MEMORY);
4998			goto drop;
4999		}
5000		if (s->state_flags & PFSTATE_SCRUB_TCP && s->src.scrub &&
5001		    pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
5002		    &s->src, &s->dst, rewrite)) {
5003			/* This really shouldn't happen!!! */
5004			DPFPRINTF(PF_DEBUG_URGENT,
5005			    ("pf_normalize_tcp_stateful failed on first "
5006			     "pkt\n"));
5007			goto drop;
5008		}
5009	} else if (pd->proto == IPPROTO_SCTP) {
5010		if (pf_normalize_sctp_init(m, off, pd, &s->src, &s->dst))
5011			goto drop;
5012		if (! (pd->sctp_flags & (PFDESC_SCTP_INIT | PFDESC_SCTP_ADD_IP)))
5013			goto drop;
5014	}
5015	s->direction = pd->dir;
5016
5017	/*
5018	 * sk/nk could already been setup by pf_get_translation().
5019	 */
5020	if (nr == NULL) {
5021		KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
5022		    __func__, nr, sk, nk));
5023		sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
5024		if (sk == NULL)
5025			goto csfailed;
5026		nk = sk;
5027	} else
5028		KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
5029		    __func__, nr, sk, nk));
5030
5031	/* Swap sk/nk for PF_OUT. */
5032	if (pf_state_insert(BOUND_IFACE(s, kif), kif,
5033	    (pd->dir == PF_IN) ? sk : nk,
5034	    (pd->dir == PF_IN) ? nk : sk, s)) {
5035		REASON_SET(&reason, PFRES_STATEINS);
5036		goto drop;
5037	} else
5038		*sm = s;
5039
5040	if (tag > 0)
5041		s->tag = tag;
5042	if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
5043	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
5044		pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
5045		/* undo NAT changes, if they have taken place */
5046		if (nr != NULL) {
5047			struct pf_state_key *skt = s->key[PF_SK_WIRE];
5048			if (pd->dir == PF_OUT)
5049				skt = s->key[PF_SK_STACK];
5050			PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
5051			PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
5052			if (pd->sport)
5053				*pd->sport = skt->port[pd->sidx];
5054			if (pd->dport)
5055				*pd->dport = skt->port[pd->didx];
5056			if (pd->proto_sum)
5057				*pd->proto_sum = bproto_sum;
5058			if (pd->ip_sum)
5059				*pd->ip_sum = bip_sum;
5060			m_copyback(m, off, hdrlen, pd->hdr.any);
5061		}
5062		s->src.seqhi = htonl(arc4random());
5063		/* Find mss option */
5064		int rtid = M_GETFIB(m);
5065		mss = pf_get_mss(m, off, th->th_off, pd->af);
5066		mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
5067		mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
5068		s->src.mss = mss;
5069		pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
5070		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
5071		    TH_SYN|TH_ACK, 0, s->src.mss, 0, true, 0, 0,
5072		    pd->act.rtableid);
5073		REASON_SET(&reason, PFRES_SYNPROXY);
5074		return (PF_SYNPROXY_DROP);
5075	}
5076
5077	return (PF_PASS);
5078
5079csfailed:
5080	while ((ri = SLIST_FIRST(match_rules))) {
5081		SLIST_REMOVE_HEAD(match_rules, entry);
5082		free(ri, M_PF_RULE_ITEM);
5083	}
5084
5085	uma_zfree(V_pf_state_key_z, sk);
5086	uma_zfree(V_pf_state_key_z, nk);
5087
5088	if (sn != NULL) {
5089		PF_SRC_NODE_LOCK(sn);
5090		if (--sn->states == 0 && sn->expire == 0) {
5091			pf_unlink_src_node(sn);
5092			uma_zfree(V_pf_sources_z, sn);
5093			counter_u64_add(
5094			    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
5095		}
5096		PF_SRC_NODE_UNLOCK(sn);
5097	}
5098
5099	if (nsn != sn && nsn != NULL) {
5100		PF_SRC_NODE_LOCK(nsn);
5101		if (--nsn->states == 0 && nsn->expire == 0) {
5102			pf_unlink_src_node(nsn);
5103			uma_zfree(V_pf_sources_z, nsn);
5104			counter_u64_add(
5105			    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
5106		}
5107		PF_SRC_NODE_UNLOCK(nsn);
5108	}
5109
5110drop:
5111	if (s != NULL) {
5112		pf_src_tree_remove_state(s);
5113		s->timeout = PFTM_UNLINKED;
5114		STATE_DEC_COUNTERS(s);
5115		pf_free_state(s);
5116	}
5117
5118	return (PF_DROP);
5119}
5120
5121static int
5122pf_test_fragment(struct pf_krule **rm, struct pfi_kkif *kif,
5123    struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_krule **am,
5124    struct pf_kruleset **rsm)
5125{
5126	struct pf_krule		*r, *a = NULL;
5127	struct pf_kruleset	*ruleset = NULL;
5128	struct pf_krule_slist	 match_rules;
5129	struct pf_krule_item	*ri;
5130	sa_family_t		 af = pd->af;
5131	u_short			 reason;
5132	int			 tag = -1;
5133	int			 asd = 0;
5134	int			 match = 0;
5135	struct pf_kanchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
5136
5137	PF_RULES_RASSERT();
5138
5139	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
5140	SLIST_INIT(&match_rules);
5141	while (r != NULL) {
5142		pf_counter_u64_add(&r->evaluations, 1);
5143		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
5144			r = r->skip[PF_SKIP_IFP].ptr;
5145		else if (r->direction && r->direction != pd->dir)
5146			r = r->skip[PF_SKIP_DIR].ptr;
5147		else if (r->af && r->af != af)
5148			r = r->skip[PF_SKIP_AF].ptr;
5149		else if (r->proto && r->proto != pd->proto)
5150			r = r->skip[PF_SKIP_PROTO].ptr;
5151		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
5152		    r->src.neg, kif, M_GETFIB(m)))
5153			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
5154		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
5155		    r->dst.neg, NULL, M_GETFIB(m)))
5156			r = r->skip[PF_SKIP_DST_ADDR].ptr;
5157		else if (r->tos && !(r->tos == pd->tos))
5158			r = TAILQ_NEXT(r, entries);
5159		else if (r->os_fingerprint != PF_OSFP_ANY)
5160			r = TAILQ_NEXT(r, entries);
5161		else if (pd->proto == IPPROTO_UDP &&
5162		    (r->src.port_op || r->dst.port_op))
5163			r = TAILQ_NEXT(r, entries);
5164		else if (pd->proto == IPPROTO_TCP &&
5165		    (r->src.port_op || r->dst.port_op || r->flagset))
5166			r = TAILQ_NEXT(r, entries);
5167		else if ((pd->proto == IPPROTO_ICMP ||
5168		    pd->proto == IPPROTO_ICMPV6) &&
5169		    (r->type || r->code))
5170			r = TAILQ_NEXT(r, entries);
5171		else if (r->prio &&
5172		    !pf_match_ieee8021q_pcp(r->prio, m))
5173			r = TAILQ_NEXT(r, entries);
5174		else if (r->prob && r->prob <=
5175		    (arc4random() % (UINT_MAX - 1) + 1))
5176			r = TAILQ_NEXT(r, entries);
5177		else if (r->match_tag && !pf_match_tag(m, r, &tag,
5178		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
5179			r = TAILQ_NEXT(r, entries);
5180		else {
5181			if (r->anchor == NULL) {
5182				if (r->action == PF_MATCH) {
5183					ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO);
5184					if (ri == NULL) {
5185						REASON_SET(&reason, PFRES_MEMORY);
5186						goto cleanup;
5187					}
5188					ri->r = r;
5189					SLIST_INSERT_HEAD(&match_rules, ri, entry);
5190					pf_counter_u64_critical_enter();
5191					pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
5192					pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
5193					pf_counter_u64_critical_exit();
5194					pf_rule_to_actions(r, &pd->act);
5195					if (r->log)
5196						PFLOG_PACKET(kif, m, af,
5197						    r->action, PFRES_MATCH, r,
5198						    a, ruleset, pd, 1);
5199				} else {
5200					match = 1;
5201					*rm = r;
5202					*am = a;
5203					*rsm = ruleset;
5204				}
5205				if ((*rm)->quick)
5206					break;
5207				r = TAILQ_NEXT(r, entries);
5208			} else
5209				pf_step_into_anchor(anchor_stack, &asd,
5210				    &ruleset, PF_RULESET_FILTER, &r, &a,
5211				    &match);
5212		}
5213		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
5214		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
5215			break;
5216	}
5217	r = *rm;
5218	a = *am;
5219	ruleset = *rsm;
5220
5221	REASON_SET(&reason, PFRES_MATCH);
5222
5223	/* apply actions for last matching pass/block rule */
5224	pf_rule_to_actions(r, &pd->act);
5225
5226	if (r->log)
5227		PFLOG_PACKET(kif, m, af, r->action, reason, r, a, ruleset, pd, 1);
5228
5229	if (r->action != PF_PASS)
5230		return (PF_DROP);
5231
5232	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
5233		REASON_SET(&reason, PFRES_MEMORY);
5234		goto cleanup;
5235	}
5236
5237	return (PF_PASS);
5238
5239cleanup:
5240	while ((ri = SLIST_FIRST(&match_rules))) {
5241		SLIST_REMOVE_HEAD(&match_rules, entry);
5242		free(ri, M_PF_RULE_ITEM);
5243	}
5244
5245	return (PF_DROP);
5246}
5247
5248static int
5249pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif,
5250    struct mbuf *m, int off, struct pf_pdesc *pd, u_short *reason,
5251    int *copyback)
5252{
5253	struct tcphdr		*th = &pd->hdr.tcp;
5254	struct pf_state_peer	*src, *dst;
5255	u_int16_t		 win = ntohs(th->th_win);
5256	u_int32_t		 ack, end, seq, orig_seq;
5257	u_int8_t		 sws, dws, psrc, pdst;
5258	int			 ackskew;
5259
5260	if (pd->dir == (*state)->direction) {
5261		src = &(*state)->src;
5262		dst = &(*state)->dst;
5263		psrc = PF_PEER_SRC;
5264		pdst = PF_PEER_DST;
5265	} else {
5266		src = &(*state)->dst;
5267		dst = &(*state)->src;
5268		psrc = PF_PEER_DST;
5269		pdst = PF_PEER_SRC;
5270	}
5271
5272	if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
5273		sws = src->wscale & PF_WSCALE_MASK;
5274		dws = dst->wscale & PF_WSCALE_MASK;
5275	} else
5276		sws = dws = 0;
5277
5278	/*
5279	 * Sequence tracking algorithm from Guido van Rooij's paper:
5280	 *   http://www.madison-gurkha.com/publications/tcp_filtering/
5281	 *	tcp_filtering.ps
5282	 */
5283
5284	orig_seq = seq = ntohl(th->th_seq);
5285	if (src->seqlo == 0) {
5286		/* First packet from this end. Set its state */
5287
5288		if (((*state)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) &&
5289		    src->scrub == NULL) {
5290			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
5291				REASON_SET(reason, PFRES_MEMORY);
5292				return (PF_DROP);
5293			}
5294		}
5295
5296		/* Deferred generation of sequence number modulator */
5297		if (dst->seqdiff && !src->seqdiff) {
5298			/* use random iss for the TCP server */
5299			while ((src->seqdiff = arc4random() - seq) == 0)
5300				;
5301			ack = ntohl(th->th_ack) - dst->seqdiff;
5302			pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
5303			    src->seqdiff), 0);
5304			pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
5305			*copyback = 1;
5306		} else {
5307			ack = ntohl(th->th_ack);
5308		}
5309
5310		end = seq + pd->p_len;
5311		if (th->th_flags & TH_SYN) {
5312			end++;
5313			if (dst->wscale & PF_WSCALE_FLAG) {
5314				src->wscale = pf_get_wscale(m, off, th->th_off,
5315				    pd->af);
5316				if (src->wscale & PF_WSCALE_FLAG) {
5317					/* Remove scale factor from initial
5318					 * window */
5319					sws = src->wscale & PF_WSCALE_MASK;
5320					win = ((u_int32_t)win + (1 << sws) - 1)
5321					    >> sws;
5322					dws = dst->wscale & PF_WSCALE_MASK;
5323				} else {
5324					/* fixup other window */
5325					dst->max_win <<= dst->wscale &
5326					    PF_WSCALE_MASK;
5327					/* in case of a retrans SYN|ACK */
5328					dst->wscale = 0;
5329				}
5330			}
5331		}
5332		if (th->th_flags & TH_FIN)
5333			end++;
5334
5335		src->seqlo = seq;
5336		if (src->state < TCPS_SYN_SENT)
5337			pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
5338
5339		/*
5340		 * May need to slide the window (seqhi may have been set by
5341		 * the crappy stack check or if we picked up the connection
5342		 * after establishment)
5343		 */
5344		if (src->seqhi == 1 ||
5345		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
5346			src->seqhi = end + MAX(1, dst->max_win << dws);
5347		if (win > src->max_win)
5348			src->max_win = win;
5349
5350	} else {
5351		ack = ntohl(th->th_ack) - dst->seqdiff;
5352		if (src->seqdiff) {
5353			/* Modulate sequence numbers */
5354			pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
5355			    src->seqdiff), 0);
5356			pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
5357			*copyback = 1;
5358		}
5359		end = seq + pd->p_len;
5360		if (th->th_flags & TH_SYN)
5361			end++;
5362		if (th->th_flags & TH_FIN)
5363			end++;
5364	}
5365
5366	if ((th->th_flags & TH_ACK) == 0) {
5367		/* Let it pass through the ack skew check */
5368		ack = dst->seqlo;
5369	} else if ((ack == 0 &&
5370	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
5371	    /* broken tcp stacks do not set ack */
5372	    (dst->state < TCPS_SYN_SENT)) {
5373		/*
5374		 * Many stacks (ours included) will set the ACK number in an
5375		 * FIN|ACK if the SYN times out -- no sequence to ACK.
5376		 */
5377		ack = dst->seqlo;
5378	}
5379
5380	if (seq == end) {
5381		/* Ease sequencing restrictions on no data packets */
5382		seq = src->seqlo;
5383		end = seq;
5384	}
5385
5386	ackskew = dst->seqlo - ack;
5387
5388	/*
5389	 * Need to demodulate the sequence numbers in any TCP SACK options
5390	 * (Selective ACK). We could optionally validate the SACK values
5391	 * against the current ACK window, either forwards or backwards, but
5392	 * I'm not confident that SACK has been implemented properly
5393	 * everywhere. It wouldn't surprise me if several stacks accidentally
5394	 * SACK too far backwards of previously ACKed data. There really aren't
5395	 * any security implications of bad SACKing unless the target stack
5396	 * doesn't validate the option length correctly. Someone trying to
5397	 * spoof into a TCP connection won't bother blindly sending SACK
5398	 * options anyway.
5399	 */
5400	if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
5401		if (pf_modulate_sack(m, off, pd, th, dst))
5402			*copyback = 1;
5403	}
5404
5405#define	MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
5406	if (SEQ_GEQ(src->seqhi, end) &&
5407	    /* Last octet inside other's window space */
5408	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
5409	    /* Retrans: not more than one window back */
5410	    (ackskew >= -MAXACKWINDOW) &&
5411	    /* Acking not more than one reassembled fragment backwards */
5412	    (ackskew <= (MAXACKWINDOW << sws)) &&
5413	    /* Acking not more than one window forward */
5414	    ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
5415	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) {
5416	    /* Require an exact/+1 sequence match on resets when possible */
5417
5418		if (dst->scrub || src->scrub) {
5419			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
5420			    *state, src, dst, copyback))
5421				return (PF_DROP);
5422		}
5423
5424		/* update max window */
5425		if (src->max_win < win)
5426			src->max_win = win;
5427		/* synchronize sequencing */
5428		if (SEQ_GT(end, src->seqlo))
5429			src->seqlo = end;
5430		/* slide the window of what the other end can send */
5431		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5432			dst->seqhi = ack + MAX((win << sws), 1);
5433
5434		/* update states */
5435		if (th->th_flags & TH_SYN)
5436			if (src->state < TCPS_SYN_SENT)
5437				pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
5438		if (th->th_flags & TH_FIN)
5439			if (src->state < TCPS_CLOSING)
5440				pf_set_protostate(*state, psrc, TCPS_CLOSING);
5441		if (th->th_flags & TH_ACK) {
5442			if (dst->state == TCPS_SYN_SENT) {
5443				pf_set_protostate(*state, pdst,
5444				    TCPS_ESTABLISHED);
5445				if (src->state == TCPS_ESTABLISHED &&
5446				    (*state)->src_node != NULL &&
5447				    pf_src_connlimit(state)) {
5448					REASON_SET(reason, PFRES_SRCLIMIT);
5449					return (PF_DROP);
5450				}
5451			} else if (dst->state == TCPS_CLOSING)
5452				pf_set_protostate(*state, pdst,
5453				    TCPS_FIN_WAIT_2);
5454		}
5455		if (th->th_flags & TH_RST)
5456			pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
5457
5458		/* update expire time */
5459		(*state)->expire = pf_get_uptime();
5460		if (src->state >= TCPS_FIN_WAIT_2 &&
5461		    dst->state >= TCPS_FIN_WAIT_2)
5462			(*state)->timeout = PFTM_TCP_CLOSED;
5463		else if (src->state >= TCPS_CLOSING &&
5464		    dst->state >= TCPS_CLOSING)
5465			(*state)->timeout = PFTM_TCP_FIN_WAIT;
5466		else if (src->state < TCPS_ESTABLISHED ||
5467		    dst->state < TCPS_ESTABLISHED)
5468			(*state)->timeout = PFTM_TCP_OPENING;
5469		else if (src->state >= TCPS_CLOSING ||
5470		    dst->state >= TCPS_CLOSING)
5471			(*state)->timeout = PFTM_TCP_CLOSING;
5472		else
5473			(*state)->timeout = PFTM_TCP_ESTABLISHED;
5474
5475		/* Fall through to PASS packet */
5476
5477	} else if ((dst->state < TCPS_SYN_SENT ||
5478		dst->state >= TCPS_FIN_WAIT_2 ||
5479		src->state >= TCPS_FIN_WAIT_2) &&
5480	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
5481	    /* Within a window forward of the originating packet */
5482	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
5483	    /* Within a window backward of the originating packet */
5484
5485		/*
5486		 * This currently handles three situations:
5487		 *  1) Stupid stacks will shotgun SYNs before their peer
5488		 *     replies.
5489		 *  2) When PF catches an already established stream (the
5490		 *     firewall rebooted, the state table was flushed, routes
5491		 *     changed...)
5492		 *  3) Packets get funky immediately after the connection
5493		 *     closes (this should catch Solaris spurious ACK|FINs
5494		 *     that web servers like to spew after a close)
5495		 *
5496		 * This must be a little more careful than the above code
5497		 * since packet floods will also be caught here. We don't
5498		 * update the TTL here to mitigate the damage of a packet
5499		 * flood and so the same code can handle awkward establishment
5500		 * and a loosened connection close.
5501		 * In the establishment case, a correct peer response will
5502		 * validate the connection, go through the normal state code
5503		 * and keep updating the state TTL.
5504		 */
5505
5506		if (V_pf_status.debug >= PF_DEBUG_MISC) {
5507			printf("pf: loose state match: ");
5508			pf_print_state(*state);
5509			pf_print_flags(th->th_flags);
5510			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
5511			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
5512			    pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
5513			    (unsigned long long)(*state)->packets[1],
5514			    pd->dir == PF_IN ? "in" : "out",
5515			    pd->dir == (*state)->direction ? "fwd" : "rev");
5516		}
5517
5518		if (dst->scrub || src->scrub) {
5519			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
5520			    *state, src, dst, copyback))
5521				return (PF_DROP);
5522		}
5523
5524		/* update max window */
5525		if (src->max_win < win)
5526			src->max_win = win;
5527		/* synchronize sequencing */
5528		if (SEQ_GT(end, src->seqlo))
5529			src->seqlo = end;
5530		/* slide the window of what the other end can send */
5531		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5532			dst->seqhi = ack + MAX((win << sws), 1);
5533
5534		/*
5535		 * Cannot set dst->seqhi here since this could be a shotgunned
5536		 * SYN and not an already established connection.
5537		 */
5538
5539		if (th->th_flags & TH_FIN)
5540			if (src->state < TCPS_CLOSING)
5541				pf_set_protostate(*state, psrc, TCPS_CLOSING);
5542		if (th->th_flags & TH_RST)
5543			pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
5544
5545		/* Fall through to PASS packet */
5546
5547	} else {
5548		if ((*state)->dst.state == TCPS_SYN_SENT &&
5549		    (*state)->src.state == TCPS_SYN_SENT) {
5550			/* Send RST for state mismatches during handshake */
5551			if (!(th->th_flags & TH_RST))
5552				pf_send_tcp((*state)->rule.ptr, pd->af,
5553				    pd->dst, pd->src, th->th_dport,
5554				    th->th_sport, ntohl(th->th_ack), 0,
5555				    TH_RST, 0, 0,
5556				    (*state)->rule.ptr->return_ttl, true, 0, 0,
5557				    (*state)->act.rtableid);
5558			src->seqlo = 0;
5559			src->seqhi = 1;
5560			src->max_win = 1;
5561		} else if (V_pf_status.debug >= PF_DEBUG_MISC) {
5562			printf("pf: BAD state: ");
5563			pf_print_state(*state);
5564			pf_print_flags(th->th_flags);
5565			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
5566			    "pkts=%llu:%llu dir=%s,%s\n",
5567			    seq, orig_seq, ack, pd->p_len, ackskew,
5568			    (unsigned long long)(*state)->packets[0],
5569			    (unsigned long long)(*state)->packets[1],
5570			    pd->dir == PF_IN ? "in" : "out",
5571			    pd->dir == (*state)->direction ? "fwd" : "rev");
5572			printf("pf: State failure on: %c %c %c %c | %c %c\n",
5573			    SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
5574			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
5575			    ' ': '2',
5576			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
5577			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
5578			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
5579			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
5580		}
5581		REASON_SET(reason, PFRES_BADSTATE);
5582		return (PF_DROP);
5583	}
5584
5585	return (PF_PASS);
5586}
5587
5588static int
5589pf_tcp_track_sloppy(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
5590{
5591	struct tcphdr		*th = &pd->hdr.tcp;
5592	struct pf_state_peer	*src, *dst;
5593	u_int8_t		 psrc, pdst;
5594
5595	if (pd->dir == (*state)->direction) {
5596		src = &(*state)->src;
5597		dst = &(*state)->dst;
5598		psrc = PF_PEER_SRC;
5599		pdst = PF_PEER_DST;
5600	} else {
5601		src = &(*state)->dst;
5602		dst = &(*state)->src;
5603		psrc = PF_PEER_DST;
5604		pdst = PF_PEER_SRC;
5605	}
5606
5607	if (th->th_flags & TH_SYN)
5608		if (src->state < TCPS_SYN_SENT)
5609			pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
5610	if (th->th_flags & TH_FIN)
5611		if (src->state < TCPS_CLOSING)
5612			pf_set_protostate(*state, psrc, TCPS_CLOSING);
5613	if (th->th_flags & TH_ACK) {
5614		if (dst->state == TCPS_SYN_SENT) {
5615			pf_set_protostate(*state, pdst, TCPS_ESTABLISHED);
5616			if (src->state == TCPS_ESTABLISHED &&
5617			    (*state)->src_node != NULL &&
5618			    pf_src_connlimit(state)) {
5619				REASON_SET(reason, PFRES_SRCLIMIT);
5620				return (PF_DROP);
5621			}
5622		} else if (dst->state == TCPS_CLOSING) {
5623			pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2);
5624		} else if (src->state == TCPS_SYN_SENT &&
5625		    dst->state < TCPS_SYN_SENT) {
5626			/*
5627			 * Handle a special sloppy case where we only see one
5628			 * half of the connection. If there is a ACK after
5629			 * the initial SYN without ever seeing a packet from
5630			 * the destination, set the connection to established.
5631			 */
5632			pf_set_protostate(*state, PF_PEER_BOTH,
5633			    TCPS_ESTABLISHED);
5634			dst->state = src->state = TCPS_ESTABLISHED;
5635			if ((*state)->src_node != NULL &&
5636			    pf_src_connlimit(state)) {
5637				REASON_SET(reason, PFRES_SRCLIMIT);
5638				return (PF_DROP);
5639			}
5640		} else if (src->state == TCPS_CLOSING &&
5641		    dst->state == TCPS_ESTABLISHED &&
5642		    dst->seqlo == 0) {
5643			/*
5644			 * Handle the closing of half connections where we
5645			 * don't see the full bidirectional FIN/ACK+ACK
5646			 * handshake.
5647			 */
5648			pf_set_protostate(*state, pdst, TCPS_CLOSING);
5649		}
5650	}
5651	if (th->th_flags & TH_RST)
5652		pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
5653
5654	/* update expire time */
5655	(*state)->expire = pf_get_uptime();
5656	if (src->state >= TCPS_FIN_WAIT_2 &&
5657	    dst->state >= TCPS_FIN_WAIT_2)
5658		(*state)->timeout = PFTM_TCP_CLOSED;
5659	else if (src->state >= TCPS_CLOSING &&
5660	    dst->state >= TCPS_CLOSING)
5661		(*state)->timeout = PFTM_TCP_FIN_WAIT;
5662	else if (src->state < TCPS_ESTABLISHED ||
5663	    dst->state < TCPS_ESTABLISHED)
5664		(*state)->timeout = PFTM_TCP_OPENING;
5665	else if (src->state >= TCPS_CLOSING ||
5666	    dst->state >= TCPS_CLOSING)
5667		(*state)->timeout = PFTM_TCP_CLOSING;
5668	else
5669		(*state)->timeout = PFTM_TCP_ESTABLISHED;
5670
5671	return (PF_PASS);
5672}
5673
5674static int
5675pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
5676{
5677	struct pf_state_key	*sk = (*state)->key[pd->didx];
5678	struct tcphdr		*th = &pd->hdr.tcp;
5679
5680	if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
5681		if (pd->dir != (*state)->direction) {
5682			REASON_SET(reason, PFRES_SYNPROXY);
5683			return (PF_SYNPROXY_DROP);
5684		}
5685		if (th->th_flags & TH_SYN) {
5686			if (ntohl(th->th_seq) != (*state)->src.seqlo) {
5687				REASON_SET(reason, PFRES_SYNPROXY);
5688				return (PF_DROP);
5689			}
5690			pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
5691			    pd->src, th->th_dport, th->th_sport,
5692			    (*state)->src.seqhi, ntohl(th->th_seq) + 1,
5693			    TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, true, 0, 0,
5694			    (*state)->act.rtableid);
5695			REASON_SET(reason, PFRES_SYNPROXY);
5696			return (PF_SYNPROXY_DROP);
5697		} else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
5698		    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
5699		    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
5700			REASON_SET(reason, PFRES_SYNPROXY);
5701			return (PF_DROP);
5702		} else if ((*state)->src_node != NULL &&
5703		    pf_src_connlimit(state)) {
5704			REASON_SET(reason, PFRES_SRCLIMIT);
5705			return (PF_DROP);
5706		} else
5707			pf_set_protostate(*state, PF_PEER_SRC,
5708			    PF_TCPS_PROXY_DST);
5709	}
5710	if ((*state)->src.state == PF_TCPS_PROXY_DST) {
5711		if (pd->dir == (*state)->direction) {
5712			if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
5713			    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
5714			    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
5715				REASON_SET(reason, PFRES_SYNPROXY);
5716				return (PF_DROP);
5717			}
5718			(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
5719			if ((*state)->dst.seqhi == 1)
5720				(*state)->dst.seqhi = htonl(arc4random());
5721			pf_send_tcp((*state)->rule.ptr, pd->af,
5722			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
5723			    sk->port[pd->sidx], sk->port[pd->didx],
5724			    (*state)->dst.seqhi, 0, TH_SYN, 0,
5725			    (*state)->src.mss, 0, false, (*state)->tag, 0,
5726			    (*state)->act.rtableid);
5727			REASON_SET(reason, PFRES_SYNPROXY);
5728			return (PF_SYNPROXY_DROP);
5729		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
5730		    (TH_SYN|TH_ACK)) ||
5731		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
5732			REASON_SET(reason, PFRES_SYNPROXY);
5733			return (PF_DROP);
5734		} else {
5735			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
5736			(*state)->dst.seqlo = ntohl(th->th_seq);
5737			pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
5738			    pd->src, th->th_dport, th->th_sport,
5739			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
5740			    TH_ACK, (*state)->src.max_win, 0, 0, false,
5741			    (*state)->tag, 0, (*state)->act.rtableid);
5742			pf_send_tcp((*state)->rule.ptr, pd->af,
5743			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
5744			    sk->port[pd->sidx], sk->port[pd->didx],
5745			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
5746			    TH_ACK, (*state)->dst.max_win, 0, 0, true, 0, 0,
5747			    (*state)->act.rtableid);
5748			(*state)->src.seqdiff = (*state)->dst.seqhi -
5749			    (*state)->src.seqlo;
5750			(*state)->dst.seqdiff = (*state)->src.seqhi -
5751			    (*state)->dst.seqlo;
5752			(*state)->src.seqhi = (*state)->src.seqlo +
5753			    (*state)->dst.max_win;
5754			(*state)->dst.seqhi = (*state)->dst.seqlo +
5755			    (*state)->src.max_win;
5756			(*state)->src.wscale = (*state)->dst.wscale = 0;
5757			pf_set_protostate(*state, PF_PEER_BOTH,
5758			    TCPS_ESTABLISHED);
5759			REASON_SET(reason, PFRES_SYNPROXY);
5760			return (PF_SYNPROXY_DROP);
5761		}
5762	}
5763
5764	return (PF_PASS);
5765}
5766
5767static int
5768pf_test_state_tcp(struct pf_kstate **state, struct pfi_kkif *kif,
5769    struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
5770    u_short *reason)
5771{
5772	struct pf_state_key_cmp	 key;
5773	struct tcphdr		*th = &pd->hdr.tcp;
5774	int			 copyback = 0;
5775	int			 action;
5776	struct pf_state_peer	*src, *dst;
5777
5778	bzero(&key, sizeof(key));
5779	key.af = pd->af;
5780	key.proto = IPPROTO_TCP;
5781	if (pd->dir == PF_IN)	{	/* wire side, straight */
5782		PF_ACPY(&key.addr[0], pd->src, key.af);
5783		PF_ACPY(&key.addr[1], pd->dst, key.af);
5784		key.port[0] = th->th_sport;
5785		key.port[1] = th->th_dport;
5786	} else {			/* stack side, reverse */
5787		PF_ACPY(&key.addr[1], pd->src, key.af);
5788		PF_ACPY(&key.addr[0], pd->dst, key.af);
5789		key.port[1] = th->th_sport;
5790		key.port[0] = th->th_dport;
5791	}
5792
5793	STATE_LOOKUP(kif, &key, *state, pd);
5794
5795	if (pd->dir == (*state)->direction) {
5796		src = &(*state)->src;
5797		dst = &(*state)->dst;
5798	} else {
5799		src = &(*state)->dst;
5800		dst = &(*state)->src;
5801	}
5802
5803	if ((action = pf_synproxy(pd, state, reason)) != PF_PASS)
5804		return (action);
5805
5806	if (dst->state >= TCPS_FIN_WAIT_2 &&
5807	    src->state >= TCPS_FIN_WAIT_2 &&
5808	    (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) ||
5809	    ((th->th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK &&
5810	    pf_syncookie_check(pd) && pd->dir == PF_IN))) {
5811		if (V_pf_status.debug >= PF_DEBUG_MISC) {
5812			printf("pf: state reuse ");
5813			pf_print_state(*state);
5814			pf_print_flags(th->th_flags);
5815			printf("\n");
5816		}
5817		/* XXX make sure it's the same direction ?? */
5818		pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
5819		pf_unlink_state(*state);
5820		*state = NULL;
5821		return (PF_DROP);
5822	}
5823
5824	if ((*state)->state_flags & PFSTATE_SLOPPY) {
5825		if (pf_tcp_track_sloppy(state, pd, reason) == PF_DROP)
5826			return (PF_DROP);
5827	} else {
5828		if (pf_tcp_track_full(state, kif, m, off, pd, reason,
5829		    &copyback) == PF_DROP)
5830			return (PF_DROP);
5831	}
5832
5833	/* translate source/destination address, if necessary */
5834	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5835		struct pf_state_key *nk = (*state)->key[pd->didx];
5836
5837		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
5838		    nk->port[pd->sidx] != th->th_sport)
5839			pf_change_ap(m, pd->src, &th->th_sport,
5840			    pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
5841			    nk->port[pd->sidx], 0, pd->af);
5842
5843		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
5844		    nk->port[pd->didx] != th->th_dport)
5845			pf_change_ap(m, pd->dst, &th->th_dport,
5846			    pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
5847			    nk->port[pd->didx], 0, pd->af);
5848		copyback = 1;
5849	}
5850
5851	/* Copyback sequence modulation or stateful scrub changes if needed */
5852	if (copyback)
5853		m_copyback(m, off, sizeof(*th), (caddr_t)th);
5854
5855	return (PF_PASS);
5856}
5857
5858static int
5859pf_test_state_udp(struct pf_kstate **state, struct pfi_kkif *kif,
5860    struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
5861{
5862	struct pf_state_peer	*src, *dst;
5863	struct pf_state_key_cmp	 key;
5864	struct udphdr		*uh = &pd->hdr.udp;
5865	uint8_t			 psrc, pdst;
5866
5867	bzero(&key, sizeof(key));
5868	key.af = pd->af;
5869	key.proto = IPPROTO_UDP;
5870	if (pd->dir == PF_IN)	{	/* wire side, straight */
5871		PF_ACPY(&key.addr[0], pd->src, key.af);
5872		PF_ACPY(&key.addr[1], pd->dst, key.af);
5873		key.port[0] = uh->uh_sport;
5874		key.port[1] = uh->uh_dport;
5875	} else {			/* stack side, reverse */
5876		PF_ACPY(&key.addr[1], pd->src, key.af);
5877		PF_ACPY(&key.addr[0], pd->dst, key.af);
5878		key.port[1] = uh->uh_sport;
5879		key.port[0] = uh->uh_dport;
5880	}
5881
5882	STATE_LOOKUP(kif, &key, *state, pd);
5883
5884	if (pd->dir == (*state)->direction) {
5885		src = &(*state)->src;
5886		dst = &(*state)->dst;
5887		psrc = PF_PEER_SRC;
5888		pdst = PF_PEER_DST;
5889	} else {
5890		src = &(*state)->dst;
5891		dst = &(*state)->src;
5892		psrc = PF_PEER_DST;
5893		pdst = PF_PEER_SRC;
5894	}
5895
5896	/* update states */
5897	if (src->state < PFUDPS_SINGLE)
5898		pf_set_protostate(*state, psrc, PFUDPS_SINGLE);
5899	if (dst->state == PFUDPS_SINGLE)
5900		pf_set_protostate(*state, pdst, PFUDPS_MULTIPLE);
5901
5902	/* update expire time */
5903	(*state)->expire = pf_get_uptime();
5904	if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
5905		(*state)->timeout = PFTM_UDP_MULTIPLE;
5906	else
5907		(*state)->timeout = PFTM_UDP_SINGLE;
5908
5909	/* translate source/destination address, if necessary */
5910	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5911		struct pf_state_key *nk = (*state)->key[pd->didx];
5912
5913		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
5914		    nk->port[pd->sidx] != uh->uh_sport)
5915			pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
5916			    &uh->uh_sum, &nk->addr[pd->sidx],
5917			    nk->port[pd->sidx], 1, pd->af);
5918
5919		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
5920		    nk->port[pd->didx] != uh->uh_dport)
5921			pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
5922			    &uh->uh_sum, &nk->addr[pd->didx],
5923			    nk->port[pd->didx], 1, pd->af);
5924		m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
5925	}
5926
5927	return (PF_PASS);
5928}
5929
5930static int
5931pf_test_state_sctp(struct pf_kstate **state, struct pfi_kkif *kif,
5932    struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
5933{
5934	struct pf_state_key_cmp	 key;
5935	struct pf_state_peer	*src, *dst;
5936	struct sctphdr		*sh = &pd->hdr.sctp;
5937	u_int8_t		 psrc; //, pdst;
5938
5939	bzero(&key, sizeof(key));
5940	key.af = pd->af;
5941	key.proto = IPPROTO_SCTP;
5942	if (pd->dir == PF_IN)	{	/* wire side, straight */
5943		PF_ACPY(&key.addr[0], pd->src, key.af);
5944		PF_ACPY(&key.addr[1], pd->dst, key.af);
5945		key.port[0] = sh->src_port;
5946		key.port[1] = sh->dest_port;
5947	} else {			/* stack side, reverse */
5948		PF_ACPY(&key.addr[1], pd->src, key.af);
5949		PF_ACPY(&key.addr[0], pd->dst, key.af);
5950		key.port[1] = sh->src_port;
5951		key.port[0] = sh->dest_port;
5952	}
5953
5954	STATE_LOOKUP(kif, &key, *state, pd);
5955
5956	if (pd->dir == (*state)->direction) {
5957		src = &(*state)->src;
5958		dst = &(*state)->dst;
5959		psrc = PF_PEER_SRC;
5960	} else {
5961		src = &(*state)->dst;
5962		dst = &(*state)->src;
5963		psrc = PF_PEER_DST;
5964	}
5965
5966	/* Track state. */
5967	if (pd->sctp_flags & PFDESC_SCTP_INIT) {
5968		if (src->state < SCTP_COOKIE_WAIT) {
5969			pf_set_protostate(*state, psrc, SCTP_COOKIE_WAIT);
5970			(*state)->timeout = PFTM_SCTP_OPENING;
5971		}
5972	}
5973	if (pd->sctp_flags & PFDESC_SCTP_INIT_ACK) {
5974		MPASS(dst->scrub != NULL);
5975		if (dst->scrub->pfss_v_tag == 0)
5976			dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;
5977	}
5978
5979	if (pd->sctp_flags & (PFDESC_SCTP_COOKIE | PFDESC_SCTP_HEARTBEAT_ACK)) {
5980		if (src->state < SCTP_ESTABLISHED) {
5981			pf_set_protostate(*state, psrc, SCTP_ESTABLISHED);
5982			(*state)->timeout = PFTM_SCTP_ESTABLISHED;
5983		}
5984	}
5985	if (pd->sctp_flags & (PFDESC_SCTP_SHUTDOWN | PFDESC_SCTP_ABORT |
5986	    PFDESC_SCTP_SHUTDOWN_COMPLETE)) {
5987		if (src->state < SCTP_SHUTDOWN_PENDING) {
5988			pf_set_protostate(*state, psrc, SCTP_SHUTDOWN_PENDING);
5989			(*state)->timeout = PFTM_SCTP_CLOSING;
5990		}
5991	}
5992	if (pd->sctp_flags & (PFDESC_SCTP_SHUTDOWN_COMPLETE)) {
5993		pf_set_protostate(*state, psrc, SCTP_CLOSED);
5994		(*state)->timeout = PFTM_SCTP_CLOSED;
5995	}
5996
5997	if (src->scrub != NULL) {
5998		if (src->scrub->pfss_v_tag == 0) {
5999			src->scrub->pfss_v_tag = pd->hdr.sctp.v_tag;
6000		} else  if (src->scrub->pfss_v_tag != pd->hdr.sctp.v_tag)
6001			return (PF_DROP);
6002	}
6003
6004	(*state)->expire = pf_get_uptime();
6005
6006	/* translate source/destination address, if necessary */
6007	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
6008		uint16_t checksum = 0;
6009		struct pf_state_key *nk = (*state)->key[pd->didx];
6010
6011		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
6012		    nk->port[pd->sidx] != pd->hdr.sctp.src_port) {
6013			pf_change_ap(m, pd->src, &pd->hdr.sctp.src_port,
6014			    pd->ip_sum, &checksum, &nk->addr[pd->sidx],
6015			    nk->port[pd->sidx], 1, pd->af);
6016		}
6017
6018		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
6019		    nk->port[pd->didx] != pd->hdr.sctp.dest_port) {
6020			pf_change_ap(m, pd->dst, &pd->hdr.sctp.dest_port,
6021			    pd->ip_sum, &checksum, &nk->addr[pd->didx],
6022			    nk->port[pd->didx], 1, pd->af);
6023		}
6024	}
6025
6026	return (PF_PASS);
6027}
6028
6029static void
6030pf_sctp_multihome_detach_addr(const struct pf_kstate *s)
6031{
6032	struct pf_sctp_endpoint key;
6033	struct pf_sctp_endpoint *ep;
6034	struct pf_state_key *sks = s->key[PF_SK_STACK];
6035	struct pf_sctp_source *i, *tmp;
6036
6037	if (sks == NULL || sks->proto != IPPROTO_SCTP || s->dst.scrub == NULL)
6038		return;
6039
6040	PF_SCTP_ENDPOINTS_LOCK();
6041
6042	key.v_tag = s->dst.scrub->pfss_v_tag;
6043	ep  = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
6044	if (ep != NULL) {
6045		/* XXX Actually remove! */
6046		TAILQ_FOREACH_SAFE(i, &ep->sources, entry, tmp) {
6047			if (pf_addr_cmp(&i->addr,
6048			    &s->key[PF_SK_WIRE]->addr[s->direction == PF_OUT],
6049			    s->key[PF_SK_WIRE]->af) == 0) {
6050				TAILQ_REMOVE(&ep->sources, i, entry);
6051				free(i, M_PFTEMP);
6052				break;
6053			}
6054		}
6055
6056		if (TAILQ_EMPTY(&ep->sources)) {
6057			RB_REMOVE(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep);
6058			free(ep, M_PFTEMP);
6059		}
6060	}
6061
6062	/* Other direction. */
6063	key.v_tag = s->src.scrub->pfss_v_tag;
6064	ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
6065	if (ep != NULL) {
6066		TAILQ_FOREACH_SAFE(i, &ep->sources, entry, tmp) {
6067			if (pf_addr_cmp(&i->addr,
6068			    &s->key[PF_SK_WIRE]->addr[s->direction == PF_IN],
6069			    s->key[PF_SK_WIRE]->af) == 0) {
6070				TAILQ_REMOVE(&ep->sources, i, entry);
6071				free(i, M_PFTEMP);
6072				break;
6073			}
6074		}
6075
6076		if (TAILQ_EMPTY(&ep->sources)) {
6077			RB_REMOVE(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep);
6078			free(ep, M_PFTEMP);
6079		}
6080	}
6081
6082	PF_SCTP_ENDPOINTS_UNLOCK();
6083}
6084
6085static void
6086pf_sctp_multihome_add_addr(struct pf_pdesc *pd, struct pf_addr *a, uint32_t v_tag)
6087{
6088	struct pf_sctp_endpoint key = {
6089		.v_tag = v_tag,
6090	};
6091	struct pf_sctp_source *i;
6092	struct pf_sctp_endpoint *ep;
6093
6094	PF_SCTP_ENDPOINTS_LOCK();
6095
6096	ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
6097	if (ep == NULL) {
6098		ep = malloc(sizeof(struct pf_sctp_endpoint),
6099		    M_PFTEMP, M_NOWAIT);
6100		if (ep == NULL) {
6101			PF_SCTP_ENDPOINTS_UNLOCK();
6102			return;
6103		}
6104
6105		ep->v_tag = v_tag;
6106		TAILQ_INIT(&ep->sources);
6107		RB_INSERT(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep);
6108	}
6109
6110	/* Avoid inserting duplicates. */
6111	TAILQ_FOREACH(i, &ep->sources, entry) {
6112		if (pf_addr_cmp(&i->addr, a, pd->af) == 0) {
6113			PF_SCTP_ENDPOINTS_UNLOCK();
6114			return;
6115		}
6116	}
6117
6118	i = malloc(sizeof(*i), M_PFTEMP, M_NOWAIT);
6119	if (i == NULL) {
6120		PF_SCTP_ENDPOINTS_UNLOCK();
6121		return;
6122	}
6123
6124	i->af = pd->af;
6125	memcpy(&i->addr, a, sizeof(*a));
6126	TAILQ_INSERT_TAIL(&ep->sources, i, entry);
6127
6128	PF_SCTP_ENDPOINTS_UNLOCK();
6129}
6130
6131static void
6132pf_sctp_multihome_delayed(struct pf_pdesc *pd, int off, struct pfi_kkif *kif,
6133    struct pf_kstate *s, int action)
6134{
6135	struct pf_sctp_multihome_job	*j, *tmp;
6136	struct pf_sctp_source		*i;
6137	int			 ret __unused;
6138	struct pf_kstate	*sm = NULL;
6139	struct pf_krule		*ra = NULL;
6140	struct pf_krule		*r = &V_pf_default_rule;
6141	struct pf_kruleset	*rs = NULL;
6142	bool do_extra = true;
6143
6144	PF_RULES_RLOCK_TRACKER;
6145
6146again:
6147	TAILQ_FOREACH_SAFE(j, &pd->sctp_multihome_jobs, next, tmp) {
6148		if (s == NULL || action != PF_PASS)
6149			goto free;
6150
6151		/* Confirm we don't recurse here. */
6152		MPASS(! (pd->sctp_flags & PFDESC_SCTP_ADD_IP));
6153
6154		switch (j->op) {
6155		case  SCTP_ADD_IP_ADDRESS: {
6156			uint32_t v_tag = pd->sctp_initiate_tag;
6157
6158			if (v_tag == 0) {
6159				if (s->direction == pd->dir)
6160					v_tag = s->src.scrub->pfss_v_tag;
6161				else
6162					v_tag = s->dst.scrub->pfss_v_tag;
6163			}
6164
6165			/*
6166			 * Avoid duplicating states. We'll already have
6167			 * created a state based on the source address of
6168			 * the packet, but SCTP endpoints may also list this
6169			 * address again in the INIT(_ACK) parameters.
6170			 */
6171			if (pf_addr_cmp(&j->src, pd->src, pd->af) == 0) {
6172				break;
6173			}
6174
6175			j->pd.sctp_flags |= PFDESC_SCTP_ADD_IP;
6176			PF_RULES_RLOCK();
6177			sm = NULL;
6178			/*
6179			 * New connections need to be floating, because
6180			 * we cannot know what interfaces it will use.
6181			 * That's why we pass V_pfi_all rather than kif.
6182			 */
6183			ret = pf_test_rule(&r, &sm, V_pfi_all,
6184			    j->m, off, &j->pd, &ra, &rs, NULL);
6185			PF_RULES_RUNLOCK();
6186			SDT_PROBE4(pf, sctp, multihome, test, kif, r, j->m, ret);
6187			if (ret != PF_DROP && sm != NULL) {
6188				/* Inherit v_tag values. */
6189				if (sm->direction == s->direction) {
6190					sm->src.scrub->pfss_v_tag = s->src.scrub->pfss_v_tag;
6191					sm->dst.scrub->pfss_v_tag = s->dst.scrub->pfss_v_tag;
6192				} else {
6193					sm->src.scrub->pfss_v_tag = s->dst.scrub->pfss_v_tag;
6194					sm->dst.scrub->pfss_v_tag = s->src.scrub->pfss_v_tag;
6195				}
6196				PF_STATE_UNLOCK(sm);
6197			} else {
6198				/* If we try duplicate inserts? */
6199				break;
6200			}
6201
6202			/* Only add the address if we've actually allowed the state. */
6203			pf_sctp_multihome_add_addr(pd, &j->src, v_tag);
6204
6205			if (! do_extra) {
6206				break;
6207			}
6208			/*
6209			 * We need to do this for each of our source addresses.
6210			 * Find those based on the verification tag.
6211			 */
6212			struct pf_sctp_endpoint key = {
6213				.v_tag = pd->hdr.sctp.v_tag,
6214			};
6215			struct pf_sctp_endpoint *ep;
6216
6217			PF_SCTP_ENDPOINTS_LOCK();
6218			ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
6219			if (ep == NULL) {
6220				PF_SCTP_ENDPOINTS_UNLOCK();
6221				break;
6222			}
6223			MPASS(ep != NULL);
6224
6225			TAILQ_FOREACH(i, &ep->sources, entry) {
6226				struct pf_sctp_multihome_job *nj;
6227
6228				/* SCTP can intermingle IPv4 and IPv6. */
6229				if (i->af != pd->af)
6230					continue;
6231
6232				nj = malloc(sizeof(*nj), M_PFTEMP, M_NOWAIT | M_ZERO);
6233				if (! nj) {
6234					continue;
6235				}
6236				memcpy(&nj->pd, &j->pd, sizeof(j->pd));
6237				memcpy(&nj->src, &j->src, sizeof(nj->src));
6238				nj->pd.src = &nj->src;
6239				// New destination address!
6240				memcpy(&nj->dst, &i->addr, sizeof(nj->dst));
6241				nj->pd.dst = &nj->dst;
6242				nj->m = j->m;
6243				nj->op = j->op;
6244
6245				TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, nj, next);
6246			}
6247			PF_SCTP_ENDPOINTS_UNLOCK();
6248
6249			break;
6250		}
6251		case SCTP_DEL_IP_ADDRESS: {
6252			struct pf_state_key_cmp key;
6253			uint8_t psrc;
6254
6255			bzero(&key, sizeof(key));
6256			key.af = j->pd.af;
6257			key.proto = IPPROTO_SCTP;
6258			if (j->pd.dir == PF_IN)	{	/* wire side, straight */
6259				PF_ACPY(&key.addr[0], j->pd.src, key.af);
6260				PF_ACPY(&key.addr[1], j->pd.dst, key.af);
6261				key.port[0] = j->pd.hdr.sctp.src_port;
6262				key.port[1] = j->pd.hdr.sctp.dest_port;
6263			} else {			/* stack side, reverse */
6264				PF_ACPY(&key.addr[1], j->pd.src, key.af);
6265				PF_ACPY(&key.addr[0], j->pd.dst, key.af);
6266				key.port[1] = j->pd.hdr.sctp.src_port;
6267				key.port[0] = j->pd.hdr.sctp.dest_port;
6268			}
6269
6270			sm = pf_find_state(kif, &key, j->pd.dir);
6271			if (sm != NULL) {
6272				PF_STATE_LOCK_ASSERT(sm);
6273				if (j->pd.dir == sm->direction) {
6274					psrc = PF_PEER_SRC;
6275				} else {
6276					psrc = PF_PEER_DST;
6277				}
6278				pf_set_protostate(sm, psrc, SCTP_SHUTDOWN_PENDING);
6279				sm->timeout = PFTM_SCTP_CLOSING;
6280				PF_STATE_UNLOCK(sm);
6281			}
6282			break;
6283		default:
6284			panic("Unknown op %#x", j->op);
6285		}
6286	}
6287
6288	free:
6289		TAILQ_REMOVE(&pd->sctp_multihome_jobs, j, next);
6290		free(j, M_PFTEMP);
6291	}
6292
6293	/* We may have inserted extra work while processing the list. */
6294	if (! TAILQ_EMPTY(&pd->sctp_multihome_jobs)) {
6295		do_extra = false;
6296		goto again;
6297	}
6298}
6299
6300static int
6301pf_multihome_scan(struct mbuf *m, int start, int len, struct pf_pdesc *pd,
6302    struct pfi_kkif *kif, int op)
6303{
6304	int			 off = 0;
6305	struct pf_sctp_multihome_job	*job;
6306
6307	while (off < len) {
6308		struct sctp_paramhdr h;
6309
6310		if (!pf_pull_hdr(m, start + off, &h, sizeof(h), NULL, NULL,
6311		    pd->af))
6312			return (PF_DROP);
6313
6314		/* Parameters are at least 4 bytes. */
6315		if (ntohs(h.param_length) < 4)
6316			return (PF_DROP);
6317
6318		switch (ntohs(h.param_type)) {
6319		case  SCTP_IPV4_ADDRESS: {
6320			struct in_addr t;
6321
6322			if (ntohs(h.param_length) !=
6323			    (sizeof(struct sctp_paramhdr) + sizeof(t)))
6324				return (PF_DROP);
6325
6326			if (!pf_pull_hdr(m, start + off + sizeof(h), &t, sizeof(t),
6327			    NULL, NULL, pd->af))
6328				return (PF_DROP);
6329
6330			if (in_nullhost(t))
6331				t.s_addr = pd->src->v4.s_addr;
6332
6333			/*
6334			 * We hold the state lock (idhash) here, which means
6335			 * that we can't acquire the keyhash, or we'll get a
6336			 * LOR (and potentially double-lock things too). We also
6337			 * can't release the state lock here, so instead we'll
6338			 * enqueue this for async handling.
6339			 * There's a relatively small race here, in that a
6340			 * packet using the new addresses could arrive already,
6341			 * but that's just though luck for it.
6342			 */
6343			job = malloc(sizeof(*job), M_PFTEMP, M_NOWAIT | M_ZERO);
6344			if (! job)
6345				return (PF_DROP);
6346
6347			memcpy(&job->pd, pd, sizeof(*pd));
6348
6349			// New source address!
6350			memcpy(&job->src, &t, sizeof(t));
6351			job->pd.src = &job->src;
6352			memcpy(&job->dst, pd->dst, sizeof(job->dst));
6353			job->pd.dst = &job->dst;
6354			job->m = m;
6355			job->op = op;
6356
6357			TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
6358			break;
6359		}
6360#ifdef INET6
6361		case SCTP_IPV6_ADDRESS: {
6362			struct in6_addr t;
6363
6364			if (ntohs(h.param_length) !=
6365			    (sizeof(struct sctp_paramhdr) + sizeof(t)))
6366				return (PF_DROP);
6367
6368			if (!pf_pull_hdr(m, start + off + sizeof(h), &t, sizeof(t),
6369			    NULL, NULL, pd->af))
6370				return (PF_DROP);
6371			if (memcmp(&t, &pd->src->v6, sizeof(t)) == 0)
6372				break;
6373			if (memcmp(&t, &in6addr_any, sizeof(t)) == 0)
6374				memcpy(&t, &pd->src->v6, sizeof(t));
6375
6376			job = malloc(sizeof(*job), M_PFTEMP, M_NOWAIT | M_ZERO);
6377			if (! job)
6378				return (PF_DROP);
6379
6380			memcpy(&job->pd, pd, sizeof(*pd));
6381			memcpy(&job->src, &t, sizeof(t));
6382			job->pd.src = &job->src;
6383			memcpy(&job->dst, pd->dst, sizeof(job->dst));
6384			job->pd.dst = &job->dst;
6385			job->m = m;
6386			job->op = op;
6387
6388			TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
6389			break;
6390		}
6391#endif
6392		case SCTP_ADD_IP_ADDRESS: {
6393			int ret;
6394			struct sctp_asconf_paramhdr ah;
6395
6396			if (!pf_pull_hdr(m, start + off, &ah, sizeof(ah),
6397			    NULL, NULL, pd->af))
6398				return (PF_DROP);
6399
6400			ret = pf_multihome_scan(m, start + off + sizeof(ah),
6401			    ntohs(ah.ph.param_length) - sizeof(ah), pd, kif,
6402			    SCTP_ADD_IP_ADDRESS);
6403			if (ret != PF_PASS)
6404				return (ret);
6405			break;
6406		}
6407		case SCTP_DEL_IP_ADDRESS: {
6408			int ret;
6409			struct sctp_asconf_paramhdr ah;
6410
6411			if (!pf_pull_hdr(m, start + off, &ah, sizeof(ah),
6412			    NULL, NULL, pd->af))
6413				return (PF_DROP);
6414			ret = pf_multihome_scan(m, start + off + sizeof(ah),
6415			    ntohs(ah.ph.param_length) - sizeof(ah), pd, kif,
6416			    SCTP_DEL_IP_ADDRESS);
6417			if (ret != PF_PASS)
6418				return (ret);
6419			break;
6420		}
6421		default:
6422			break;
6423		}
6424
6425		off += roundup(ntohs(h.param_length), 4);
6426	}
6427
6428	return (PF_PASS);
6429}
6430int
6431pf_multihome_scan_init(struct mbuf *m, int start, int len, struct pf_pdesc *pd,
6432    struct pfi_kkif *kif)
6433{
6434	start += sizeof(struct sctp_init_chunk);
6435	len -= sizeof(struct sctp_init_chunk);
6436
6437	return (pf_multihome_scan(m, start, len, pd, kif, SCTP_ADD_IP_ADDRESS));
6438}
6439
6440int
6441pf_multihome_scan_asconf(struct mbuf *m, int start, int len,
6442    struct pf_pdesc *pd, struct pfi_kkif *kif)
6443{
6444	start += sizeof(struct sctp_asconf_chunk);
6445	len -= sizeof(struct sctp_asconf_chunk);
6446
6447	return (pf_multihome_scan(m, start, len, pd, kif, SCTP_ADD_IP_ADDRESS));
6448}
6449
6450static int
6451pf_test_state_icmp(struct pf_kstate **state, struct pfi_kkif *kif,
6452    struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
6453{
6454	struct pf_addr  *saddr = pd->src, *daddr = pd->dst;
6455	u_int16_t	 icmpid = 0, *icmpsum;
6456	u_int8_t	 icmptype, icmpcode;
6457	int		 state_icmp = 0;
6458	struct pf_state_key_cmp key;
6459
6460	bzero(&key, sizeof(key));
6461	switch (pd->proto) {
6462#ifdef INET
6463	case IPPROTO_ICMP:
6464		icmptype = pd->hdr.icmp.icmp_type;
6465		icmpcode = pd->hdr.icmp.icmp_code;
6466		icmpid = pd->hdr.icmp.icmp_id;
6467		icmpsum = &pd->hdr.icmp.icmp_cksum;
6468
6469		if (icmptype == ICMP_UNREACH ||
6470		    icmptype == ICMP_SOURCEQUENCH ||
6471		    icmptype == ICMP_REDIRECT ||
6472		    icmptype == ICMP_TIMXCEED ||
6473		    icmptype == ICMP_PARAMPROB)
6474			state_icmp++;
6475		break;
6476#endif /* INET */
6477#ifdef INET6
6478	case IPPROTO_ICMPV6:
6479		icmptype = pd->hdr.icmp6.icmp6_type;
6480		icmpcode = pd->hdr.icmp6.icmp6_code;
6481		icmpid = pd->hdr.icmp6.icmp6_id;
6482		icmpsum = &pd->hdr.icmp6.icmp6_cksum;
6483
6484		if (icmptype == ICMP6_DST_UNREACH ||
6485		    icmptype == ICMP6_PACKET_TOO_BIG ||
6486		    icmptype == ICMP6_TIME_EXCEEDED ||
6487		    icmptype == ICMP6_PARAM_PROB)
6488			state_icmp++;
6489		break;
6490#endif /* INET6 */
6491	}
6492
6493	if (!state_icmp) {
6494		/*
6495		 * ICMP query/reply message not related to a TCP/UDP packet.
6496		 * Search for an ICMP state.
6497		 */
6498		key.af = pd->af;
6499		key.proto = pd->proto;
6500		key.port[0] = key.port[1] = icmpid;
6501		if (pd->dir == PF_IN)	{	/* wire side, straight */
6502			PF_ACPY(&key.addr[0], pd->src, key.af);
6503			PF_ACPY(&key.addr[1], pd->dst, key.af);
6504		} else {			/* stack side, reverse */
6505			PF_ACPY(&key.addr[1], pd->src, key.af);
6506			PF_ACPY(&key.addr[0], pd->dst, key.af);
6507		}
6508
6509		STATE_LOOKUP(kif, &key, *state, pd);
6510
6511		(*state)->expire = pf_get_uptime();
6512		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;
6513
6514		/* translate source/destination address, if necessary */
6515		if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
6516			struct pf_state_key *nk = (*state)->key[pd->didx];
6517
6518			switch (pd->af) {
6519#ifdef INET
6520			case AF_INET:
6521				if (PF_ANEQ(pd->src,
6522				    &nk->addr[pd->sidx], AF_INET))
6523					pf_change_a(&saddr->v4.s_addr,
6524					    pd->ip_sum,
6525					    nk->addr[pd->sidx].v4.s_addr, 0);
6526
6527				if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
6528				    AF_INET))
6529					pf_change_a(&daddr->v4.s_addr,
6530					    pd->ip_sum,
6531					    nk->addr[pd->didx].v4.s_addr, 0);
6532
6533				if (nk->port[0] !=
6534				    pd->hdr.icmp.icmp_id) {
6535					pd->hdr.icmp.icmp_cksum =
6536					    pf_cksum_fixup(
6537					    pd->hdr.icmp.icmp_cksum, icmpid,
6538					    nk->port[pd->sidx], 0);
6539					pd->hdr.icmp.icmp_id =
6540					    nk->port[pd->sidx];
6541				}
6542
6543				m_copyback(m, off, ICMP_MINLEN,
6544				    (caddr_t )&pd->hdr.icmp);
6545				break;
6546#endif /* INET */
6547#ifdef INET6
6548			case AF_INET6:
6549				if (PF_ANEQ(pd->src,
6550				    &nk->addr[pd->sidx], AF_INET6))
6551					pf_change_a6(saddr,
6552					    &pd->hdr.icmp6.icmp6_cksum,
6553					    &nk->addr[pd->sidx], 0);
6554
6555				if (PF_ANEQ(pd->dst,
6556				    &nk->addr[pd->didx], AF_INET6))
6557					pf_change_a6(daddr,
6558					    &pd->hdr.icmp6.icmp6_cksum,
6559					    &nk->addr[pd->didx], 0);
6560
6561				m_copyback(m, off, sizeof(struct icmp6_hdr),
6562				    (caddr_t )&pd->hdr.icmp6);
6563				break;
6564#endif /* INET6 */
6565			}
6566		}
6567		return (PF_PASS);
6568
6569	} else {
6570		/*
6571		 * ICMP error message in response to a TCP/UDP packet.
6572		 * Extract the inner TCP/UDP header and search for that state.
6573		 */
6574
6575		struct pf_pdesc	pd2;
6576		bzero(&pd2, sizeof pd2);
6577#ifdef INET
6578		struct ip	h2;
6579#endif /* INET */
6580#ifdef INET6
6581		struct ip6_hdr	h2_6;
6582		int		terminal = 0;
6583#endif /* INET6 */
6584		int		ipoff2 = 0;
6585		int		off2 = 0;
6586
6587		pd2.af = pd->af;
6588		/* Payload packet is from the opposite direction. */
6589		pd2.sidx = (pd->dir == PF_IN) ? 1 : 0;
6590		pd2.didx = (pd->dir == PF_IN) ? 0 : 1;
6591		switch (pd->af) {
6592#ifdef INET
6593		case AF_INET:
6594			/* offset of h2 in mbuf chain */
6595			ipoff2 = off + ICMP_MINLEN;
6596
6597			if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
6598			    NULL, reason, pd2.af)) {
6599				DPFPRINTF(PF_DEBUG_MISC,
6600				    ("pf: ICMP error message too short "
6601				    "(ip)\n"));
6602				return (PF_DROP);
6603			}
6604			/*
6605			 * ICMP error messages don't refer to non-first
6606			 * fragments
6607			 */
6608			if (h2.ip_off & htons(IP_OFFMASK)) {
6609				REASON_SET(reason, PFRES_FRAG);
6610				return (PF_DROP);
6611			}
6612
6613			/* offset of protocol header that follows h2 */
6614			off2 = ipoff2 + (h2.ip_hl << 2);
6615
6616			pd2.proto = h2.ip_p;
6617			pd2.src = (struct pf_addr *)&h2.ip_src;
6618			pd2.dst = (struct pf_addr *)&h2.ip_dst;
6619			pd2.ip_sum = &h2.ip_sum;
6620			break;
6621#endif /* INET */
6622#ifdef INET6
6623		case AF_INET6:
6624			ipoff2 = off + sizeof(struct icmp6_hdr);
6625
6626			if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
6627			    NULL, reason, pd2.af)) {
6628				DPFPRINTF(PF_DEBUG_MISC,
6629				    ("pf: ICMP error message too short "
6630				    "(ip6)\n"));
6631				return (PF_DROP);
6632			}
6633			pd2.proto = h2_6.ip6_nxt;
6634			pd2.src = (struct pf_addr *)&h2_6.ip6_src;
6635			pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
6636			pd2.ip_sum = NULL;
6637			off2 = ipoff2 + sizeof(h2_6);
6638			do {
6639				switch (pd2.proto) {
6640				case IPPROTO_FRAGMENT:
6641					/*
6642					 * ICMPv6 error messages for
6643					 * non-first fragments
6644					 */
6645					REASON_SET(reason, PFRES_FRAG);
6646					return (PF_DROP);
6647				case IPPROTO_AH:
6648				case IPPROTO_HOPOPTS:
6649				case IPPROTO_ROUTING:
6650				case IPPROTO_DSTOPTS: {
6651					/* get next header and header length */
6652					struct ip6_ext opt6;
6653
6654					if (!pf_pull_hdr(m, off2, &opt6,
6655					    sizeof(opt6), NULL, reason,
6656					    pd2.af)) {
6657						DPFPRINTF(PF_DEBUG_MISC,
6658						    ("pf: ICMPv6 short opt\n"));
6659						return (PF_DROP);
6660					}
6661					if (pd2.proto == IPPROTO_AH)
6662						off2 += (opt6.ip6e_len + 2) * 4;
6663					else
6664						off2 += (opt6.ip6e_len + 1) * 8;
6665					pd2.proto = opt6.ip6e_nxt;
6666					/* goto the next header */
6667					break;
6668				}
6669				default:
6670					terminal++;
6671					break;
6672				}
6673			} while (!terminal);
6674			break;
6675#endif /* INET6 */
6676		}
6677
6678		if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
6679			if (V_pf_status.debug >= PF_DEBUG_MISC) {
6680				printf("pf: BAD ICMP %d:%d outer dst: ",
6681				    icmptype, icmpcode);
6682				pf_print_host(pd->src, 0, pd->af);
6683				printf(" -> ");
6684				pf_print_host(pd->dst, 0, pd->af);
6685				printf(" inner src: ");
6686				pf_print_host(pd2.src, 0, pd2.af);
6687				printf(" -> ");
6688				pf_print_host(pd2.dst, 0, pd2.af);
6689				printf("\n");
6690			}
6691			REASON_SET(reason, PFRES_BADSTATE);
6692			return (PF_DROP);
6693		}
6694
6695		switch (pd2.proto) {
6696		case IPPROTO_TCP: {
6697			struct tcphdr		 th;
6698			u_int32_t		 seq;
6699			struct pf_state_peer	*src, *dst;
6700			u_int8_t		 dws;
6701			int			 copyback = 0;
6702
6703			/*
6704			 * Only the first 8 bytes of the TCP header can be
6705			 * expected. Don't access any TCP header fields after
6706			 * th_seq, an ackskew test is not possible.
6707			 */
6708			if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
6709			    pd2.af)) {
6710				DPFPRINTF(PF_DEBUG_MISC,
6711				    ("pf: ICMP error message too short "
6712				    "(tcp)\n"));
6713				return (PF_DROP);
6714			}
6715
6716			key.af = pd2.af;
6717			key.proto = IPPROTO_TCP;
6718			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
6719			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
6720			key.port[pd2.sidx] = th.th_sport;
6721			key.port[pd2.didx] = th.th_dport;
6722
6723			STATE_LOOKUP(kif, &key, *state, pd);
6724
6725			if (pd->dir == (*state)->direction) {
6726				src = &(*state)->dst;
6727				dst = &(*state)->src;
6728			} else {
6729				src = &(*state)->src;
6730				dst = &(*state)->dst;
6731			}
6732
6733			if (src->wscale && dst->wscale)
6734				dws = dst->wscale & PF_WSCALE_MASK;
6735			else
6736				dws = 0;
6737
6738			/* Demodulate sequence number */
6739			seq = ntohl(th.th_seq) - src->seqdiff;
6740			if (src->seqdiff) {
6741				pf_change_a(&th.th_seq, icmpsum,
6742				    htonl(seq), 0);
6743				copyback = 1;
6744			}
6745
6746			if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
6747			    (!SEQ_GEQ(src->seqhi, seq) ||
6748			    !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
6749				if (V_pf_status.debug >= PF_DEBUG_MISC) {
6750					printf("pf: BAD ICMP %d:%d ",
6751					    icmptype, icmpcode);
6752					pf_print_host(pd->src, 0, pd->af);
6753					printf(" -> ");
6754					pf_print_host(pd->dst, 0, pd->af);
6755					printf(" state: ");
6756					pf_print_state(*state);
6757					printf(" seq=%u\n", seq);
6758				}
6759				REASON_SET(reason, PFRES_BADSTATE);
6760				return (PF_DROP);
6761			} else {
6762				if (V_pf_status.debug >= PF_DEBUG_MISC) {
6763					printf("pf: OK ICMP %d:%d ",
6764					    icmptype, icmpcode);
6765					pf_print_host(pd->src, 0, pd->af);
6766					printf(" -> ");
6767					pf_print_host(pd->dst, 0, pd->af);
6768					printf(" state: ");
6769					pf_print_state(*state);
6770					printf(" seq=%u\n", seq);
6771				}
6772			}
6773
6774			/* translate source/destination address, if necessary */
6775			if ((*state)->key[PF_SK_WIRE] !=
6776			    (*state)->key[PF_SK_STACK]) {
6777				struct pf_state_key *nk =
6778				    (*state)->key[pd->didx];
6779
6780				if (PF_ANEQ(pd2.src,
6781				    &nk->addr[pd2.sidx], pd2.af) ||
6782				    nk->port[pd2.sidx] != th.th_sport)
6783					pf_change_icmp(pd2.src, &th.th_sport,
6784					    daddr, &nk->addr[pd2.sidx],
6785					    nk->port[pd2.sidx], NULL,
6786					    pd2.ip_sum, icmpsum,
6787					    pd->ip_sum, 0, pd2.af);
6788
6789				if (PF_ANEQ(pd2.dst,
6790				    &nk->addr[pd2.didx], pd2.af) ||
6791				    nk->port[pd2.didx] != th.th_dport)
6792					pf_change_icmp(pd2.dst, &th.th_dport,
6793					    saddr, &nk->addr[pd2.didx],
6794					    nk->port[pd2.didx], NULL,
6795					    pd2.ip_sum, icmpsum,
6796					    pd->ip_sum, 0, pd2.af);
6797				copyback = 1;
6798			}
6799
6800			if (copyback) {
6801				switch (pd2.af) {
6802#ifdef INET
6803				case AF_INET:
6804					m_copyback(m, off, ICMP_MINLEN,
6805					    (caddr_t )&pd->hdr.icmp);
6806					m_copyback(m, ipoff2, sizeof(h2),
6807					    (caddr_t )&h2);
6808					break;
6809#endif /* INET */
6810#ifdef INET6
6811				case AF_INET6:
6812					m_copyback(m, off,
6813					    sizeof(struct icmp6_hdr),
6814					    (caddr_t )&pd->hdr.icmp6);
6815					m_copyback(m, ipoff2, sizeof(h2_6),
6816					    (caddr_t )&h2_6);
6817					break;
6818#endif /* INET6 */
6819				}
6820				m_copyback(m, off2, 8, (caddr_t)&th);
6821			}
6822
6823			return (PF_PASS);
6824			break;
6825		}
6826		case IPPROTO_UDP: {
6827			struct udphdr		uh;
6828
6829			if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
6830			    NULL, reason, pd2.af)) {
6831				DPFPRINTF(PF_DEBUG_MISC,
6832				    ("pf: ICMP error message too short "
6833				    "(udp)\n"));
6834				return (PF_DROP);
6835			}
6836
6837			key.af = pd2.af;
6838			key.proto = IPPROTO_UDP;
6839			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
6840			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
6841			key.port[pd2.sidx] = uh.uh_sport;
6842			key.port[pd2.didx] = uh.uh_dport;
6843
6844			STATE_LOOKUP(kif, &key, *state, pd);
6845
6846			/* translate source/destination address, if necessary */
6847			if ((*state)->key[PF_SK_WIRE] !=
6848			    (*state)->key[PF_SK_STACK]) {
6849				struct pf_state_key *nk =
6850				    (*state)->key[pd->didx];
6851
6852				if (PF_ANEQ(pd2.src,
6853				    &nk->addr[pd2.sidx], pd2.af) ||
6854				    nk->port[pd2.sidx] != uh.uh_sport)
6855					pf_change_icmp(pd2.src, &uh.uh_sport,
6856					    daddr, &nk->addr[pd2.sidx],
6857					    nk->port[pd2.sidx], &uh.uh_sum,
6858					    pd2.ip_sum, icmpsum,
6859					    pd->ip_sum, 1, pd2.af);
6860
6861				if (PF_ANEQ(pd2.dst,
6862				    &nk->addr[pd2.didx], pd2.af) ||
6863				    nk->port[pd2.didx] != uh.uh_dport)
6864					pf_change_icmp(pd2.dst, &uh.uh_dport,
6865					    saddr, &nk->addr[pd2.didx],
6866					    nk->port[pd2.didx], &uh.uh_sum,
6867					    pd2.ip_sum, icmpsum,
6868					    pd->ip_sum, 1, pd2.af);
6869
6870				switch (pd2.af) {
6871#ifdef INET
6872				case AF_INET:
6873					m_copyback(m, off, ICMP_MINLEN,
6874					    (caddr_t )&pd->hdr.icmp);
6875					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
6876					break;
6877#endif /* INET */
6878#ifdef INET6
6879				case AF_INET6:
6880					m_copyback(m, off,
6881					    sizeof(struct icmp6_hdr),
6882					    (caddr_t )&pd->hdr.icmp6);
6883					m_copyback(m, ipoff2, sizeof(h2_6),
6884					    (caddr_t )&h2_6);
6885					break;
6886#endif /* INET6 */
6887				}
6888				m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
6889			}
6890			return (PF_PASS);
6891			break;
6892		}
6893#ifdef INET
6894		case IPPROTO_ICMP: {
6895			struct icmp		iih;
6896
6897			if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
6898			    NULL, reason, pd2.af)) {
6899				DPFPRINTF(PF_DEBUG_MISC,
6900				    ("pf: ICMP error message too short i"
6901				    "(icmp)\n"));
6902				return (PF_DROP);
6903			}
6904
6905			key.af = pd2.af;
6906			key.proto = IPPROTO_ICMP;
6907			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
6908			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
6909			key.port[0] = key.port[1] = iih.icmp_id;
6910
6911			STATE_LOOKUP(kif, &key, *state, pd);
6912
6913			/* translate source/destination address, if necessary */
6914			if ((*state)->key[PF_SK_WIRE] !=
6915			    (*state)->key[PF_SK_STACK]) {
6916				struct pf_state_key *nk =
6917				    (*state)->key[pd->didx];
6918
6919				if (PF_ANEQ(pd2.src,
6920				    &nk->addr[pd2.sidx], pd2.af) ||
6921				    nk->port[pd2.sidx] != iih.icmp_id)
6922					pf_change_icmp(pd2.src, &iih.icmp_id,
6923					    daddr, &nk->addr[pd2.sidx],
6924					    nk->port[pd2.sidx], NULL,
6925					    pd2.ip_sum, icmpsum,
6926					    pd->ip_sum, 0, AF_INET);
6927
6928				if (PF_ANEQ(pd2.dst,
6929				    &nk->addr[pd2.didx], pd2.af) ||
6930				    nk->port[pd2.didx] != iih.icmp_id)
6931					pf_change_icmp(pd2.dst, &iih.icmp_id,
6932					    saddr, &nk->addr[pd2.didx],
6933					    nk->port[pd2.didx], NULL,
6934					    pd2.ip_sum, icmpsum,
6935					    pd->ip_sum, 0, AF_INET);
6936
6937				m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
6938				m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
6939				m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
6940			}
6941			return (PF_PASS);
6942			break;
6943		}
6944#endif /* INET */
6945#ifdef INET6
6946		case IPPROTO_ICMPV6: {
6947			struct icmp6_hdr	iih;
6948
6949			if (!pf_pull_hdr(m, off2, &iih,
6950			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
6951				DPFPRINTF(PF_DEBUG_MISC,
6952				    ("pf: ICMP error message too short "
6953				    "(icmp6)\n"));
6954				return (PF_DROP);
6955			}
6956
6957			key.af = pd2.af;
6958			key.proto = IPPROTO_ICMPV6;
6959			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
6960			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
6961			key.port[0] = key.port[1] = iih.icmp6_id;
6962
6963			STATE_LOOKUP(kif, &key, *state, pd);
6964
6965			/* translate source/destination address, if necessary */
6966			if ((*state)->key[PF_SK_WIRE] !=
6967			    (*state)->key[PF_SK_STACK]) {
6968				struct pf_state_key *nk =
6969				    (*state)->key[pd->didx];
6970
6971				if (PF_ANEQ(pd2.src,
6972				    &nk->addr[pd2.sidx], pd2.af) ||
6973				    nk->port[pd2.sidx] != iih.icmp6_id)
6974					pf_change_icmp(pd2.src, &iih.icmp6_id,
6975					    daddr, &nk->addr[pd2.sidx],
6976					    nk->port[pd2.sidx], NULL,
6977					    pd2.ip_sum, icmpsum,
6978					    pd->ip_sum, 0, AF_INET6);
6979
6980				if (PF_ANEQ(pd2.dst,
6981				    &nk->addr[pd2.didx], pd2.af) ||
6982				    nk->port[pd2.didx] != iih.icmp6_id)
6983					pf_change_icmp(pd2.dst, &iih.icmp6_id,
6984					    saddr, &nk->addr[pd2.didx],
6985					    nk->port[pd2.didx], NULL,
6986					    pd2.ip_sum, icmpsum,
6987					    pd->ip_sum, 0, AF_INET6);
6988
6989				m_copyback(m, off, sizeof(struct icmp6_hdr),
6990				    (caddr_t)&pd->hdr.icmp6);
6991				m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
6992				m_copyback(m, off2, sizeof(struct icmp6_hdr),
6993				    (caddr_t)&iih);
6994			}
6995			return (PF_PASS);
6996			break;
6997		}
6998#endif /* INET6 */
6999		default: {
7000			key.af = pd2.af;
7001			key.proto = pd2.proto;
7002			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
7003			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
7004			key.port[0] = key.port[1] = 0;
7005
7006			STATE_LOOKUP(kif, &key, *state, pd);
7007
7008			/* translate source/destination address, if necessary */
7009			if ((*state)->key[PF_SK_WIRE] !=
7010			    (*state)->key[PF_SK_STACK]) {
7011				struct pf_state_key *nk =
7012				    (*state)->key[pd->didx];
7013
7014				if (PF_ANEQ(pd2.src,
7015				    &nk->addr[pd2.sidx], pd2.af))
7016					pf_change_icmp(pd2.src, NULL, daddr,
7017					    &nk->addr[pd2.sidx], 0, NULL,
7018					    pd2.ip_sum, icmpsum,
7019					    pd->ip_sum, 0, pd2.af);
7020
7021				if (PF_ANEQ(pd2.dst,
7022				    &nk->addr[pd2.didx], pd2.af))
7023					pf_change_icmp(pd2.dst, NULL, saddr,
7024					    &nk->addr[pd2.didx], 0, NULL,
7025					    pd2.ip_sum, icmpsum,
7026					    pd->ip_sum, 0, pd2.af);
7027
7028				switch (pd2.af) {
7029#ifdef INET
7030				case AF_INET:
7031					m_copyback(m, off, ICMP_MINLEN,
7032					    (caddr_t)&pd->hdr.icmp);
7033					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
7034					break;
7035#endif /* INET */
7036#ifdef INET6
7037				case AF_INET6:
7038					m_copyback(m, off,
7039					    sizeof(struct icmp6_hdr),
7040					    (caddr_t )&pd->hdr.icmp6);
7041					m_copyback(m, ipoff2, sizeof(h2_6),
7042					    (caddr_t )&h2_6);
7043					break;
7044#endif /* INET6 */
7045				}
7046			}
7047			return (PF_PASS);
7048			break;
7049		}
7050		}
7051	}
7052}
7053
7054static int
7055pf_test_state_other(struct pf_kstate **state, struct pfi_kkif *kif,
7056    struct mbuf *m, struct pf_pdesc *pd)
7057{
7058	struct pf_state_peer	*src, *dst;
7059	struct pf_state_key_cmp	 key;
7060	uint8_t			 psrc, pdst;
7061
7062	bzero(&key, sizeof(key));
7063	key.af = pd->af;
7064	key.proto = pd->proto;
7065	if (pd->dir == PF_IN)	{
7066		PF_ACPY(&key.addr[0], pd->src, key.af);
7067		PF_ACPY(&key.addr[1], pd->dst, key.af);
7068		key.port[0] = key.port[1] = 0;
7069	} else {
7070		PF_ACPY(&key.addr[1], pd->src, key.af);
7071		PF_ACPY(&key.addr[0], pd->dst, key.af);
7072		key.port[1] = key.port[0] = 0;
7073	}
7074
7075	STATE_LOOKUP(kif, &key, *state, pd);
7076
7077	if (pd->dir == (*state)->direction) {
7078		src = &(*state)->src;
7079		dst = &(*state)->dst;
7080		psrc = PF_PEER_SRC;
7081		pdst = PF_PEER_DST;
7082	} else {
7083		src = &(*state)->dst;
7084		dst = &(*state)->src;
7085		psrc = PF_PEER_DST;
7086		pdst = PF_PEER_SRC;
7087	}
7088
7089	/* update states */
7090	if (src->state < PFOTHERS_SINGLE)
7091		pf_set_protostate(*state, psrc, PFOTHERS_SINGLE);
7092	if (dst->state == PFOTHERS_SINGLE)
7093		pf_set_protostate(*state, pdst, PFOTHERS_MULTIPLE);
7094
7095	/* update expire time */
7096	(*state)->expire = pf_get_uptime();
7097	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
7098		(*state)->timeout = PFTM_OTHER_MULTIPLE;
7099	else
7100		(*state)->timeout = PFTM_OTHER_SINGLE;
7101
7102	/* translate source/destination address, if necessary */
7103	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
7104		struct pf_state_key *nk = (*state)->key[pd->didx];
7105
7106		KASSERT(nk, ("%s: nk is null", __func__));
7107		KASSERT(pd, ("%s: pd is null", __func__));
7108		KASSERT(pd->src, ("%s: pd->src is null", __func__));
7109		KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
7110		switch (pd->af) {
7111#ifdef INET
7112		case AF_INET:
7113			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
7114				pf_change_a(&pd->src->v4.s_addr,
7115				    pd->ip_sum,
7116				    nk->addr[pd->sidx].v4.s_addr,
7117				    0);
7118
7119			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
7120				pf_change_a(&pd->dst->v4.s_addr,
7121				    pd->ip_sum,
7122				    nk->addr[pd->didx].v4.s_addr,
7123				    0);
7124
7125			break;
7126#endif /* INET */
7127#ifdef INET6
7128		case AF_INET6:
7129			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
7130				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
7131
7132			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
7133				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
7134#endif /* INET6 */
7135		}
7136	}
7137	return (PF_PASS);
7138}
7139
7140/*
7141 * ipoff and off are measured from the start of the mbuf chain.
7142 * h must be at "ipoff" on the mbuf chain.
7143 */
7144void *
7145pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
7146    u_short *actionp, u_short *reasonp, sa_family_t af)
7147{
7148	switch (af) {
7149#ifdef INET
7150	case AF_INET: {
7151		struct ip	*h = mtod(m, struct ip *);
7152		u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
7153
7154		if (fragoff) {
7155			if (fragoff >= len)
7156				ACTION_SET(actionp, PF_PASS);
7157			else {
7158				ACTION_SET(actionp, PF_DROP);
7159				REASON_SET(reasonp, PFRES_FRAG);
7160			}
7161			return (NULL);
7162		}
7163		if (m->m_pkthdr.len < off + len ||
7164		    ntohs(h->ip_len) < off + len) {
7165			ACTION_SET(actionp, PF_DROP);
7166			REASON_SET(reasonp, PFRES_SHORT);
7167			return (NULL);
7168		}
7169		break;
7170	}
7171#endif /* INET */
7172#ifdef INET6
7173	case AF_INET6: {
7174		struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);
7175
7176		if (m->m_pkthdr.len < off + len ||
7177		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
7178		    (unsigned)(off + len)) {
7179			ACTION_SET(actionp, PF_DROP);
7180			REASON_SET(reasonp, PFRES_SHORT);
7181			return (NULL);
7182		}
7183		break;
7184	}
7185#endif /* INET6 */
7186	}
7187	m_copydata(m, off, len, p);
7188	return (p);
7189}
7190
7191int
7192pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif,
7193    int rtableid)
7194{
7195	struct ifnet		*ifp;
7196
7197	/*
7198	 * Skip check for addresses with embedded interface scope,
7199	 * as they would always match anyway.
7200	 */
7201	if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
7202		return (1);
7203
7204	if (af != AF_INET && af != AF_INET6)
7205		return (0);
7206
7207	if (kif == V_pfi_all)
7208		return (1);
7209
7210	/* Skip checks for ipsec interfaces */
7211	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
7212		return (1);
7213
7214	ifp = (kif != NULL) ? kif->pfik_ifp : NULL;
7215
7216	switch (af) {
7217#ifdef INET6
7218	case AF_INET6:
7219		return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE,
7220		    ifp));
7221#endif
7222#ifdef INET
7223	case AF_INET:
7224		return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE,
7225		    ifp));
7226#endif
7227	}
7228
7229	return (0);
7230}
7231
7232#ifdef INET
7233static void
7234pf_route(struct mbuf **m, struct pf_krule *r, struct ifnet *oifp,
7235    struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
7236{
7237	struct mbuf		*m0, *m1, *md;
7238	struct sockaddr_in	dst;
7239	struct ip		*ip;
7240	struct pfi_kkif		*nkif = NULL;
7241	struct ifnet		*ifp = NULL;
7242	struct pf_addr		 naddr;
7243	struct pf_ksrc_node	*sn = NULL;
7244	int			 error = 0;
7245	uint16_t		 ip_len, ip_off;
7246	uint16_t		 tmp;
7247	int			 r_rt, r_dir;
7248
7249	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
7250
7251	if (s) {
7252		r_rt = s->rt;
7253		r_dir = s->direction;
7254	} else {
7255		r_rt = r->rt;
7256		r_dir = r->direction;
7257	}
7258
7259	KASSERT(pd->dir == PF_IN || pd->dir == PF_OUT ||
7260	    r_dir == PF_IN || r_dir == PF_OUT, ("%s: invalid direction",
7261	    __func__));
7262
7263	if ((pd->pf_mtag == NULL &&
7264	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
7265	    pd->pf_mtag->routed++ > 3) {
7266		m0 = *m;
7267		*m = NULL;
7268		goto bad_locked;
7269	}
7270
7271	if (r_rt == PF_DUPTO) {
7272		if ((pd->pf_mtag->flags & PF_MTAG_FLAG_DUPLICATED)) {
7273			if (s == NULL) {
7274				ifp = r->rpool.cur->kif ?
7275				    r->rpool.cur->kif->pfik_ifp : NULL;
7276			} else {
7277				ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
7278				/* If pfsync'd */
7279				if (ifp == NULL && r->rpool.cur != NULL)
7280					ifp = r->rpool.cur->kif ?
7281					    r->rpool.cur->kif->pfik_ifp : NULL;
7282				PF_STATE_UNLOCK(s);
7283			}
7284			if (ifp == oifp) {
7285				/* When the 2nd interface is not skipped */
7286				return;
7287			} else {
7288				m0 = *m;
7289				*m = NULL;
7290				goto bad;
7291			}
7292		} else {
7293			pd->pf_mtag->flags |= PF_MTAG_FLAG_DUPLICATED;
7294			if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
7295				if (s)
7296					PF_STATE_UNLOCK(s);
7297				return;
7298			}
7299		}
7300	} else {
7301		if ((r_rt == PF_REPLYTO) == (r_dir == pd->dir)) {
7302			pf_dummynet(pd, s, r, m);
7303			if (s)
7304				PF_STATE_UNLOCK(s);
7305			return;
7306		}
7307		m0 = *m;
7308	}
7309
7310	ip = mtod(m0, struct ip *);
7311
7312	bzero(&dst, sizeof(dst));
7313	dst.sin_family = AF_INET;
7314	dst.sin_len = sizeof(dst);
7315	dst.sin_addr = ip->ip_dst;
7316
7317	bzero(&naddr, sizeof(naddr));
7318
7319	if (s == NULL) {
7320		if (TAILQ_EMPTY(&r->rpool.list)) {
7321			DPFPRINTF(PF_DEBUG_URGENT,
7322			    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
7323			goto bad_locked;
7324		}
7325		pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
7326		    &naddr, &nkif, NULL, &sn);
7327		if (!PF_AZERO(&naddr, AF_INET))
7328			dst.sin_addr.s_addr = naddr.v4.s_addr;
7329		ifp = nkif ? nkif->pfik_ifp : NULL;
7330	} else {
7331		struct pfi_kkif *kif;
7332
7333		if (!PF_AZERO(&s->rt_addr, AF_INET))
7334			dst.sin_addr.s_addr =
7335			    s->rt_addr.v4.s_addr;
7336		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
7337		kif = s->rt_kif;
7338		/* If pfsync'd */
7339		if (ifp == NULL && r->rpool.cur != NULL) {
7340			ifp = r->rpool.cur->kif ?
7341			    r->rpool.cur->kif->pfik_ifp : NULL;
7342			kif = r->rpool.cur->kif;
7343		}
7344		if (ifp != NULL && kif != NULL &&
7345		    r->rule_flag & PFRULE_IFBOUND &&
7346		    r->rt == PF_REPLYTO &&
7347		    s->kif == V_pfi_all) {
7348			s->kif = kif;
7349			s->orig_kif = oifp->if_pf_kif;
7350		}
7351
7352		PF_STATE_UNLOCK(s);
7353	}
7354
7355	if (ifp == NULL)
7356		goto bad;
7357
7358	if (pd->dir == PF_IN) {
7359		if (pf_test(PF_OUT, PFIL_FWD, ifp, &m0, inp, &pd->act) != PF_PASS)
7360			goto bad;
7361		else if (m0 == NULL)
7362			goto done;
7363		if (m0->m_len < sizeof(struct ip)) {
7364			DPFPRINTF(PF_DEBUG_URGENT,
7365			    ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
7366			goto bad;
7367		}
7368		ip = mtod(m0, struct ip *);
7369	}
7370
7371	if (ifp->if_flags & IFF_LOOPBACK)
7372		m0->m_flags |= M_SKIP_FIREWALL;
7373
7374	ip_len = ntohs(ip->ip_len);
7375	ip_off = ntohs(ip->ip_off);
7376
7377	/* Copied from FreeBSD 10.0-CURRENT ip_output. */
7378	m0->m_pkthdr.csum_flags |= CSUM_IP;
7379	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
7380		in_delayed_cksum(m0);
7381		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
7382	}
7383	if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
7384		pf_sctp_checksum(m0, (uint32_t)(ip->ip_hl << 2));
7385		m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
7386	}
7387
7388	if (pd->dir == PF_IN) {
7389		/*
7390		 * Make sure dummynet gets the correct direction, in case it needs to
7391		 * re-inject later.
7392		 */
7393		pd->dir = PF_OUT;
7394
7395		/*
7396		 * The following processing is actually the rest of the inbound processing, even
7397		 * though we've marked it as outbound (so we don't look through dummynet) and it
7398		 * happens after the outbound processing (pf_test(PF_OUT) above).
7399		 * Swap the dummynet pipe numbers, because it's going to come to the wrong
7400		 * conclusion about what direction it's processing, and we can't fix it or it
7401		 * will re-inject incorrectly. Swapping the pipe numbers means that its incorrect
7402		 * decision will pick the right pipe, and everything will mostly work as expected.
7403		 */
7404		tmp = pd->act.dnrpipe;
7405		pd->act.dnrpipe = pd->act.dnpipe;
7406		pd->act.dnpipe = tmp;
7407	}
7408
7409	/*
7410	 * If small enough for interface, or the interface will take
7411	 * care of the fragmentation for us, we can just send directly.
7412	 */
7413	if (ip_len <= ifp->if_mtu ||
7414	    (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
7415		ip->ip_sum = 0;
7416		if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
7417			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
7418			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
7419		}
7420		m_clrprotoflags(m0);	/* Avoid confusing lower layers. */
7421
7422		md = m0;
7423		error = pf_dummynet_route(pd, s, r, ifp, sintosa(&dst), &md);
7424		if (md != NULL)
7425			error = (*ifp->if_output)(ifp, md, sintosa(&dst), NULL);
7426		goto done;
7427	}
7428
7429	/* Balk when DF bit is set or the interface didn't support TSO. */
7430	if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
7431		error = EMSGSIZE;
7432		KMOD_IPSTAT_INC(ips_cantfrag);
7433		if (r_rt != PF_DUPTO) {
7434			if (s && pd->nat_rule != NULL)
7435				PACKET_UNDO_NAT(m0, pd,
7436				    (ip->ip_hl << 2) + (ip_off & IP_OFFMASK),
7437				    s);
7438
7439			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
7440			    ifp->if_mtu);
7441			goto done;
7442		} else
7443			goto bad;
7444	}
7445
7446	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
7447	if (error)
7448		goto bad;
7449
7450	for (; m0; m0 = m1) {
7451		m1 = m0->m_nextpkt;
7452		m0->m_nextpkt = NULL;
7453		if (error == 0) {
7454			m_clrprotoflags(m0);
7455			md = m0;
7456			pd->pf_mtag = pf_find_mtag(md);
7457			error = pf_dummynet_route(pd, s, r, ifp,
7458			    sintosa(&dst), &md);
7459			if (md != NULL)
7460				error = (*ifp->if_output)(ifp, md,
7461				    sintosa(&dst), NULL);
7462		} else
7463			m_freem(m0);
7464	}
7465
7466	if (error == 0)
7467		KMOD_IPSTAT_INC(ips_fragmented);
7468
7469done:
7470	if (r_rt != PF_DUPTO)
7471		*m = NULL;
7472	return;
7473
7474bad_locked:
7475	if (s)
7476		PF_STATE_UNLOCK(s);
7477bad:
7478	m_freem(m0);
7479	goto done;
7480}
7481#endif /* INET */
7482
7483#ifdef INET6
7484static void
7485pf_route6(struct mbuf **m, struct pf_krule *r, struct ifnet *oifp,
7486    struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
7487{
7488	struct mbuf		*m0, *md;
7489	struct sockaddr_in6	dst;
7490	struct ip6_hdr		*ip6;
7491	struct pfi_kkif		*nkif = NULL;
7492	struct ifnet		*ifp = NULL;
7493	struct pf_addr		 naddr;
7494	struct pf_ksrc_node	*sn = NULL;
7495	int			 r_rt, r_dir;
7496
7497	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
7498
7499	if (s) {
7500		r_rt = s->rt;
7501		r_dir = s->direction;
7502	} else {
7503		r_rt = r->rt;
7504		r_dir = r->direction;
7505	}
7506
7507	KASSERT(pd->dir == PF_IN || pd->dir == PF_OUT ||
7508	    r_dir == PF_IN || r_dir == PF_OUT, ("%s: invalid direction",
7509	    __func__));
7510
7511	if ((pd->pf_mtag == NULL &&
7512	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
7513	    pd->pf_mtag->routed++ > 3) {
7514		m0 = *m;
7515		*m = NULL;
7516		goto bad_locked;
7517	}
7518
7519	if (r_rt == PF_DUPTO) {
7520		if ((pd->pf_mtag->flags & PF_MTAG_FLAG_DUPLICATED)) {
7521			if (s == NULL) {
7522				ifp = r->rpool.cur->kif ?
7523				    r->rpool.cur->kif->pfik_ifp : NULL;
7524			} else {
7525				ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
7526				/* If pfsync'd */
7527				if (ifp == NULL && r->rpool.cur != NULL)
7528					ifp = r->rpool.cur->kif ?
7529					    r->rpool.cur->kif->pfik_ifp : NULL;
7530				PF_STATE_UNLOCK(s);
7531			}
7532			if (ifp == oifp) {
7533				/* When the 2nd interface is not skipped */
7534				return;
7535			} else {
7536				m0 = *m;
7537				*m = NULL;
7538				goto bad;
7539			}
7540		} else {
7541			pd->pf_mtag->flags |= PF_MTAG_FLAG_DUPLICATED;
7542			if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
7543				if (s)
7544					PF_STATE_UNLOCK(s);
7545				return;
7546			}
7547		}
7548	} else {
7549		if ((r_rt == PF_REPLYTO) == (r_dir == pd->dir)) {
7550			pf_dummynet(pd, s, r, m);
7551			if (s)
7552				PF_STATE_UNLOCK(s);
7553			return;
7554		}
7555		m0 = *m;
7556	}
7557
7558	ip6 = mtod(m0, struct ip6_hdr *);
7559
7560	bzero(&dst, sizeof(dst));
7561	dst.sin6_family = AF_INET6;
7562	dst.sin6_len = sizeof(dst);
7563	dst.sin6_addr = ip6->ip6_dst;
7564
7565	bzero(&naddr, sizeof(naddr));
7566
7567	if (s == NULL) {
7568		if (TAILQ_EMPTY(&r->rpool.list)) {
7569			DPFPRINTF(PF_DEBUG_URGENT,
7570			    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
7571			goto bad_locked;
7572		}
7573		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
7574		    &naddr, &nkif, NULL, &sn);
7575		if (!PF_AZERO(&naddr, AF_INET6))
7576			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
7577			    &naddr, AF_INET6);
7578		ifp = nkif ? nkif->pfik_ifp : NULL;
7579	} else {
7580		struct pfi_kkif *kif;
7581
7582		if (!PF_AZERO(&s->rt_addr, AF_INET6))
7583			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
7584			    &s->rt_addr, AF_INET6);
7585		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
7586		kif = s->rt_kif;
7587		/* If pfsync'd */
7588		if (ifp == NULL && r->rpool.cur != NULL) {
7589			ifp = r->rpool.cur->kif ?
7590			    r->rpool.cur->kif->pfik_ifp : NULL;
7591			kif = r->rpool.cur->kif;
7592		}
7593		if (ifp != NULL && kif != NULL &&
7594		    r->rule_flag & PFRULE_IFBOUND &&
7595		    r->rt == PF_REPLYTO &&
7596		    s->kif == V_pfi_all) {
7597			s->kif = kif;
7598			s->orig_kif = oifp->if_pf_kif;
7599		}
7600	}
7601
7602	if (s)
7603		PF_STATE_UNLOCK(s);
7604
7605	if (ifp == NULL)
7606		goto bad;
7607
7608	if (pd->dir == PF_IN) {
7609		if (pf_test6(PF_OUT, PFIL_FWD, ifp, &m0, inp, &pd->act) != PF_PASS)
7610			goto bad;
7611		else if (m0 == NULL)
7612			goto done;
7613		if (m0->m_len < sizeof(struct ip6_hdr)) {
7614			DPFPRINTF(PF_DEBUG_URGENT,
7615			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
7616			    __func__));
7617			goto bad;
7618		}
7619		ip6 = mtod(m0, struct ip6_hdr *);
7620	}
7621
7622	if (ifp->if_flags & IFF_LOOPBACK)
7623		m0->m_flags |= M_SKIP_FIREWALL;
7624
7625	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
7626	    ~ifp->if_hwassist) {
7627		uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
7628		in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
7629		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
7630	}
7631
7632	/*
7633	 * If the packet is too large for the outgoing interface,
7634	 * send back an icmp6 error.
7635	 */
7636	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
7637		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
7638	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
7639		md = m0;
7640		pf_dummynet_route(pd, s, r, ifp, sintosa(&dst), &md);
7641		if (md != NULL)
7642			nd6_output_ifp(ifp, ifp, md, &dst, NULL);
7643	}
7644	else {
7645		in6_ifstat_inc(ifp, ifs6_in_toobig);
7646		if (r_rt != PF_DUPTO) {
7647			if (s && pd->nat_rule != NULL)
7648				PACKET_UNDO_NAT(m0, pd,
7649				    ((caddr_t)ip6 - m0->m_data) +
7650				    sizeof(struct ip6_hdr), s);
7651
7652			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
7653		} else
7654			goto bad;
7655	}
7656
7657done:
7658	if (r_rt != PF_DUPTO)
7659		*m = NULL;
7660	return;
7661
7662bad_locked:
7663	if (s)
7664		PF_STATE_UNLOCK(s);
7665bad:
7666	m_freem(m0);
7667	goto done;
7668}
7669#endif /* INET6 */
7670
7671/*
7672 * FreeBSD supports cksum offloads for the following drivers.
7673 *  em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4)
7674 *
7675 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
7676 *  network driver performed cksum including pseudo header, need to verify
7677 *   csum_data
7678 * CSUM_DATA_VALID :
7679 *  network driver performed cksum, needs to additional pseudo header
7680 *  cksum computation with partial csum_data(i.e. lack of H/W support for
7681 *  pseudo header, for instance sk(4) and possibly gem(4))
7682 *
7683 * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
7684 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
7685 * TCP/UDP layer.
7686 * Also, set csum_data to 0xffff to force cksum validation.
7687 */
7688static int
7689pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
7690{
7691	u_int16_t sum = 0;
7692	int hw_assist = 0;
7693	struct ip *ip;
7694
7695	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
7696		return (1);
7697	if (m->m_pkthdr.len < off + len)
7698		return (1);
7699
7700	switch (p) {
7701	case IPPROTO_TCP:
7702		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
7703			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
7704				sum = m->m_pkthdr.csum_data;
7705			} else {
7706				ip = mtod(m, struct ip *);
7707				sum = in_pseudo(ip->ip_src.s_addr,
7708				ip->ip_dst.s_addr, htonl((u_short)len +
7709				m->m_pkthdr.csum_data + IPPROTO_TCP));
7710			}
7711			sum ^= 0xffff;
7712			++hw_assist;
7713		}
7714		break;
7715	case IPPROTO_UDP:
7716		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
7717			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
7718				sum = m->m_pkthdr.csum_data;
7719			} else {
7720				ip = mtod(m, struct ip *);
7721				sum = in_pseudo(ip->ip_src.s_addr,
7722				ip->ip_dst.s_addr, htonl((u_short)len +
7723				m->m_pkthdr.csum_data + IPPROTO_UDP));
7724			}
7725			sum ^= 0xffff;
7726			++hw_assist;
7727		}
7728		break;
7729	case IPPROTO_ICMP:
7730#ifdef INET6
7731	case IPPROTO_ICMPV6:
7732#endif /* INET6 */
7733		break;
7734	default:
7735		return (1);
7736	}
7737
7738	if (!hw_assist) {
7739		switch (af) {
7740		case AF_INET:
7741			if (p == IPPROTO_ICMP) {
7742				if (m->m_len < off)
7743					return (1);
7744				m->m_data += off;
7745				m->m_len -= off;
7746				sum = in_cksum(m, len);
7747				m->m_data -= off;
7748				m->m_len += off;
7749			} else {
7750				if (m->m_len < sizeof(struct ip))
7751					return (1);
7752				sum = in4_cksum(m, p, off, len);
7753			}
7754			break;
7755#ifdef INET6
7756		case AF_INET6:
7757			if (m->m_len < sizeof(struct ip6_hdr))
7758				return (1);
7759			sum = in6_cksum(m, p, off, len);
7760			break;
7761#endif /* INET6 */
7762		default:
7763			return (1);
7764		}
7765	}
7766	if (sum) {
7767		switch (p) {
7768		case IPPROTO_TCP:
7769		    {
7770			KMOD_TCPSTAT_INC(tcps_rcvbadsum);
7771			break;
7772		    }
7773		case IPPROTO_UDP:
7774		    {
7775			KMOD_UDPSTAT_INC(udps_badsum);
7776			break;
7777		    }
7778#ifdef INET
7779		case IPPROTO_ICMP:
7780		    {
7781			KMOD_ICMPSTAT_INC(icps_checksum);
7782			break;
7783		    }
7784#endif
7785#ifdef INET6
7786		case IPPROTO_ICMPV6:
7787		    {
7788			KMOD_ICMP6STAT_INC(icp6s_checksum);
7789			break;
7790		    }
7791#endif /* INET6 */
7792		}
7793		return (1);
7794	} else {
7795		if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
7796			m->m_pkthdr.csum_flags |=
7797			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
7798			m->m_pkthdr.csum_data = 0xffff;
7799		}
7800	}
7801	return (0);
7802}
7803
7804static bool
7805pf_pdesc_to_dnflow(const struct pf_pdesc *pd, const struct pf_krule *r,
7806    const struct pf_kstate *s, struct ip_fw_args *dnflow)
7807{
7808	int dndir = r->direction;
7809
7810	if (s && dndir == PF_INOUT) {
7811		dndir = s->direction;
7812	} else if (dndir == PF_INOUT) {
7813		/* Assume primary direction. Happens when we've set dnpipe in
7814		 * the ethernet level code. */
7815		dndir = pd->dir;
7816	}
7817
7818	if (pd->pf_mtag->flags & PF_MTAG_FLAG_DUMMYNETED)
7819		return (false);
7820
7821	memset(dnflow, 0, sizeof(*dnflow));
7822
7823	if (pd->dport != NULL)
7824		dnflow->f_id.dst_port = ntohs(*pd->dport);
7825	if (pd->sport != NULL)
7826		dnflow->f_id.src_port = ntohs(*pd->sport);
7827
7828	if (pd->dir == PF_IN)
7829		dnflow->flags |= IPFW_ARGS_IN;
7830	else
7831		dnflow->flags |= IPFW_ARGS_OUT;
7832
7833	if (pd->dir != dndir && pd->act.dnrpipe) {
7834		dnflow->rule.info = pd->act.dnrpipe;
7835	}
7836	else if (pd->dir == dndir && pd->act.dnpipe) {
7837		dnflow->rule.info = pd->act.dnpipe;
7838	}
7839	else {
7840		return (false);
7841	}
7842
7843	dnflow->rule.info |= IPFW_IS_DUMMYNET;
7844	if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFSTATE_DN_IS_PIPE)
7845		dnflow->rule.info |= IPFW_IS_PIPE;
7846
7847	dnflow->f_id.proto = pd->proto;
7848	dnflow->f_id.extra = dnflow->rule.info;
7849	switch (pd->af) {
7850	case AF_INET:
7851		dnflow->f_id.addr_type = 4;
7852		dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr);
7853		dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr);
7854		break;
7855	case AF_INET6:
7856		dnflow->flags |= IPFW_ARGS_IP6;
7857		dnflow->f_id.addr_type = 6;
7858		dnflow->f_id.src_ip6 = pd->src->v6;
7859		dnflow->f_id.dst_ip6 = pd->dst->v6;
7860		break;
7861	default:
7862		panic("Invalid AF");
7863		break;
7864	}
7865
7866	return (true);
7867}
7868
7869int
7870pf_test_eth(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
7871    struct inpcb *inp)
7872{
7873	struct pfi_kkif		*kif;
7874	struct mbuf		*m = *m0;
7875
7876	M_ASSERTPKTHDR(m);
7877	MPASS(ifp->if_vnet == curvnet);
7878	NET_EPOCH_ASSERT();
7879
7880	if (!V_pf_status.running)
7881		return (PF_PASS);
7882
7883	kif = (struct pfi_kkif *)ifp->if_pf_kif;
7884
7885	if (kif == NULL) {
7886		DPFPRINTF(PF_DEBUG_URGENT,
7887		    ("%s: kif == NULL, if_xname %s\n", __func__, ifp->if_xname));
7888		return (PF_DROP);
7889	}
7890	if (kif->pfik_flags & PFI_IFLAG_SKIP)
7891		return (PF_PASS);
7892
7893	if (m->m_flags & M_SKIP_FIREWALL)
7894		return (PF_PASS);
7895
7896	/* Stateless! */
7897	return (pf_test_eth_rule(dir, kif, m0));
7898}
7899
7900static __inline void
7901pf_dummynet_flag_remove(struct mbuf *m, struct pf_mtag *pf_mtag)
7902{
7903	struct m_tag *mtag;
7904
7905	pf_mtag->flags &= ~PF_MTAG_FLAG_DUMMYNET;
7906
7907	/* dummynet adds this tag, but pf does not need it,
7908	 * and keeping it creates unexpected behavior,
7909	 * e.g. in case of divert(4) usage right after dummynet. */
7910	mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
7911	if (mtag != NULL)
7912		m_tag_delete(m, mtag);
7913}
7914
7915static int
7916pf_dummynet(struct pf_pdesc *pd, struct pf_kstate *s,
7917    struct pf_krule *r, struct mbuf **m0)
7918{
7919	return (pf_dummynet_route(pd, s, r, NULL, NULL, m0));
7920}
7921
7922static int
7923pf_dummynet_route(struct pf_pdesc *pd, struct pf_kstate *s,
7924    struct pf_krule *r, struct ifnet *ifp, struct sockaddr *sa,
7925    struct mbuf **m0)
7926{
7927	NET_EPOCH_ASSERT();
7928
7929	if (pd->act.dnpipe || pd->act.dnrpipe) {
7930		struct ip_fw_args dnflow;
7931		if (ip_dn_io_ptr == NULL) {
7932			m_freem(*m0);
7933			*m0 = NULL;
7934			return (ENOMEM);
7935		}
7936
7937		if (pd->pf_mtag == NULL &&
7938		    ((pd->pf_mtag = pf_get_mtag(*m0)) == NULL)) {
7939			m_freem(*m0);
7940			*m0 = NULL;
7941			return (ENOMEM);
7942		}
7943
7944		if (ifp != NULL) {
7945			pd->pf_mtag->flags |= PF_MTAG_FLAG_ROUTE_TO;
7946
7947			pd->pf_mtag->if_index = ifp->if_index;
7948			pd->pf_mtag->if_idxgen = ifp->if_idxgen;
7949
7950			MPASS(sa != NULL);
7951
7952			if (pd->af == AF_INET)
7953				memcpy(&pd->pf_mtag->dst, sa,
7954				    sizeof(struct sockaddr_in));
7955			else
7956				memcpy(&pd->pf_mtag->dst, sa,
7957				    sizeof(struct sockaddr_in6));
7958		}
7959
7960		if (s != NULL && s->nat_rule.ptr != NULL &&
7961		    s->nat_rule.ptr->action == PF_RDR &&
7962		    (
7963#ifdef INET
7964		    (pd->af == AF_INET && IN_LOOPBACK(ntohl(pd->dst->v4.s_addr))) ||
7965#endif
7966		    (pd->af == AF_INET6 && IN6_IS_ADDR_LOOPBACK(&pd->dst->v6)))) {
7967			/*
7968			 * If we're redirecting to loopback mark this packet
7969			 * as being local. Otherwise it might get dropped
7970			 * if dummynet re-injects.
7971			 */
7972			(*m0)->m_pkthdr.rcvif = V_loif;
7973		}
7974
7975		if (pf_pdesc_to_dnflow(pd, r, s, &dnflow)) {
7976			pd->pf_mtag->flags |= PF_MTAG_FLAG_DUMMYNET;
7977			pd->pf_mtag->flags |= PF_MTAG_FLAG_DUMMYNETED;
7978			ip_dn_io_ptr(m0, &dnflow);
7979			if (*m0 != NULL) {
7980				pd->pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO;
7981				pf_dummynet_flag_remove(*m0, pd->pf_mtag);
7982			}
7983		}
7984	}
7985
7986	return (0);
7987}
7988
7989#ifdef INET
7990int
7991pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
7992    struct inpcb *inp, struct pf_rule_actions *default_actions)
7993{
7994	struct pfi_kkif		*kif;
7995	u_short			 action, reason = 0;
7996	struct mbuf		*m = *m0;
7997	struct ip		*h = NULL;
7998	struct m_tag		*mtag;
7999	struct pf_krule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
8000	struct pf_kstate	*s = NULL;
8001	struct pf_kruleset	*ruleset = NULL;
8002	struct pf_pdesc		 pd;
8003	int			 off, dirndx, use_2nd_queue = 0;
8004	uint16_t		 tag;
8005	uint8_t			 rt;
8006
8007	PF_RULES_RLOCK_TRACKER;
8008	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
8009	M_ASSERTPKTHDR(m);
8010
8011	if (!V_pf_status.running)
8012		return (PF_PASS);
8013
8014	PF_RULES_RLOCK();
8015
8016	kif = (struct pfi_kkif *)ifp->if_pf_kif;
8017
8018	if (__predict_false(kif == NULL)) {
8019		DPFPRINTF(PF_DEBUG_URGENT,
8020		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
8021		PF_RULES_RUNLOCK();
8022		return (PF_DROP);
8023	}
8024	if (kif->pfik_flags & PFI_IFLAG_SKIP) {
8025		PF_RULES_RUNLOCK();
8026		return (PF_PASS);
8027	}
8028
8029	if (m->m_flags & M_SKIP_FIREWALL) {
8030		PF_RULES_RUNLOCK();
8031		return (PF_PASS);
8032	}
8033
8034	memset(&pd, 0, sizeof(pd));
8035	TAILQ_INIT(&pd.sctp_multihome_jobs);
8036	if (default_actions != NULL)
8037		memcpy(&pd.act, default_actions, sizeof(pd.act));
8038	pd.pf_mtag = pf_find_mtag(m);
8039
8040	if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_MTAG_FLAG_ROUTE_TO)) {
8041		pd.pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO;
8042
8043		ifp = ifnet_byindexgen(pd.pf_mtag->if_index,
8044		    pd.pf_mtag->if_idxgen);
8045		if (ifp == NULL || ifp->if_flags & IFF_DYING) {
8046			PF_RULES_RUNLOCK();
8047			m_freem(*m0);
8048			*m0 = NULL;
8049			return (PF_PASS);
8050		}
8051		PF_RULES_RUNLOCK();
8052		(ifp->if_output)(ifp, m, sintosa(&pd.pf_mtag->dst), NULL);
8053		*m0 = NULL;
8054		return (PF_PASS);
8055	}
8056
8057	if (pd.pf_mtag && pd.pf_mtag->dnpipe) {
8058		pd.act.dnpipe = pd.pf_mtag->dnpipe;
8059		pd.act.flags = pd.pf_mtag->dnflags;
8060	}
8061
8062	if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL &&
8063	    pd.pf_mtag->flags & PF_MTAG_FLAG_DUMMYNET) {
8064		/* Dummynet re-injects packets after they've
8065		 * completed their delay. We've already
8066		 * processed them, so pass unconditionally. */
8067
8068		/* But only once. We may see the packet multiple times (e.g.
8069		 * PFIL_IN/PFIL_OUT). */
8070		pf_dummynet_flag_remove(m, pd.pf_mtag);
8071		PF_RULES_RUNLOCK();
8072
8073		return (PF_PASS);
8074	}
8075
8076	pd.sport = pd.dport = NULL;
8077	pd.proto_sum = NULL;
8078	pd.dir = dir;
8079	pd.sidx = (dir == PF_IN) ? 0 : 1;
8080	pd.didx = (dir == PF_IN) ? 1 : 0;
8081	pd.af = AF_INET;
8082	pd.act.rtableid = -1;
8083
8084	h = mtod(m, struct ip *);
8085	off = h->ip_hl << 2;
8086
8087	if (__predict_false(ip_divert_ptr != NULL) &&
8088	    ((mtag = m_tag_locate(m, MTAG_PF_DIVERT, 0, NULL)) != NULL)) {
8089		struct pf_divert_mtag *dt = (struct pf_divert_mtag *)(mtag+1);
8090		if ((dt->idir == PF_DIVERT_MTAG_DIR_IN && dir == PF_IN) ||
8091		    (dt->idir == PF_DIVERT_MTAG_DIR_OUT && dir == PF_OUT)) {
8092			if (pd.pf_mtag == NULL &&
8093			    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
8094				action = PF_DROP;
8095				goto done;
8096			}
8097			pd.pf_mtag->flags |= PF_MTAG_FLAG_PACKET_LOOPED;
8098		}
8099		if (pd.pf_mtag && pd.pf_mtag->flags & PF_MTAG_FLAG_FASTFWD_OURS_PRESENT) {
8100			m->m_flags |= M_FASTFWD_OURS;
8101			pd.pf_mtag->flags &= ~PF_MTAG_FLAG_FASTFWD_OURS_PRESENT;
8102		}
8103		m_tag_delete(m, mtag);
8104
8105		mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
8106		if (mtag != NULL)
8107			m_tag_delete(m, mtag);
8108	} else if (pf_normalize_ip(m0, kif, &reason, &pd) != PF_PASS) {
8109		/* We do IP header normalization and packet reassembly here */
8110		action = PF_DROP;
8111		goto done;
8112	}
8113	m = *m0;	/* pf_normalize messes with m0 */
8114	h = mtod(m, struct ip *);
8115
8116	off = h->ip_hl << 2;
8117	if (off < (int)sizeof(struct ip)) {
8118		action = PF_DROP;
8119		REASON_SET(&reason, PFRES_SHORT);
8120		pd.act.log = PF_LOG_FORCE;
8121		goto done;
8122	}
8123
8124	pd.src = (struct pf_addr *)&h->ip_src;
8125	pd.dst = (struct pf_addr *)&h->ip_dst;
8126	pd.ip_sum = &h->ip_sum;
8127	pd.proto = h->ip_p;
8128	pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
8129	pd.tot_len = ntohs(h->ip_len);
8130
8131	/* handle fragments that didn't get reassembled by normalization */
8132	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
8133		action = pf_test_fragment(&r, kif, m, h, &pd, &a, &ruleset);
8134		goto done;
8135	}
8136
8137	switch (h->ip_p) {
8138	case IPPROTO_TCP: {
8139		if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
8140		    &action, &reason, AF_INET)) {
8141			if (action != PF_PASS)
8142				pd.act.log = PF_LOG_FORCE;
8143			goto done;
8144		}
8145		pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
8146
8147		pd.sport = &pd.hdr.tcp.th_sport;
8148		pd.dport = &pd.hdr.tcp.th_dport;
8149
8150		/* Respond to SYN with a syncookie. */
8151		if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
8152		    pd.dir == PF_IN && pf_synflood_check(&pd)) {
8153			pf_syncookie_send(m, off, &pd);
8154			action = PF_DROP;
8155			break;
8156		}
8157
8158		if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0)
8159			use_2nd_queue = 1;
8160		action = pf_normalize_tcp(kif, m, 0, off, h, &pd);
8161		if (action == PF_DROP)
8162			goto done;
8163		action = pf_test_state_tcp(&s, kif, m, off, h, &pd, &reason);
8164		if (action == PF_PASS) {
8165			if (V_pfsync_update_state_ptr != NULL)
8166				V_pfsync_update_state_ptr(s);
8167			r = s->rule.ptr;
8168			a = s->anchor.ptr;
8169		} else if (s == NULL) {
8170			/* Validate remote SYN|ACK, re-create original SYN if
8171			 * valid. */
8172			if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) ==
8173			    TH_ACK && pf_syncookie_validate(&pd) &&
8174			    pd.dir == PF_IN) {
8175				struct mbuf *msyn;
8176
8177				msyn = pf_syncookie_recreate_syn(h->ip_ttl, off,
8178				    &pd);
8179				if (msyn == NULL) {
8180					action = PF_DROP;
8181					break;
8182				}
8183
8184				action = pf_test(dir, pflags, ifp, &msyn, inp,
8185				    &pd.act);
8186				m_freem(msyn);
8187				if (action != PF_PASS)
8188					break;
8189
8190				action = pf_test_state_tcp(&s, kif, m, off, h,
8191				    &pd, &reason);
8192				if (action != PF_PASS || s == NULL) {
8193					action = PF_DROP;
8194					break;
8195				}
8196
8197				s->src.seqhi = ntohl(pd.hdr.tcp.th_ack) - 1;
8198				s->src.seqlo = ntohl(pd.hdr.tcp.th_seq) - 1;
8199				pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_DST);
8200				action = pf_synproxy(&pd, &s, &reason);
8201				break;
8202			} else {
8203				action = pf_test_rule(&r, &s, kif, m, off, &pd,
8204				    &a, &ruleset, inp);
8205			}
8206		}
8207		break;
8208	}
8209
8210	case IPPROTO_UDP: {
8211		if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
8212		    &action, &reason, AF_INET)) {
8213			if (action != PF_PASS)
8214				pd.act.log = PF_LOG_FORCE;
8215			goto done;
8216		}
8217		pd.sport = &pd.hdr.udp.uh_sport;
8218		pd.dport = &pd.hdr.udp.uh_dport;
8219		if (pd.hdr.udp.uh_dport == 0 ||
8220		    ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off ||
8221		    ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) {
8222			action = PF_DROP;
8223			REASON_SET(&reason, PFRES_SHORT);
8224			goto done;
8225		}
8226		action = pf_test_state_udp(&s, kif, m, off, h, &pd);
8227		if (action == PF_PASS) {
8228			if (V_pfsync_update_state_ptr != NULL)
8229				V_pfsync_update_state_ptr(s);
8230			r = s->rule.ptr;
8231			a = s->anchor.ptr;
8232		} else if (s == NULL)
8233			action = pf_test_rule(&r, &s, kif, m, off, &pd,
8234			    &a, &ruleset, inp);
8235		break;
8236	}
8237
8238	case IPPROTO_SCTP: {
8239		if (!pf_pull_hdr(m, off, &pd.hdr.sctp, sizeof(pd.hdr.sctp),
8240		    &action, &reason, AF_INET)) {
8241			if (action != PF_PASS)
8242				pd.act.log |= PF_LOG_FORCE;
8243			goto done;
8244		}
8245		pd.p_len = pd.tot_len - off;
8246
8247		pd.sport = &pd.hdr.sctp.src_port;
8248		pd.dport = &pd.hdr.sctp.dest_port;
8249		if (pd.hdr.sctp.src_port == 0 || pd.hdr.sctp.dest_port == 0) {
8250			action = PF_DROP;
8251			REASON_SET(&reason, PFRES_SHORT);
8252			goto done;
8253		}
8254		action = pf_normalize_sctp(dir, kif, m, 0, off, h, &pd);
8255		if (action == PF_DROP)
8256			goto done;
8257		action = pf_test_state_sctp(&s, kif, m, off, h, &pd,
8258		    &reason);
8259		if (action == PF_PASS) {
8260			if (V_pfsync_update_state_ptr != NULL)
8261				V_pfsync_update_state_ptr(s);
8262			r = s->rule.ptr;
8263			a = s->anchor.ptr;
8264		} else {
8265			action = pf_test_rule(&r, &s, kif, m, off,
8266			    &pd, &a, &ruleset, inp);
8267		}
8268		break;
8269	}
8270
8271	case IPPROTO_ICMP: {
8272		if (!pf_pull_hdr(m, off, &pd.hdr.icmp, ICMP_MINLEN,
8273		    &action, &reason, AF_INET)) {
8274			if (action != PF_PASS)
8275				pd.act.log = PF_LOG_FORCE;
8276			goto done;
8277		}
8278		action = pf_test_state_icmp(&s, kif, m, off, h, &pd, &reason);
8279		if (action == PF_PASS) {
8280			if (V_pfsync_update_state_ptr != NULL)
8281				V_pfsync_update_state_ptr(s);
8282			r = s->rule.ptr;
8283			a = s->anchor.ptr;
8284		} else if (s == NULL)
8285			action = pf_test_rule(&r, &s, kif, m, off, &pd,
8286			    &a, &ruleset, inp);
8287		break;
8288	}
8289
8290#ifdef INET6
8291	case IPPROTO_ICMPV6: {
8292		action = PF_DROP;
8293		DPFPRINTF(PF_DEBUG_MISC,
8294		    ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
8295		goto done;
8296	}
8297#endif
8298
8299	default:
8300		action = pf_test_state_other(&s, kif, m, &pd);
8301		if (action == PF_PASS) {
8302			if (V_pfsync_update_state_ptr != NULL)
8303				V_pfsync_update_state_ptr(s);
8304			r = s->rule.ptr;
8305			a = s->anchor.ptr;
8306		} else if (s == NULL)
8307			action = pf_test_rule(&r, &s, kif, m, off, &pd,
8308			    &a, &ruleset, inp);
8309		break;
8310	}
8311
8312done:
8313	PF_RULES_RUNLOCK();
8314	if (action == PF_PASS && h->ip_hl > 5 &&
8315	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
8316		action = PF_DROP;
8317		REASON_SET(&reason, PFRES_IPOPTIONS);
8318		pd.act.log = PF_LOG_FORCE;
8319		DPFPRINTF(PF_DEBUG_MISC,
8320		    ("pf: dropping packet with ip options\n"));
8321	}
8322
8323	if (s) {
8324		uint8_t log = pd.act.log;
8325		memcpy(&pd.act, &s->act, sizeof(struct pf_rule_actions));
8326		pd.act.log |= log;
8327		tag = s->tag;
8328		rt = s->rt;
8329	} else {
8330		tag = r->tag;
8331		rt = r->rt;
8332	}
8333
8334	if (tag > 0 && pf_tag_packet(m, &pd, tag)) {
8335		action = PF_DROP;
8336		REASON_SET(&reason, PFRES_MEMORY);
8337	}
8338
8339	pf_scrub_ip(&m, &pd);
8340	if (pd.proto == IPPROTO_TCP && pd.act.max_mss)
8341		pf_normalize_mss(m, off, &pd);
8342
8343	if (pd.act.rtableid >= 0)
8344		M_SETFIB(m, pd.act.rtableid);
8345
8346	if (pd.act.flags & PFSTATE_SETPRIO) {
8347		if (pd.tos & IPTOS_LOWDELAY)
8348			use_2nd_queue = 1;
8349		if (vlan_set_pcp(m, pd.act.set_prio[use_2nd_queue])) {
8350			action = PF_DROP;
8351			REASON_SET(&reason, PFRES_MEMORY);
8352			pd.act.log = PF_LOG_FORCE;
8353			DPFPRINTF(PF_DEBUG_MISC,
8354			    ("pf: failed to allocate 802.1q mtag\n"));
8355		}
8356	}
8357
8358#ifdef ALTQ
8359	if (action == PF_PASS && pd.act.qid) {
8360		if (pd.pf_mtag == NULL &&
8361		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
8362			action = PF_DROP;
8363			REASON_SET(&reason, PFRES_MEMORY);
8364		} else {
8365			if (s != NULL)
8366				pd.pf_mtag->qid_hash = pf_state_hash(s);
8367			if (use_2nd_queue || (pd.tos & IPTOS_LOWDELAY))
8368				pd.pf_mtag->qid = pd.act.pqid;
8369			else
8370				pd.pf_mtag->qid = pd.act.qid;
8371			/* Add hints for ecn. */
8372			pd.pf_mtag->hdr = h;
8373		}
8374	}
8375#endif /* ALTQ */
8376
8377	/*
8378	 * connections redirected to loopback should not match sockets
8379	 * bound specifically to loopback due to security implications,
8380	 * see tcp_input() and in_pcblookup_listen().
8381	 */
8382	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
8383	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
8384	    (s->nat_rule.ptr->action == PF_RDR ||
8385	    s->nat_rule.ptr->action == PF_BINAT) &&
8386	    IN_LOOPBACK(ntohl(pd.dst->v4.s_addr)))
8387		m->m_flags |= M_SKIP_FIREWALL;
8388
8389	if (__predict_false(ip_divert_ptr != NULL) && action == PF_PASS &&
8390	    r->divert.port && !PACKET_LOOPED(&pd)) {
8391		mtag = m_tag_alloc(MTAG_PF_DIVERT, 0,
8392		    sizeof(struct pf_divert_mtag), M_NOWAIT | M_ZERO);
8393		if (mtag != NULL) {
8394			((struct pf_divert_mtag *)(mtag+1))->port =
8395			    ntohs(r->divert.port);
8396			((struct pf_divert_mtag *)(mtag+1))->idir =
8397			    (dir == PF_IN) ? PF_DIVERT_MTAG_DIR_IN :
8398			    PF_DIVERT_MTAG_DIR_OUT;
8399
8400			if (s)
8401				PF_STATE_UNLOCK(s);
8402
8403			m_tag_prepend(m, mtag);
8404			if (m->m_flags & M_FASTFWD_OURS) {
8405				if (pd.pf_mtag == NULL &&
8406				    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
8407					action = PF_DROP;
8408					REASON_SET(&reason, PFRES_MEMORY);
8409					pd.act.log = PF_LOG_FORCE;
8410					DPFPRINTF(PF_DEBUG_MISC,
8411					    ("pf: failed to allocate tag\n"));
8412				} else {
8413					pd.pf_mtag->flags |=
8414					    PF_MTAG_FLAG_FASTFWD_OURS_PRESENT;
8415					m->m_flags &= ~M_FASTFWD_OURS;
8416				}
8417			}
8418			ip_divert_ptr(*m0, dir == PF_IN);
8419			*m0 = NULL;
8420
8421			return (action);
8422		} else {
8423			/* XXX: ipfw has the same behaviour! */
8424			action = PF_DROP;
8425			REASON_SET(&reason, PFRES_MEMORY);
8426			pd.act.log = PF_LOG_FORCE;
8427			DPFPRINTF(PF_DEBUG_MISC,
8428			    ("pf: failed to allocate divert tag\n"));
8429		}
8430	}
8431	/* this flag will need revising if the pkt is forwarded */
8432	if (pd.pf_mtag)
8433		pd.pf_mtag->flags &= ~PF_MTAG_FLAG_PACKET_LOOPED;
8434
8435	if (pd.act.log) {
8436		struct pf_krule		*lr;
8437		struct pf_krule_item	*ri;
8438
8439		if (s != NULL && s->nat_rule.ptr != NULL &&
8440		    s->nat_rule.ptr->log & PF_LOG_ALL)
8441			lr = s->nat_rule.ptr;
8442		else
8443			lr = r;
8444
8445		if (pd.act.log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
8446			PFLOG_PACKET(kif, m, AF_INET, action, reason, lr, a,
8447			    ruleset, &pd, (s == NULL));
8448		if (s) {
8449			SLIST_FOREACH(ri, &s->match_rules, entry)
8450				if (ri->r->log & PF_LOG_ALL)
8451					PFLOG_PACKET(kif, m, AF_INET, action,
8452					    reason, ri->r, a, ruleset, &pd, 0);
8453		}
8454	}
8455
8456	pf_counter_u64_critical_enter();
8457	pf_counter_u64_add_protected(&kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS],
8458	    pd.tot_len);
8459	pf_counter_u64_add_protected(&kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS],
8460	    1);
8461
8462	if (action == PF_PASS || r->action == PF_DROP) {
8463		dirndx = (dir == PF_OUT);
8464		pf_counter_u64_add_protected(&r->packets[dirndx], 1);
8465		pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len);
8466		pf_update_timestamp(r);
8467
8468		if (a != NULL) {
8469			pf_counter_u64_add_protected(&a->packets[dirndx], 1);
8470			pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len);
8471		}
8472		if (s != NULL) {
8473			struct pf_krule_item	*ri;
8474
8475			if (s->nat_rule.ptr != NULL) {
8476				pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx],
8477				    1);
8478				pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx],
8479				    pd.tot_len);
8480			}
8481			if (s->src_node != NULL) {
8482				counter_u64_add(s->src_node->packets[dirndx],
8483				    1);
8484				counter_u64_add(s->src_node->bytes[dirndx],
8485				    pd.tot_len);
8486			}
8487			if (s->nat_src_node != NULL) {
8488				counter_u64_add(s->nat_src_node->packets[dirndx],
8489				    1);
8490				counter_u64_add(s->nat_src_node->bytes[dirndx],
8491				    pd.tot_len);
8492			}
8493			dirndx = (dir == s->direction) ? 0 : 1;
8494			s->packets[dirndx]++;
8495			s->bytes[dirndx] += pd.tot_len;
8496			SLIST_FOREACH(ri, &s->match_rules, entry) {
8497				pf_counter_u64_add_protected(&ri->r->packets[dirndx], 1);
8498				pf_counter_u64_add_protected(&ri->r->bytes[dirndx], pd.tot_len);
8499			}
8500		}
8501		tr = r;
8502		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
8503		if (nr != NULL && r == &V_pf_default_rule)
8504			tr = nr;
8505		if (tr->src.addr.type == PF_ADDR_TABLE)
8506			pfr_update_stats(tr->src.addr.p.tbl,
8507			    (s == NULL) ? pd.src :
8508			    &s->key[(s->direction == PF_IN)]->
8509				addr[(s->direction == PF_OUT)],
8510			    pd.af, pd.tot_len, dir == PF_OUT,
8511			    r->action == PF_PASS, tr->src.neg);
8512		if (tr->dst.addr.type == PF_ADDR_TABLE)
8513			pfr_update_stats(tr->dst.addr.p.tbl,
8514			    (s == NULL) ? pd.dst :
8515			    &s->key[(s->direction == PF_IN)]->
8516				addr[(s->direction == PF_IN)],
8517			    pd.af, pd.tot_len, dir == PF_OUT,
8518			    r->action == PF_PASS, tr->dst.neg);
8519	}
8520	pf_counter_u64_critical_exit();
8521
8522	switch (action) {
8523	case PF_SYNPROXY_DROP:
8524		m_freem(*m0);
8525	case PF_DEFER:
8526		*m0 = NULL;
8527		action = PF_PASS;
8528		break;
8529	case PF_DROP:
8530		m_freem(*m0);
8531		*m0 = NULL;
8532		break;
8533	default:
8534		/* pf_route() returns unlocked. */
8535		if (rt) {
8536			pf_route(m0, r, kif->pfik_ifp, s, &pd, inp);
8537			goto out;
8538		}
8539		if (pf_dummynet(&pd, s, r, m0) != 0) {
8540			action = PF_DROP;
8541			REASON_SET(&reason, PFRES_MEMORY);
8542		}
8543		break;
8544	}
8545
8546	SDT_PROBE4(pf, ip, test, done, action, reason, r, s);
8547
8548	if (s && action != PF_DROP) {
8549		if (!s->if_index_in && dir == PF_IN)
8550			s->if_index_in = ifp->if_index;
8551		else if (!s->if_index_out && dir == PF_OUT)
8552			s->if_index_out = ifp->if_index;
8553	}
8554
8555	if (s)
8556		PF_STATE_UNLOCK(s);
8557
8558out:
8559	pf_sctp_multihome_delayed(&pd, off, kif, s, action);
8560
8561	return (action);
8562}
8563#endif /* INET */
8564
8565#ifdef INET6
8566int
8567pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp,
8568    struct pf_rule_actions *default_actions)
8569{
8570	struct pfi_kkif		*kif;
8571	u_short			 action, reason = 0;
8572	struct mbuf		*m = *m0, *n = NULL;
8573	struct m_tag		*mtag;
8574	struct ip6_hdr		*h = NULL;
8575	struct pf_krule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
8576	struct pf_kstate	*s = NULL;
8577	struct pf_kruleset	*ruleset = NULL;
8578	struct pf_pdesc		 pd;
8579	int			 off, terminal = 0, dirndx, rh_cnt = 0, use_2nd_queue = 0;
8580	uint16_t		 tag;
8581	uint8_t			 rt;
8582
8583	PF_RULES_RLOCK_TRACKER;
8584	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
8585	M_ASSERTPKTHDR(m);
8586
8587	if (!V_pf_status.running)
8588		return (PF_PASS);
8589
8590	PF_RULES_RLOCK();
8591
8592	kif = (struct pfi_kkif *)ifp->if_pf_kif;
8593	if (__predict_false(kif == NULL)) {
8594		DPFPRINTF(PF_DEBUG_URGENT,
8595		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
8596		PF_RULES_RUNLOCK();
8597		return (PF_DROP);
8598	}
8599	if (kif->pfik_flags & PFI_IFLAG_SKIP) {
8600		PF_RULES_RUNLOCK();
8601		return (PF_PASS);
8602	}
8603
8604	if (m->m_flags & M_SKIP_FIREWALL) {
8605		PF_RULES_RUNLOCK();
8606		return (PF_PASS);
8607	}
8608
8609	/*
8610	 * If we end up changing IP addresses (e.g. binat) the stack may get
8611	 * confused and fail to send the icmp6 packet too big error. Just send
8612	 * it here, before we do any NAT.
8613	 */
8614	if (dir == PF_OUT && pflags & PFIL_FWD && IN6_LINKMTU(ifp) < pf_max_frag_size(m)) {
8615		PF_RULES_RUNLOCK();
8616		*m0 = NULL;
8617		icmp6_error(m, ICMP6_PACKET_TOO_BIG, 0, IN6_LINKMTU(ifp));
8618		return (PF_DROP);
8619	}
8620
8621	memset(&pd, 0, sizeof(pd));
8622	TAILQ_INIT(&pd.sctp_multihome_jobs);
8623	if (default_actions != NULL)
8624		memcpy(&pd.act, default_actions, sizeof(pd.act));
8625	pd.pf_mtag = pf_find_mtag(m);
8626
8627	if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_MTAG_FLAG_ROUTE_TO)) {
8628		pd.pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO;
8629
8630		ifp = ifnet_byindexgen(pd.pf_mtag->if_index,
8631		    pd.pf_mtag->if_idxgen);
8632		if (ifp == NULL || ifp->if_flags & IFF_DYING) {
8633			PF_RULES_RUNLOCK();
8634			m_freem(*m0);
8635			*m0 = NULL;
8636			return (PF_PASS);
8637		}
8638		PF_RULES_RUNLOCK();
8639		nd6_output_ifp(ifp, ifp, m,
8640                    (struct sockaddr_in6 *)&pd.pf_mtag->dst, NULL);
8641		*m0 = NULL;
8642		return (PF_PASS);
8643	}
8644
8645	if (pd.pf_mtag && pd.pf_mtag->dnpipe) {
8646		pd.act.dnpipe = pd.pf_mtag->dnpipe;
8647		pd.act.flags = pd.pf_mtag->dnflags;
8648	}
8649
8650	if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL &&
8651	    pd.pf_mtag->flags & PF_MTAG_FLAG_DUMMYNET) {
8652		pf_dummynet_flag_remove(m, pd.pf_mtag);
8653		/* Dummynet re-injects packets after they've
8654		 * completed their delay. We've already
8655		 * processed them, so pass unconditionally. */
8656		PF_RULES_RUNLOCK();
8657		return (PF_PASS);
8658	}
8659
8660	pd.sport = pd.dport = NULL;
8661	pd.ip_sum = NULL;
8662	pd.proto_sum = NULL;
8663	pd.dir = dir;
8664	pd.sidx = (dir == PF_IN) ? 0 : 1;
8665	pd.didx = (dir == PF_IN) ? 1 : 0;
8666	pd.af = AF_INET6;
8667	pd.act.rtableid = -1;
8668
8669	h = mtod(m, struct ip6_hdr *);
8670	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
8671
8672	/* We do IP header normalization and packet reassembly here */
8673	if (pf_normalize_ip6(m0, kif, &reason, &pd) != PF_PASS) {
8674		action = PF_DROP;
8675		goto done;
8676	}
8677	m = *m0;	/* pf_normalize messes with m0 */
8678	h = mtod(m, struct ip6_hdr *);
8679	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
8680
8681	/*
8682	 * we do not support jumbogram.  if we keep going, zero ip6_plen
8683	 * will do something bad, so drop the packet for now.
8684	 */
8685	if (htons(h->ip6_plen) == 0) {
8686		action = PF_DROP;
8687		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
8688		goto done;
8689	}
8690
8691	pd.src = (struct pf_addr *)&h->ip6_src;
8692	pd.dst = (struct pf_addr *)&h->ip6_dst;
8693	pd.tos = IPV6_DSCP(h);
8694	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
8695
8696	pd.proto = h->ip6_nxt;
8697	do {
8698		switch (pd.proto) {
8699		case IPPROTO_FRAGMENT:
8700			action = pf_test_fragment(&r, kif, m, h, &pd, &a,
8701			    &ruleset);
8702			if (action == PF_DROP)
8703				REASON_SET(&reason, PFRES_FRAG);
8704			goto done;
8705		case IPPROTO_ROUTING: {
8706			struct ip6_rthdr rthdr;
8707
8708			if (rh_cnt++) {
8709				DPFPRINTF(PF_DEBUG_MISC,
8710				    ("pf: IPv6 more than one rthdr\n"));
8711				action = PF_DROP;
8712				REASON_SET(&reason, PFRES_IPOPTIONS);
8713				pd.act.log = PF_LOG_FORCE;
8714				goto done;
8715			}
8716			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
8717			    &reason, pd.af)) {
8718				DPFPRINTF(PF_DEBUG_MISC,
8719				    ("pf: IPv6 short rthdr\n"));
8720				action = PF_DROP;
8721				REASON_SET(&reason, PFRES_SHORT);
8722				pd.act.log = PF_LOG_FORCE;
8723				goto done;
8724			}
8725			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
8726				DPFPRINTF(PF_DEBUG_MISC,
8727				    ("pf: IPv6 rthdr0\n"));
8728				action = PF_DROP;
8729				REASON_SET(&reason, PFRES_IPOPTIONS);
8730				pd.act.log = PF_LOG_FORCE;
8731				goto done;
8732			}
8733			/* FALLTHROUGH */
8734		}
8735		case IPPROTO_AH:
8736		case IPPROTO_HOPOPTS:
8737		case IPPROTO_DSTOPTS: {
8738			/* get next header and header length */
8739			struct ip6_ext	opt6;
8740
8741			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
8742			    NULL, &reason, pd.af)) {
8743				DPFPRINTF(PF_DEBUG_MISC,
8744				    ("pf: IPv6 short opt\n"));
8745				action = PF_DROP;
8746				pd.act.log = PF_LOG_FORCE;
8747				goto done;
8748			}
8749			if (pd.proto == IPPROTO_AH)
8750				off += (opt6.ip6e_len + 2) * 4;
8751			else
8752				off += (opt6.ip6e_len + 1) * 8;
8753			pd.proto = opt6.ip6e_nxt;
8754			/* goto the next header */
8755			break;
8756		}
8757		default:
8758			terminal++;
8759			break;
8760		}
8761	} while (!terminal);
8762
8763	/* if there's no routing header, use unmodified mbuf for checksumming */
8764	if (!n)
8765		n = m;
8766
8767	switch (pd.proto) {
8768	case IPPROTO_TCP: {
8769		if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
8770		    &action, &reason, AF_INET6)) {
8771			if (action != PF_PASS)
8772				pd.act.log |= PF_LOG_FORCE;
8773			goto done;
8774		}
8775		pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
8776		pd.sport = &pd.hdr.tcp.th_sport;
8777		pd.dport = &pd.hdr.tcp.th_dport;
8778
8779		/* Respond to SYN with a syncookie. */
8780		if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
8781		    pd.dir == PF_IN && pf_synflood_check(&pd)) {
8782			pf_syncookie_send(m, off, &pd);
8783			action = PF_DROP;
8784			break;
8785		}
8786
8787		action = pf_normalize_tcp(kif, m, 0, off, h, &pd);
8788		if (action == PF_DROP)
8789			goto done;
8790		action = pf_test_state_tcp(&s, kif, m, off, h, &pd, &reason);
8791		if (action == PF_PASS) {
8792			if (V_pfsync_update_state_ptr != NULL)
8793				V_pfsync_update_state_ptr(s);
8794			r = s->rule.ptr;
8795			a = s->anchor.ptr;
8796		} else if (s == NULL) {
8797			/* Validate remote SYN|ACK, re-create original SYN if
8798			 * valid. */
8799			if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) ==
8800			    TH_ACK && pf_syncookie_validate(&pd) &&
8801			    pd.dir == PF_IN) {
8802				struct mbuf *msyn;
8803
8804				msyn = pf_syncookie_recreate_syn(h->ip6_hlim,
8805				    off, &pd);
8806				if (msyn == NULL) {
8807					action = PF_DROP;
8808					break;
8809				}
8810
8811				action = pf_test6(dir, pflags, ifp, &msyn, inp,
8812				    &pd.act);
8813				m_freem(msyn);
8814				if (action != PF_PASS)
8815					break;
8816
8817				action = pf_test_state_tcp(&s, kif, m, off, h,
8818				    &pd, &reason);
8819				if (action != PF_PASS || s == NULL) {
8820					action = PF_DROP;
8821					break;
8822				}
8823
8824				s->src.seqhi = ntohl(pd.hdr.tcp.th_ack) - 1;
8825				s->src.seqlo = ntohl(pd.hdr.tcp.th_seq) - 1;
8826				pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_DST);
8827
8828				action = pf_synproxy(&pd, &s, &reason);
8829				break;
8830			} else {
8831				action = pf_test_rule(&r, &s, kif, m, off, &pd,
8832				    &a, &ruleset, inp);
8833			}
8834		}
8835		break;
8836	}
8837
8838	case IPPROTO_UDP: {
8839		if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
8840		    &action, &reason, AF_INET6)) {
8841			if (action != PF_PASS)
8842				pd.act.log |= PF_LOG_FORCE;
8843			goto done;
8844		}
8845		pd.sport = &pd.hdr.udp.uh_sport;
8846		pd.dport = &pd.hdr.udp.uh_dport;
8847		if (pd.hdr.udp.uh_dport == 0 ||
8848		    ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off ||
8849		    ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) {
8850			action = PF_DROP;
8851			REASON_SET(&reason, PFRES_SHORT);
8852			goto done;
8853		}
8854		action = pf_test_state_udp(&s, kif, m, off, h, &pd);
8855		if (action == PF_PASS) {
8856			if (V_pfsync_update_state_ptr != NULL)
8857				V_pfsync_update_state_ptr(s);
8858			r = s->rule.ptr;
8859			a = s->anchor.ptr;
8860		} else if (s == NULL)
8861			action = pf_test_rule(&r, &s, kif, m, off, &pd,
8862			    &a, &ruleset, inp);
8863		break;
8864	}
8865
8866	case IPPROTO_SCTP: {
8867		if (!pf_pull_hdr(m, off, &pd.hdr.sctp, sizeof(pd.hdr.sctp),
8868		    &action, &reason, AF_INET6)) {
8869			if (action != PF_PASS)
8870				pd.act.log |= PF_LOG_FORCE;
8871			goto done;
8872		}
8873		pd.sport = &pd.hdr.sctp.src_port;
8874		pd.dport = &pd.hdr.sctp.dest_port;
8875		if (pd.hdr.sctp.src_port == 0 || pd.hdr.sctp.dest_port == 0) {
8876			action = PF_DROP;
8877			REASON_SET(&reason, PFRES_SHORT);
8878			goto done;
8879		}
8880		action = pf_normalize_sctp(dir, kif, m, 0, off, h, &pd);
8881		if (action == PF_DROP)
8882			goto done;
8883		action = pf_test_state_sctp(&s, kif, m, off, h, &pd,
8884		    &reason);
8885		if (action == PF_PASS) {
8886			if (V_pfsync_update_state_ptr != NULL)
8887				V_pfsync_update_state_ptr(s);
8888			r = s->rule.ptr;
8889			a = s->anchor.ptr;
8890		} else {
8891			action = pf_test_rule(&r, &s, kif, m, off,
8892			    &pd, &a, &ruleset, inp);
8893		}
8894		break;
8895	}
8896
8897	case IPPROTO_ICMP: {
8898		action = PF_DROP;
8899		DPFPRINTF(PF_DEBUG_MISC,
8900		    ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
8901		goto done;
8902	}
8903
8904	case IPPROTO_ICMPV6: {
8905		if (!pf_pull_hdr(m, off, &pd.hdr.icmp6, sizeof(pd.hdr.icmp6),
8906		    &action, &reason, AF_INET6)) {
8907			if (action != PF_PASS)
8908				pd.act.log |= PF_LOG_FORCE;
8909			goto done;
8910		}
8911		action = pf_test_state_icmp(&s, kif, m, off, h, &pd, &reason);
8912		if (action == PF_PASS) {
8913			if (V_pfsync_update_state_ptr != NULL)
8914				V_pfsync_update_state_ptr(s);
8915			r = s->rule.ptr;
8916			a = s->anchor.ptr;
8917		} else if (s == NULL)
8918			action = pf_test_rule(&r, &s, kif, m, off, &pd,
8919			    &a, &ruleset, inp);
8920		break;
8921	}
8922
8923	default:
8924		action = pf_test_state_other(&s, kif, m, &pd);
8925		if (action == PF_PASS) {
8926			if (V_pfsync_update_state_ptr != NULL)
8927				V_pfsync_update_state_ptr(s);
8928			r = s->rule.ptr;
8929			a = s->anchor.ptr;
8930		} else if (s == NULL)
8931			action = pf_test_rule(&r, &s, kif, m, off, &pd,
8932			    &a, &ruleset, inp);
8933		break;
8934	}
8935
8936done:
8937	PF_RULES_RUNLOCK();
8938	if (n != m) {
8939		m_freem(n);
8940		n = NULL;
8941	}
8942
8943	/* handle dangerous IPv6 extension headers. */
8944	if (action == PF_PASS && rh_cnt &&
8945	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
8946		action = PF_DROP;
8947		REASON_SET(&reason, PFRES_IPOPTIONS);
8948		pd.act.log = r->log;
8949		DPFPRINTF(PF_DEBUG_MISC,
8950		    ("pf: dropping packet with dangerous v6 headers\n"));
8951	}
8952
8953	if (s) {
8954		uint8_t log = pd.act.log;
8955		memcpy(&pd.act, &s->act, sizeof(struct pf_rule_actions));
8956		pd.act.log |= log;
8957		tag = s->tag;
8958		rt = s->rt;
8959	} else {
8960		tag = r->tag;
8961		rt = r->rt;
8962	}
8963
8964	if (tag > 0 && pf_tag_packet(m, &pd, tag)) {
8965		action = PF_DROP;
8966		REASON_SET(&reason, PFRES_MEMORY);
8967	}
8968
8969	pf_scrub_ip6(&m, &pd);
8970	if (pd.proto == IPPROTO_TCP && pd.act.max_mss)
8971		pf_normalize_mss(m, off, &pd);
8972
8973	if (pd.act.rtableid >= 0)
8974		M_SETFIB(m, pd.act.rtableid);
8975
8976	if (pd.act.flags & PFSTATE_SETPRIO) {
8977		if (pd.tos & IPTOS_LOWDELAY)
8978			use_2nd_queue = 1;
8979		if (vlan_set_pcp(m, pd.act.set_prio[use_2nd_queue])) {
8980			action = PF_DROP;
8981			REASON_SET(&reason, PFRES_MEMORY);
8982			pd.act.log = PF_LOG_FORCE;
8983			DPFPRINTF(PF_DEBUG_MISC,
8984			    ("pf: failed to allocate 802.1q mtag\n"));
8985		}
8986	}
8987
8988#ifdef ALTQ
8989	if (action == PF_PASS && pd.act.qid) {
8990		if (pd.pf_mtag == NULL &&
8991		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
8992			action = PF_DROP;
8993			REASON_SET(&reason, PFRES_MEMORY);
8994		} else {
8995			if (s != NULL)
8996				pd.pf_mtag->qid_hash = pf_state_hash(s);
8997			if (pd.tos & IPTOS_LOWDELAY)
8998				pd.pf_mtag->qid = pd.act.pqid;
8999			else
9000				pd.pf_mtag->qid = pd.act.qid;
9001			/* Add hints for ecn. */
9002			pd.pf_mtag->hdr = h;
9003		}
9004	}
9005#endif /* ALTQ */
9006
9007	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
9008	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
9009	    (s->nat_rule.ptr->action == PF_RDR ||
9010	    s->nat_rule.ptr->action == PF_BINAT) &&
9011	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
9012		m->m_flags |= M_SKIP_FIREWALL;
9013
9014	/* XXX: Anybody working on it?! */
9015	if (r->divert.port)
9016		printf("pf: divert(9) is not supported for IPv6\n");
9017
9018	if (pd.act.log) {
9019		struct pf_krule		*lr;
9020		struct pf_krule_item	*ri;
9021
9022		if (s != NULL && s->nat_rule.ptr != NULL &&
9023		    s->nat_rule.ptr->log & PF_LOG_ALL)
9024			lr = s->nat_rule.ptr;
9025		else
9026			lr = r;
9027
9028		if (pd.act.log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
9029			PFLOG_PACKET(kif, m, AF_INET6, action, reason, lr, a, ruleset,
9030			    &pd, (s == NULL));
9031		if (s) {
9032			SLIST_FOREACH(ri, &s->match_rules, entry)
9033				if (ri->r->log & PF_LOG_ALL)
9034					PFLOG_PACKET(kif, m, AF_INET6, action, reason,
9035					    ri->r, a, ruleset, &pd, 0);
9036		}
9037	}
9038
9039	pf_counter_u64_critical_enter();
9040	pf_counter_u64_add_protected(&kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS],
9041	    pd.tot_len);
9042	pf_counter_u64_add_protected(&kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS],
9043	    1);
9044
9045	if (action == PF_PASS || r->action == PF_DROP) {
9046		dirndx = (dir == PF_OUT);
9047		pf_counter_u64_add_protected(&r->packets[dirndx], 1);
9048		pf_counter_u64_add_protected(&r->bytes[dirndx], pd.tot_len);
9049		if (a != NULL) {
9050			pf_counter_u64_add_protected(&a->packets[dirndx], 1);
9051			pf_counter_u64_add_protected(&a->bytes[dirndx], pd.tot_len);
9052		}
9053		if (s != NULL) {
9054			if (s->nat_rule.ptr != NULL) {
9055				pf_counter_u64_add_protected(&s->nat_rule.ptr->packets[dirndx],
9056				    1);
9057				pf_counter_u64_add_protected(&s->nat_rule.ptr->bytes[dirndx],
9058				    pd.tot_len);
9059			}
9060			if (s->src_node != NULL) {
9061				counter_u64_add(s->src_node->packets[dirndx],
9062				    1);
9063				counter_u64_add(s->src_node->bytes[dirndx],
9064				    pd.tot_len);
9065			}
9066			if (s->nat_src_node != NULL) {
9067				counter_u64_add(s->nat_src_node->packets[dirndx],
9068				    1);
9069				counter_u64_add(s->nat_src_node->bytes[dirndx],
9070				    pd.tot_len);
9071			}
9072			dirndx = (dir == s->direction) ? 0 : 1;
9073			s->packets[dirndx]++;
9074			s->bytes[dirndx] += pd.tot_len;
9075		}
9076		tr = r;
9077		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
9078		if (nr != NULL && r == &V_pf_default_rule)
9079			tr = nr;
9080		if (tr->src.addr.type == PF_ADDR_TABLE)
9081			pfr_update_stats(tr->src.addr.p.tbl,
9082			    (s == NULL) ? pd.src :
9083			    &s->key[(s->direction == PF_IN)]->addr[0],
9084			    pd.af, pd.tot_len, dir == PF_OUT,
9085			    r->action == PF_PASS, tr->src.neg);
9086		if (tr->dst.addr.type == PF_ADDR_TABLE)
9087			pfr_update_stats(tr->dst.addr.p.tbl,
9088			    (s == NULL) ? pd.dst :
9089			    &s->key[(s->direction == PF_IN)]->addr[1],
9090			    pd.af, pd.tot_len, dir == PF_OUT,
9091			    r->action == PF_PASS, tr->dst.neg);
9092	}
9093	pf_counter_u64_critical_exit();
9094
9095	switch (action) {
9096	case PF_SYNPROXY_DROP:
9097		m_freem(*m0);
9098	case PF_DEFER:
9099		*m0 = NULL;
9100		action = PF_PASS;
9101		break;
9102	case PF_DROP:
9103		m_freem(*m0);
9104		*m0 = NULL;
9105		break;
9106	default:
9107		/* pf_route6() returns unlocked. */
9108		if (rt) {
9109			pf_route6(m0, r, kif->pfik_ifp, s, &pd, inp);
9110			goto out;
9111		}
9112		if (pf_dummynet(&pd, s, r, m0) != 0) {
9113			action = PF_DROP;
9114			REASON_SET(&reason, PFRES_MEMORY);
9115		}
9116		break;
9117	}
9118
9119	if (s && action != PF_DROP) {
9120		if (!s->if_index_in && dir == PF_IN)
9121			s->if_index_in = ifp->if_index;
9122		else if (!s->if_index_out && dir == PF_OUT)
9123			s->if_index_out = ifp->if_index;
9124	}
9125
9126	if (s)
9127		PF_STATE_UNLOCK(s);
9128
9129	/* If reassembled packet passed, create new fragments. */
9130	if (action == PF_PASS && *m0 && dir == PF_OUT &&
9131	    (mtag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL)) != NULL)
9132		action = pf_refragment6(ifp, m0, mtag, pflags & PFIL_FWD);
9133
9134out:
9135	SDT_PROBE4(pf, ip, test6, done, action, reason, r, s);
9136
9137	pf_sctp_multihome_delayed(&pd, off, kif, s, action);
9138
9139	return (action);
9140}
9141#endif /* INET6 */
9142