pf.c revision 316641
1/*-
2 * Copyright (c) 2001 Daniel Hartmeier
3 * Copyright (c) 2002 - 2008 Henning Brauer
4 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 *    - Redistributions of source code must retain the above copyright
12 *      notice, this list of conditions and the following disclaimer.
13 *    - Redistributions in binary form must reproduce the above
14 *      copyright notice, this list of conditions and the following
15 *      disclaimer in the documentation and/or other materials provided
16 *      with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * Effort sponsored in part by the Defense Advanced Research Projects
32 * Agency (DARPA) and Air Force Research Laboratory, Air Force
33 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
34 *
35 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: stable/10/sys/netpfil/pf/pf.c 316641 2017-04-08 09:49:21Z kp $");
40
41#include "opt_inet.h"
42#include "opt_inet6.h"
43#include "opt_bpf.h"
44#include "opt_pf.h"
45
46#include <sys/param.h>
47#include <sys/bus.h>
48#include <sys/endian.h>
49#include <sys/hash.h>
50#include <sys/interrupt.h>
51#include <sys/kernel.h>
52#include <sys/kthread.h>
53#include <sys/limits.h>
54#include <sys/mbuf.h>
55#include <sys/md5.h>
56#include <sys/random.h>
57#include <sys/refcount.h>
58#include <sys/socket.h>
59#include <sys/sysctl.h>
60#include <sys/taskqueue.h>
61#include <sys/ucred.h>
62
63#include <net/if.h>
64#include <net/if_types.h>
65#include <net/route.h>
66#include <net/radix_mpath.h>
67#include <net/vnet.h>
68
69#include <net/pfvar.h>
70#include <net/if_pflog.h>
71#include <net/if_pfsync.h>
72
73#include <netinet/in_pcb.h>
74#include <netinet/in_var.h>
75#include <netinet/ip.h>
76#include <netinet/ip_fw.h>
77#include <netinet/ip_icmp.h>
78#include <netinet/icmp_var.h>
79#include <netinet/ip_var.h>
80#include <netinet/tcp.h>
81#include <netinet/tcp_fsm.h>
82#include <netinet/tcp_seq.h>
83#include <netinet/tcp_timer.h>
84#include <netinet/tcp_var.h>
85#include <netinet/udp.h>
86#include <netinet/udp_var.h>
87
88#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */
89
90#ifdef INET6
91#include <netinet/ip6.h>
92#include <netinet/icmp6.h>
93#include <netinet6/nd6.h>
94#include <netinet6/ip6_var.h>
95#include <netinet6/in6_pcb.h>
96#endif /* INET6 */
97
98#include <machine/in_cksum.h>
99#include <security/mac/mac_framework.h>
100
101#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
102
103/*
104 * Global variables
105 */
106
107/* state tables */
108VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[2]);
109VNET_DEFINE(struct pf_palist,		 pf_pabuf);
110VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
111VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
112VNET_DEFINE(struct pf_kstatus,		 pf_status);
113
114VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
115VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
116VNET_DEFINE(int,			 altqs_inactive_open);
117VNET_DEFINE(u_int32_t,			 ticket_pabuf);
118
119VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
120#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
121VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
122#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
123VNET_DEFINE(int,			 pf_tcp_secret_init);
124#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
125VNET_DEFINE(int,			 pf_tcp_iss_off);
126#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)
127
128/*
129 * Queue for pf_intr() sends.
130 */
131static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
132struct pf_send_entry {
133	STAILQ_ENTRY(pf_send_entry)	pfse_next;
134	struct mbuf			*pfse_m;
135	enum {
136		PFSE_IP,
137		PFSE_IP6,
138		PFSE_ICMP,
139		PFSE_ICMP6,
140	}				pfse_type;
141	union {
142		struct route		ro;
143		struct {
144			int		type;
145			int		code;
146			int		mtu;
147		} icmpopts;
148	} u;
149#define	pfse_ro		u.ro
150#define	pfse_icmp_type	u.icmpopts.type
151#define	pfse_icmp_code	u.icmpopts.code
152#define	pfse_icmp_mtu	u.icmpopts.mtu
153};
154
155STAILQ_HEAD(pf_send_head, pf_send_entry);
156static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
157#define	V_pf_sendqueue	VNET(pf_sendqueue)
158
159static struct mtx pf_sendqueue_mtx;
160#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
161#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
162
163/*
164 * Queue for pf_overload_task() tasks.
165 */
166struct pf_overload_entry {
167	SLIST_ENTRY(pf_overload_entry)	next;
168	struct pf_addr  		addr;
169	sa_family_t			af;
170	uint8_t				dir;
171	struct pf_rule  		*rule;
172};
173
174SLIST_HEAD(pf_overload_head, pf_overload_entry);
175static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
176#define V_pf_overloadqueue	VNET(pf_overloadqueue)
177static VNET_DEFINE(struct task, pf_overloadtask);
178#define	V_pf_overloadtask	VNET(pf_overloadtask)
179
180static struct mtx pf_overloadqueue_mtx;
181#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
182#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
183
184VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
185struct mtx pf_unlnkdrules_mtx;
186
187static VNET_DEFINE(uma_zone_t,	pf_sources_z);
188#define	V_pf_sources_z	VNET(pf_sources_z)
189uma_zone_t		pf_mtag_z;
190VNET_DEFINE(uma_zone_t,	 pf_state_z);
191VNET_DEFINE(uma_zone_t,	 pf_state_key_z);
192
193VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
194#define	PFID_CPUBITS	8
195#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
196#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) <<	PFID_CPUSHIFT)
197#define	PFID_MAXID	(~PFID_CPUMASK)
198CTASSERT((1 << PFID_CPUBITS) >= MAXCPU);
199
200static void		 pf_src_tree_remove_state(struct pf_state *);
201static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
202			    u_int32_t);
203static void		 pf_add_threshold(struct pf_threshold *);
204static int		 pf_check_threshold(struct pf_threshold *);
205
206static void		 pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
207			    u_int16_t *, u_int16_t *, struct pf_addr *,
208			    u_int16_t, u_int8_t, sa_family_t);
209static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
210			    struct tcphdr *, struct pf_state_peer *);
211static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
212			    struct pf_addr *, struct pf_addr *, u_int16_t,
213			    u_int16_t *, u_int16_t *, u_int16_t *,
214			    u_int16_t *, u_int8_t, sa_family_t);
215static void		 pf_send_tcp(struct mbuf *,
216			    const struct pf_rule *, sa_family_t,
217			    const struct pf_addr *, const struct pf_addr *,
218			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
219			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
220			    u_int16_t, struct ifnet *);
221static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
222			    sa_family_t, struct pf_rule *);
223static void		 pf_detach_state(struct pf_state *);
224static int		 pf_state_key_attach(struct pf_state_key *,
225			    struct pf_state_key *, struct pf_state *);
226static void		 pf_state_key_detach(struct pf_state *, int);
227static int		 pf_state_key_ctor(void *, int, void *, int);
228static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
229static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
230			    int, struct pfi_kif *, struct mbuf *, int,
231			    struct pf_pdesc *, struct pf_rule **,
232			    struct pf_ruleset **, struct inpcb *);
233static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
234			    struct pf_rule *, struct pf_pdesc *,
235			    struct pf_src_node *, struct pf_state_key *,
236			    struct pf_state_key *, struct mbuf *, int,
237			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
238			    struct pf_state **, int, u_int16_t, u_int16_t,
239			    int);
240static int		 pf_test_fragment(struct pf_rule **, int,
241			    struct pfi_kif *, struct mbuf *, void *,
242			    struct pf_pdesc *, struct pf_rule **,
243			    struct pf_ruleset **);
244static int		 pf_tcp_track_full(struct pf_state_peer *,
245			    struct pf_state_peer *, struct pf_state **,
246			    struct pfi_kif *, struct mbuf *, int,
247			    struct pf_pdesc *, u_short *, int *);
248static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
249			    struct pf_state_peer *, struct pf_state **,
250			    struct pf_pdesc *, u_short *);
251static int		 pf_test_state_tcp(struct pf_state **, int,
252			    struct pfi_kif *, struct mbuf *, int,
253			    void *, struct pf_pdesc *, u_short *);
254static int		 pf_test_state_udp(struct pf_state **, int,
255			    struct pfi_kif *, struct mbuf *, int,
256			    void *, struct pf_pdesc *);
257static int		 pf_test_state_icmp(struct pf_state **, int,
258			    struct pfi_kif *, struct mbuf *, int,
259			    void *, struct pf_pdesc *, u_short *);
260static int		 pf_test_state_other(struct pf_state **, int,
261			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
262static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
263			    sa_family_t);
264static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
265			    sa_family_t);
266static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
267				int, u_int16_t);
268static int		 pf_check_proto_cksum(struct mbuf *, int, int,
269			    u_int8_t, sa_family_t);
270static void		 pf_print_state_parts(struct pf_state *,
271			    struct pf_state_key *, struct pf_state_key *);
272static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
273			    struct pf_addr_wrap *);
274static struct pf_state	*pf_find_state(struct pfi_kif *,
275			    struct pf_state_key_cmp *, u_int);
276static int		 pf_src_connlimit(struct pf_state **);
277static void		 pf_overload_task(void *v, int pending);
278static int		 pf_insert_src_node(struct pf_src_node **,
279			    struct pf_rule *, struct pf_addr *, sa_family_t);
280static u_int		 pf_purge_expired_states(u_int, int);
281static void		 pf_purge_unlinked_rules(void);
282static int		 pf_mtag_uminit(void *, int, int);
283static void		 pf_mtag_free(struct m_tag *);
284#ifdef INET
285static void		 pf_route(struct mbuf **, struct pf_rule *, int,
286			    struct ifnet *, struct pf_state *,
287			    struct pf_pdesc *);
288#endif /* INET */
289#ifdef INET6
290static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
291			    struct pf_addr *, u_int8_t);
292static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
293			    struct ifnet *, struct pf_state *,
294			    struct pf_pdesc *);
295#endif /* INET6 */
296
297int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
298
299VNET_DECLARE(int, pf_end_threads);
300
301VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
302
303#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
304				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
305
306#define	STATE_LOOKUP(i, k, d, s, pd)					\
307	do {								\
308		(s) = pf_find_state((i), (k), (d));			\
309		if ((s) == NULL)					\
310			return (PF_DROP);				\
311		if (PACKET_LOOPED(pd))					\
312			return (PF_PASS);				\
313		if ((d) == PF_OUT &&					\
314		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
315		    (s)->rule.ptr->direction == PF_OUT) ||		\
316		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
317		    (s)->rule.ptr->direction == PF_IN)) &&		\
318		    (s)->rt_kif != NULL &&				\
319		    (s)->rt_kif != (i))					\
320			return (PF_PASS);				\
321	} while (0)
322
323#define	BOUND_IFACE(r, k) \
324	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
325
326#define	STATE_INC_COUNTERS(s)						\
327	do {								\
328		counter_u64_add(s->rule.ptr->states_cur, 1);		\
329		counter_u64_add(s->rule.ptr->states_tot, 1);		\
330		if (s->anchor.ptr != NULL) {				\
331			counter_u64_add(s->anchor.ptr->states_cur, 1);	\
332			counter_u64_add(s->anchor.ptr->states_tot, 1);	\
333		}							\
334		if (s->nat_rule.ptr != NULL) {				\
335			counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
336			counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
337		}							\
338	} while (0)
339
340#define	STATE_DEC_COUNTERS(s)						\
341	do {								\
342		if (s->nat_rule.ptr != NULL)				\
343			counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
344		if (s->anchor.ptr != NULL)				\
345			counter_u64_add(s->anchor.ptr->states_cur, -1);	\
346		counter_u64_add(s->rule.ptr->states_cur, -1);		\
347	} while (0)
348
349static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
350VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
351VNET_DEFINE(struct pf_idhash *, pf_idhash);
352VNET_DEFINE(struct pf_srchash *, pf_srchash);
353
354SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");
355
356u_long	pf_hashmask;
357u_long	pf_srchashmask;
358static u_long	pf_hashsize;
359static u_long	pf_srchashsize;
360
361SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
362    &pf_hashsize, 0, "Size of pf(4) states hashtable");
363SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
364    &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
365
366VNET_DEFINE(void *, pf_swi_cookie);
367
368VNET_DEFINE(uint32_t, pf_hashseed);
369#define	V_pf_hashseed	VNET(pf_hashseed)
370
371int
372pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
373{
374
375	switch (af) {
376#ifdef INET
377	case AF_INET:
378		if (a->addr32[0] > b->addr32[0])
379			return (1);
380		if (a->addr32[0] < b->addr32[0])
381			return (-1);
382		break;
383#endif /* INET */
384#ifdef INET6
385	case AF_INET6:
386		if (a->addr32[3] > b->addr32[3])
387			return (1);
388		if (a->addr32[3] < b->addr32[3])
389			return (-1);
390		if (a->addr32[2] > b->addr32[2])
391			return (1);
392		if (a->addr32[2] < b->addr32[2])
393			return (-1);
394		if (a->addr32[1] > b->addr32[1])
395			return (1);
396		if (a->addr32[1] < b->addr32[1])
397			return (-1);
398		if (a->addr32[0] > b->addr32[0])
399			return (1);
400		if (a->addr32[0] < b->addr32[0])
401			return (-1);
402		break;
403#endif /* INET6 */
404	default:
405		panic("%s: unknown address family %u", __func__, af);
406	}
407	return (0);
408}
409
410static __inline uint32_t
411pf_hashkey(struct pf_state_key *sk)
412{
413	uint32_t h;
414
415	h = murmur3_aligned_32((uint32_t *)sk,
416			       sizeof(struct pf_state_key_cmp),
417			       V_pf_hashseed);
418
419	return (h & pf_hashmask);
420}
421
422static __inline uint32_t
423pf_hashsrc(struct pf_addr *addr, sa_family_t af)
424{
425	uint32_t h;
426
427	switch (af) {
428	case AF_INET:
429		h = murmur3_aligned_32((uint32_t *)&addr->v4,
430				       sizeof(addr->v4), V_pf_hashseed);
431		break;
432	case AF_INET6:
433		h = murmur3_aligned_32((uint32_t *)&addr->v6,
434				       sizeof(addr->v6), V_pf_hashseed);
435		break;
436	default:
437		panic("%s: unknown address family %u", __func__, af);
438	}
439
440	return (h & pf_srchashmask);
441}
442
443#ifdef ALTQ
444static int
445pf_state_hash(struct pf_state *s)
446{
447	u_int32_t hv = (intptr_t)s / sizeof(*s);
448
449	hv ^= crc32(&s->src, sizeof(s->src));
450	hv ^= crc32(&s->dst, sizeof(s->dst));
451	if (hv == 0)
452		hv = 1;
453	return (hv);
454}
455#endif
456
457#ifdef INET6
458void
459pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
460{
461	switch (af) {
462#ifdef INET
463	case AF_INET:
464		dst->addr32[0] = src->addr32[0];
465		break;
466#endif /* INET */
467	case AF_INET6:
468		dst->addr32[0] = src->addr32[0];
469		dst->addr32[1] = src->addr32[1];
470		dst->addr32[2] = src->addr32[2];
471		dst->addr32[3] = src->addr32[3];
472		break;
473	}
474}
475#endif /* INET6 */
476
477static void
478pf_init_threshold(struct pf_threshold *threshold,
479    u_int32_t limit, u_int32_t seconds)
480{
481	threshold->limit = limit * PF_THRESHOLD_MULT;
482	threshold->seconds = seconds;
483	threshold->count = 0;
484	threshold->last = time_uptime;
485}
486
487static void
488pf_add_threshold(struct pf_threshold *threshold)
489{
490	u_int32_t t = time_uptime, diff = t - threshold->last;
491
492	if (diff >= threshold->seconds)
493		threshold->count = 0;
494	else
495		threshold->count -= threshold->count * diff /
496		    threshold->seconds;
497	threshold->count += PF_THRESHOLD_MULT;
498	threshold->last = t;
499}
500
501static int
502pf_check_threshold(struct pf_threshold *threshold)
503{
504	return (threshold->count > threshold->limit);
505}
506
507static int
508pf_src_connlimit(struct pf_state **state)
509{
510	struct pf_overload_entry *pfoe;
511	int bad = 0;
512
513	PF_STATE_LOCK_ASSERT(*state);
514
515	(*state)->src_node->conn++;
516	(*state)->src.tcp_est = 1;
517	pf_add_threshold(&(*state)->src_node->conn_rate);
518
519	if ((*state)->rule.ptr->max_src_conn &&
520	    (*state)->rule.ptr->max_src_conn <
521	    (*state)->src_node->conn) {
522		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
523		bad++;
524	}
525
526	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
527	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
528		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
529		bad++;
530	}
531
532	if (!bad)
533		return (0);
534
535	/* Kill this state. */
536	(*state)->timeout = PFTM_PURGE;
537	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
538
539	if ((*state)->rule.ptr->overload_tbl == NULL)
540		return (1);
541
542	/* Schedule overloading and flushing task. */
543	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
544	if (pfoe == NULL)
545		return (1);	/* too bad :( */
546
547	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
548	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
549	pfoe->rule = (*state)->rule.ptr;
550	pfoe->dir = (*state)->direction;
551	PF_OVERLOADQ_LOCK();
552	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
553	PF_OVERLOADQ_UNLOCK();
554	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);
555
556	return (1);
557}
558
559static void
560pf_overload_task(void *v, int pending)
561{
562	struct pf_overload_head queue;
563	struct pfr_addr p;
564	struct pf_overload_entry *pfoe, *pfoe1;
565	uint32_t killed = 0;
566
567	CURVNET_SET((struct vnet *)v);
568
569	PF_OVERLOADQ_LOCK();
570	queue = V_pf_overloadqueue;
571	SLIST_INIT(&V_pf_overloadqueue);
572	PF_OVERLOADQ_UNLOCK();
573
574	bzero(&p, sizeof(p));
575	SLIST_FOREACH(pfoe, &queue, next) {
576		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
577		if (V_pf_status.debug >= PF_DEBUG_MISC) {
578			printf("%s: blocking address ", __func__);
579			pf_print_host(&pfoe->addr, 0, pfoe->af);
580			printf("\n");
581		}
582
583		p.pfra_af = pfoe->af;
584		switch (pfoe->af) {
585#ifdef INET
586		case AF_INET:
587			p.pfra_net = 32;
588			p.pfra_ip4addr = pfoe->addr.v4;
589			break;
590#endif
591#ifdef INET6
592		case AF_INET6:
593			p.pfra_net = 128;
594			p.pfra_ip6addr = pfoe->addr.v6;
595			break;
596#endif
597		}
598
599		PF_RULES_WLOCK();
600		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
601		PF_RULES_WUNLOCK();
602	}
603
604	/*
605	 * Remove those entries, that don't need flushing.
606	 */
607	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
608		if (pfoe->rule->flush == 0) {
609			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
610			free(pfoe, M_PFTEMP);
611		} else
612			counter_u64_add(
613			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);
614
615	/* If nothing to flush, return. */
616	if (SLIST_EMPTY(&queue)) {
617		CURVNET_RESTORE();
618		return;
619	}
620
621	for (int i = 0; i <= pf_hashmask; i++) {
622		struct pf_idhash *ih = &V_pf_idhash[i];
623		struct pf_state_key *sk;
624		struct pf_state *s;
625
626		PF_HASHROW_LOCK(ih);
627		LIST_FOREACH(s, &ih->states, entry) {
628		    sk = s->key[PF_SK_WIRE];
629		    SLIST_FOREACH(pfoe, &queue, next)
630			if (sk->af == pfoe->af &&
631			    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
632			    pfoe->rule == s->rule.ptr) &&
633			    ((pfoe->dir == PF_OUT &&
634			    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
635			    (pfoe->dir == PF_IN &&
636			    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
637				s->timeout = PFTM_PURGE;
638				s->src.state = s->dst.state = TCPS_CLOSED;
639				killed++;
640			}
641		}
642		PF_HASHROW_UNLOCK(ih);
643	}
644	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
645		free(pfoe, M_PFTEMP);
646	if (V_pf_status.debug >= PF_DEBUG_MISC)
647		printf("%s: %u states killed", __func__, killed);
648
649	CURVNET_RESTORE();
650}
651
652/*
653 * Can return locked on failure, so that we can consistently
654 * allocate and insert a new one.
655 */
656struct pf_src_node *
657pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
658	int returnlocked)
659{
660	struct pf_srchash *sh;
661	struct pf_src_node *n;
662
663	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);
664
665	sh = &V_pf_srchash[pf_hashsrc(src, af)];
666	PF_HASHROW_LOCK(sh);
667	LIST_FOREACH(n, &sh->nodes, entry)
668		if (n->rule.ptr == rule && n->af == af &&
669		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
670		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
671			break;
672	if (n != NULL) {
673		n->states++;
674		PF_HASHROW_UNLOCK(sh);
675	} else if (returnlocked == 0)
676		PF_HASHROW_UNLOCK(sh);
677
678	return (n);
679}
680
681static int
682pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
683    struct pf_addr *src, sa_family_t af)
684{
685
686	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
687	    rule->rpool.opts & PF_POOL_STICKYADDR),
688	    ("%s for non-tracking rule %p", __func__, rule));
689
690	if (*sn == NULL)
691		*sn = pf_find_src_node(src, rule, af, 1);
692
693	if (*sn == NULL) {
694		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];
695
696		PF_HASHROW_ASSERT(sh);
697
698		if (!rule->max_src_nodes ||
699		    counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
700			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
701		else
702			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
703			    1);
704		if ((*sn) == NULL) {
705			PF_HASHROW_UNLOCK(sh);
706			return (-1);
707		}
708
709		pf_init_threshold(&(*sn)->conn_rate,
710		    rule->max_src_conn_rate.limit,
711		    rule->max_src_conn_rate.seconds);
712
713		(*sn)->af = af;
714		(*sn)->rule.ptr = rule;
715		PF_ACPY(&(*sn)->addr, src, af);
716		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
717		(*sn)->creation = time_uptime;
718		(*sn)->ruletype = rule->action;
719		(*sn)->states = 1;
720		if ((*sn)->rule.ptr != NULL)
721			counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
722		PF_HASHROW_UNLOCK(sh);
723		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
724	} else {
725		if (rule->max_src_states &&
726		    (*sn)->states >= rule->max_src_states) {
727			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
728			    1);
729			return (-1);
730		}
731	}
732	return (0);
733}
734
735void
736pf_unlink_src_node(struct pf_src_node *src)
737{
738
739	PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
740	LIST_REMOVE(src, entry);
741	if (src->rule.ptr)
742		counter_u64_add(src->rule.ptr->src_nodes, -1);
743}
744
745u_int
746pf_free_src_nodes(struct pf_src_node_list *head)
747{
748	struct pf_src_node *sn, *tmp;
749	u_int count = 0;
750
751	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
752		uma_zfree(V_pf_sources_z, sn);
753		count++;
754	}
755
756	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);
757
758	return (count);
759}
760
761void
762pf_mtag_initialize()
763{
764
765	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
766	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
767	    UMA_ALIGN_PTR, 0);
768}
769
770/* Per-vnet data storage structures initialization. */
771void
772pf_initialize()
773{
774	struct pf_keyhash	*kh;
775	struct pf_idhash	*ih;
776	struct pf_srchash	*sh;
777	u_int i;
778
779	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize);
780	if (pf_hashsize == 0 || !powerof2(pf_hashsize))
781		pf_hashsize = PF_HASHSIZ;
782	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &pf_srchashsize);
783	if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
784		pf_srchashsize = PF_HASHSIZ / 4;
785
786	V_pf_hashseed = arc4random();
787
788	/* States and state keys storage. */
789	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
790	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
791	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
792	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
793	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");
794
795	V_pf_state_key_z = uma_zcreate("pf state keys",
796	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
797	    UMA_ALIGN_PTR, 0);
798	V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash),
799	    M_PFHASH, M_WAITOK | M_ZERO);
800	V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash),
801	    M_PFHASH, M_WAITOK | M_ZERO);
802	pf_hashmask = pf_hashsize - 1;
803	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
804	    i++, kh++, ih++) {
805		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
806		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
807	}
808
809	/* Source nodes. */
810	V_pf_sources_z = uma_zcreate("pf source nodes",
811	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
812	    0);
813	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
814	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
815	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
816	V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash),
817	  M_PFHASH, M_WAITOK|M_ZERO);
818	pf_srchashmask = pf_srchashsize - 1;
819	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
820		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
821
822	/* ALTQ */
823	TAILQ_INIT(&V_pf_altqs[0]);
824	TAILQ_INIT(&V_pf_altqs[1]);
825	TAILQ_INIT(&V_pf_pabuf);
826	V_pf_altqs_active = &V_pf_altqs[0];
827	V_pf_altqs_inactive = &V_pf_altqs[1];
828
829
830	/* Send & overload+flush queues. */
831	STAILQ_INIT(&V_pf_sendqueue);
832	SLIST_INIT(&V_pf_overloadqueue);
833	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
834	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
835	mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
836	    MTX_DEF);
837
838	/* Unlinked, but may be referenced rules. */
839	TAILQ_INIT(&V_pf_unlinked_rules);
840	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
841}
842
843void
844pf_mtag_cleanup()
845{
846
847	uma_zdestroy(pf_mtag_z);
848}
849
850void
851pf_cleanup()
852{
853	struct pf_keyhash	*kh;
854	struct pf_idhash	*ih;
855	struct pf_srchash	*sh;
856	struct pf_send_entry	*pfse, *next;
857	u_int i;
858
859	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
860	    i++, kh++, ih++) {
861		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
862		    __func__));
863		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
864		    __func__));
865		mtx_destroy(&kh->lock);
866		mtx_destroy(&ih->lock);
867	}
868	free(V_pf_keyhash, M_PFHASH);
869	free(V_pf_idhash, M_PFHASH);
870
871	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
872		KASSERT(LIST_EMPTY(&sh->nodes),
873		    ("%s: source node hash not empty", __func__));
874		mtx_destroy(&sh->lock);
875	}
876	free(V_pf_srchash, M_PFHASH);
877
878	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
879		m_freem(pfse->pfse_m);
880		free(pfse, M_PFTEMP);
881	}
882
883	mtx_destroy(&pf_sendqueue_mtx);
884	mtx_destroy(&pf_overloadqueue_mtx);
885	mtx_destroy(&pf_unlnkdrules_mtx);
886
887	uma_zdestroy(V_pf_sources_z);
888	uma_zdestroy(V_pf_state_z);
889	uma_zdestroy(V_pf_state_key_z);
890}
891
892static int
893pf_mtag_uminit(void *mem, int size, int how)
894{
895	struct m_tag *t;
896
897	t = (struct m_tag *)mem;
898	t->m_tag_cookie = MTAG_ABI_COMPAT;
899	t->m_tag_id = PACKET_TAG_PF;
900	t->m_tag_len = sizeof(struct pf_mtag);
901	t->m_tag_free = pf_mtag_free;
902
903	return (0);
904}
905
906static void
907pf_mtag_free(struct m_tag *t)
908{
909
910	uma_zfree(pf_mtag_z, t);
911}
912
913struct pf_mtag *
914pf_get_mtag(struct mbuf *m)
915{
916	struct m_tag *mtag;
917
918	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
919		return ((struct pf_mtag *)(mtag + 1));
920
921	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
922	if (mtag == NULL)
923		return (NULL);
924	bzero(mtag + 1, sizeof(struct pf_mtag));
925	m_tag_prepend(m, mtag);
926
927	return ((struct pf_mtag *)(mtag + 1));
928}
929
930static int
931pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
932    struct pf_state *s)
933{
934	struct pf_keyhash	*khs, *khw, *kh;
935	struct pf_state_key	*sk, *cur;
936	struct pf_state		*si, *olds = NULL;
937	int idx;
938
939	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
940	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
941	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));
942
943	/*
944	 * We need to lock hash slots of both keys. To avoid deadlock
945	 * we always lock the slot with lower address first. Unlock order
946	 * isn't important.
947	 *
948	 * We also need to lock ID hash slot before dropping key
949	 * locks. On success we return with ID hash slot locked.
950	 */
951
952	if (skw == sks) {
953		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
954		PF_HASHROW_LOCK(khs);
955	} else {
956		khs = &V_pf_keyhash[pf_hashkey(sks)];
957		khw = &V_pf_keyhash[pf_hashkey(skw)];
958		if (khs == khw) {
959			PF_HASHROW_LOCK(khs);
960		} else if (khs < khw) {
961			PF_HASHROW_LOCK(khs);
962			PF_HASHROW_LOCK(khw);
963		} else {
964			PF_HASHROW_LOCK(khw);
965			PF_HASHROW_LOCK(khs);
966		}
967	}
968
969#define	KEYS_UNLOCK()	do {			\
970	if (khs != khw) {			\
971		PF_HASHROW_UNLOCK(khs);		\
972		PF_HASHROW_UNLOCK(khw);		\
973	} else					\
974		PF_HASHROW_UNLOCK(khs);		\
975} while (0)
976
977	/*
978	 * First run: start with wire key.
979	 */
980	sk = skw;
981	kh = khw;
982	idx = PF_SK_WIRE;
983
984keyattach:
985	LIST_FOREACH(cur, &kh->keys, entry)
986		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
987			break;
988
989	if (cur != NULL) {
990		/* Key exists. Check for same kif, if none, add to key. */
991		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
992			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];
993
994			PF_HASHROW_LOCK(ih);
995			if (si->kif == s->kif &&
996			    si->direction == s->direction) {
997				if (sk->proto == IPPROTO_TCP &&
998				    si->src.state >= TCPS_FIN_WAIT_2 &&
999				    si->dst.state >= TCPS_FIN_WAIT_2) {
1000					/*
1001					 * New state matches an old >FIN_WAIT_2
1002					 * state. We can't drop key hash locks,
1003					 * thus we can't unlink it properly.
1004					 *
1005					 * As a workaround we drop it into
1006					 * TCPS_CLOSED state, schedule purge
1007					 * ASAP and push it into the very end
1008					 * of the slot TAILQ, so that it won't
1009					 * conflict with our new state.
1010					 */
1011					si->src.state = si->dst.state =
1012					    TCPS_CLOSED;
1013					si->timeout = PFTM_PURGE;
1014					olds = si;
1015				} else {
1016					if (V_pf_status.debug >= PF_DEBUG_MISC) {
1017						printf("pf: %s key attach "
1018						    "failed on %s: ",
1019						    (idx == PF_SK_WIRE) ?
1020						    "wire" : "stack",
1021						    s->kif->pfik_name);
1022						pf_print_state_parts(s,
1023						    (idx == PF_SK_WIRE) ?
1024						    sk : NULL,
1025						    (idx == PF_SK_STACK) ?
1026						    sk : NULL);
1027						printf(", existing: ");
1028						pf_print_state_parts(si,
1029						    (idx == PF_SK_WIRE) ?
1030						    sk : NULL,
1031						    (idx == PF_SK_STACK) ?
1032						    sk : NULL);
1033						printf("\n");
1034					}
1035					PF_HASHROW_UNLOCK(ih);
1036					KEYS_UNLOCK();
1037					uma_zfree(V_pf_state_key_z, sk);
1038					if (idx == PF_SK_STACK)
1039						pf_detach_state(s);
1040					return (EEXIST); /* collision! */
1041				}
1042			}
1043			PF_HASHROW_UNLOCK(ih);
1044		}
1045		uma_zfree(V_pf_state_key_z, sk);
1046		s->key[idx] = cur;
1047	} else {
1048		LIST_INSERT_HEAD(&kh->keys, sk, entry);
1049		s->key[idx] = sk;
1050	}
1051
1052stateattach:
1053	/* List is sorted, if-bound states before floating. */
1054	if (s->kif == V_pfi_all)
1055		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
1056	else
1057		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);
1058
1059	if (olds) {
1060		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
1061		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
1062		    key_list[idx]);
1063		olds = NULL;
1064	}
1065
1066	/*
1067	 * Attach done. See how should we (or should not?)
1068	 * attach a second key.
1069	 */
1070	if (sks == skw) {
1071		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1072		idx = PF_SK_STACK;
1073		sks = NULL;
1074		goto stateattach;
1075	} else if (sks != NULL) {
1076		/*
1077		 * Continue attaching with stack key.
1078		 */
1079		sk = sks;
1080		kh = khs;
1081		idx = PF_SK_STACK;
1082		sks = NULL;
1083		goto keyattach;
1084	}
1085
1086	PF_STATE_LOCK(s);
1087	KEYS_UNLOCK();
1088
1089	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
1090	    ("%s failure", __func__));
1091
1092	return (0);
1093#undef	KEYS_UNLOCK
1094}
1095
1096static void
1097pf_detach_state(struct pf_state *s)
1098{
1099	struct pf_state_key *sks = s->key[PF_SK_STACK];
1100	struct pf_keyhash *kh;
1101
1102	if (sks != NULL) {
1103		kh = &V_pf_keyhash[pf_hashkey(sks)];
1104		PF_HASHROW_LOCK(kh);
1105		if (s->key[PF_SK_STACK] != NULL)
1106			pf_state_key_detach(s, PF_SK_STACK);
1107		/*
1108		 * If both point to same key, then we are done.
1109		 */
1110		if (sks == s->key[PF_SK_WIRE]) {
1111			pf_state_key_detach(s, PF_SK_WIRE);
1112			PF_HASHROW_UNLOCK(kh);
1113			return;
1114		}
1115		PF_HASHROW_UNLOCK(kh);
1116	}
1117
1118	if (s->key[PF_SK_WIRE] != NULL) {
1119		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
1120		PF_HASHROW_LOCK(kh);
1121		if (s->key[PF_SK_WIRE] != NULL)
1122			pf_state_key_detach(s, PF_SK_WIRE);
1123		PF_HASHROW_UNLOCK(kh);
1124	}
1125}
1126
1127static void
1128pf_state_key_detach(struct pf_state *s, int idx)
1129{
1130	struct pf_state_key *sk = s->key[idx];
1131#ifdef INVARIANTS
1132	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];
1133
1134	PF_HASHROW_ASSERT(kh);
1135#endif
1136	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
1137	s->key[idx] = NULL;
1138
1139	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
1140		LIST_REMOVE(sk, entry);
1141		uma_zfree(V_pf_state_key_z, sk);
1142	}
1143}
1144
1145static int
1146pf_state_key_ctor(void *mem, int size, void *arg, int flags)
1147{
1148	struct pf_state_key *sk = mem;
1149
1150	bzero(sk, sizeof(struct pf_state_key_cmp));
1151	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
1152	TAILQ_INIT(&sk->states[PF_SK_STACK]);
1153
1154	return (0);
1155}
1156
1157struct pf_state_key *
1158pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
1159	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
1160{
1161	struct pf_state_key *sk;
1162
1163	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1164	if (sk == NULL)
1165		return (NULL);
1166
1167	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
1168	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
1169	sk->port[pd->sidx] = sport;
1170	sk->port[pd->didx] = dport;
1171	sk->proto = pd->proto;
1172	sk->af = pd->af;
1173
1174	return (sk);
1175}
1176
1177struct pf_state_key *
1178pf_state_key_clone(struct pf_state_key *orig)
1179{
1180	struct pf_state_key *sk;
1181
1182	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1183	if (sk == NULL)
1184		return (NULL);
1185
1186	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));
1187
1188	return (sk);
1189}
1190
1191int
1192pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
1193    struct pf_state_key *sks, struct pf_state *s)
1194{
1195	struct pf_idhash *ih;
1196	struct pf_state *cur;
1197	int error;
1198
1199	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
1200	    ("%s: sks not pristine", __func__));
1201	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
1202	    ("%s: skw not pristine", __func__));
1203	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1204
1205	s->kif = kif;
1206
1207	if (s->id == 0 && s->creatorid == 0) {
1208		/* XXX: should be atomic, but probability of collision low */
1209		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
1210			V_pf_stateid[curcpu] = 1;
1211		s->id |= (uint64_t )curcpu << PFID_CPUSHIFT;
1212		s->id = htobe64(s->id);
1213		s->creatorid = V_pf_status.hostid;
1214	}
1215
1216	/* Returns with ID locked on success. */
1217	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
1218		return (error);
1219
1220	ih = &V_pf_idhash[PF_IDHASH(s)];
1221	PF_HASHROW_ASSERT(ih);
1222	LIST_FOREACH(cur, &ih->states, entry)
1223		if (cur->id == s->id && cur->creatorid == s->creatorid)
1224			break;
1225
1226	if (cur != NULL) {
1227		PF_HASHROW_UNLOCK(ih);
1228		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1229			printf("pf: state ID collision: "
1230			    "id: %016llx creatorid: %08x\n",
1231			    (unsigned long long)be64toh(s->id),
1232			    ntohl(s->creatorid));
1233		}
1234		pf_detach_state(s);
1235		return (EEXIST);
1236	}
1237	LIST_INSERT_HEAD(&ih->states, s, entry);
1238	/* One for keys, one for ID hash. */
1239	refcount_init(&s->refs, 2);
1240
1241	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
1242	if (pfsync_insert_state_ptr != NULL)
1243		pfsync_insert_state_ptr(s);
1244
1245	/* Returns locked. */
1246	return (0);
1247}
1248
1249/*
1250 * Find state by ID: returns with locked row on success.
1251 */
1252struct pf_state *
1253pf_find_state_byid(uint64_t id, uint32_t creatorid)
1254{
1255	struct pf_idhash *ih;
1256	struct pf_state *s;
1257
1258	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1259
1260	ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];
1261
1262	PF_HASHROW_LOCK(ih);
1263	LIST_FOREACH(s, &ih->states, entry)
1264		if (s->id == id && s->creatorid == creatorid)
1265			break;
1266
1267	if (s == NULL)
1268		PF_HASHROW_UNLOCK(ih);
1269
1270	return (s);
1271}
1272
1273/*
1274 * Find state by key.
1275 * Returns with ID hash slot locked on success.
1276 */
1277static struct pf_state *
1278pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
1279{
1280	struct pf_keyhash	*kh;
1281	struct pf_state_key	*sk;
1282	struct pf_state		*s;
1283	int idx;
1284
1285	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1286
1287	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1288
1289	PF_HASHROW_LOCK(kh);
1290	LIST_FOREACH(sk, &kh->keys, entry)
1291		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1292			break;
1293	if (sk == NULL) {
1294		PF_HASHROW_UNLOCK(kh);
1295		return (NULL);
1296	}
1297
1298	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);
1299
1300	/* List is sorted, if-bound states before floating ones. */
1301	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
1302		if (s->kif == V_pfi_all || s->kif == kif) {
1303			PF_STATE_LOCK(s);
1304			PF_HASHROW_UNLOCK(kh);
1305			if (s->timeout >= PFTM_MAX) {
1306				/*
1307				 * State is either being processed by
1308				 * pf_unlink_state() in an other thread, or
1309				 * is scheduled for immediate expiry.
1310				 */
1311				PF_STATE_UNLOCK(s);
1312				return (NULL);
1313			}
1314			return (s);
1315		}
1316	PF_HASHROW_UNLOCK(kh);
1317
1318	return (NULL);
1319}
1320
1321struct pf_state *
1322pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1323{
1324	struct pf_keyhash	*kh;
1325	struct pf_state_key	*sk;
1326	struct pf_state		*s, *ret = NULL;
1327	int			 idx, inout = 0;
1328
1329	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1330
1331	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1332
1333	PF_HASHROW_LOCK(kh);
1334	LIST_FOREACH(sk, &kh->keys, entry)
1335		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1336			break;
1337	if (sk == NULL) {
1338		PF_HASHROW_UNLOCK(kh);
1339		return (NULL);
1340	}
1341	switch (dir) {
1342	case PF_IN:
1343		idx = PF_SK_WIRE;
1344		break;
1345	case PF_OUT:
1346		idx = PF_SK_STACK;
1347		break;
1348	case PF_INOUT:
1349		idx = PF_SK_WIRE;
1350		inout = 1;
1351		break;
1352	default:
1353		panic("%s: dir %u", __func__, dir);
1354	}
1355second_run:
1356	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
1357		if (more == NULL) {
1358			PF_HASHROW_UNLOCK(kh);
1359			return (s);
1360		}
1361
1362		if (ret)
1363			(*more)++;
1364		else
1365			ret = s;
1366	}
1367	if (inout == 1) {
1368		inout = 0;
1369		idx = PF_SK_STACK;
1370		goto second_run;
1371	}
1372	PF_HASHROW_UNLOCK(kh);
1373
1374	return (ret);
1375}
1376
1377/* END state table stuff */
1378
1379static void
1380pf_send(struct pf_send_entry *pfse)
1381{
1382
1383	PF_SENDQ_LOCK();
1384	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
1385	PF_SENDQ_UNLOCK();
1386	swi_sched(V_pf_swi_cookie, 0);
1387}
1388
1389void
1390pf_intr(void *v)
1391{
1392	struct pf_send_head queue;
1393	struct pf_send_entry *pfse, *next;
1394
1395	CURVNET_SET((struct vnet *)v);
1396
1397	PF_SENDQ_LOCK();
1398	queue = V_pf_sendqueue;
1399	STAILQ_INIT(&V_pf_sendqueue);
1400	PF_SENDQ_UNLOCK();
1401
1402	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
1403		switch (pfse->pfse_type) {
1404#ifdef INET
1405		case PFSE_IP:
1406			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
1407			break;
1408		case PFSE_ICMP:
1409			icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
1410			    pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
1411			break;
1412#endif /* INET */
1413#ifdef INET6
1414		case PFSE_IP6:
1415			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
1416			    NULL);
1417			break;
1418		case PFSE_ICMP6:
1419			icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
1420			    pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
1421			break;
1422#endif /* INET6 */
1423		default:
1424			panic("%s: unknown type", __func__);
1425		}
1426		free(pfse, M_PFTEMP);
1427	}
1428	CURVNET_RESTORE();
1429}
1430
1431void
1432pf_purge_thread(void *v)
1433{
1434	u_int idx = 0;
1435
1436	CURVNET_SET((struct vnet *)v);
1437
1438	for (;;) {
1439		PF_RULES_RLOCK();
1440		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);
1441
1442		if (V_pf_end_threads) {
1443			/*
1444			 * To cleanse up all kifs and rules we need
1445			 * two runs: first one clears reference flags,
1446			 * then pf_purge_expired_states() doesn't
1447			 * raise them, and then second run frees.
1448			 */
1449			PF_RULES_RUNLOCK();
1450			pf_purge_unlinked_rules();
1451			pfi_kif_purge();
1452
1453			/*
1454			 * Now purge everything.
1455			 */
1456			pf_purge_expired_states(0, pf_hashmask);
1457			pf_purge_expired_fragments();
1458			pf_purge_expired_src_nodes();
1459
1460			/*
1461			 * Now all kifs & rules should be unreferenced,
1462			 * thus should be successfully freed.
1463			 */
1464			pf_purge_unlinked_rules();
1465			pfi_kif_purge();
1466
1467			/*
1468			 * Announce success and exit.
1469			 */
1470			PF_RULES_RLOCK();
1471			V_pf_end_threads++;
1472			PF_RULES_RUNLOCK();
1473			wakeup(pf_purge_thread);
1474			kproc_exit(0);
1475		}
1476		PF_RULES_RUNLOCK();
1477
1478		/* Process 1/interval fraction of the state table every run. */
1479		idx = pf_purge_expired_states(idx, pf_hashmask /
1480			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
1481
1482		/* Purge other expired types every PFTM_INTERVAL seconds. */
1483		if (idx == 0) {
1484			/*
1485			 * Order is important:
1486			 * - states and src nodes reference rules
1487			 * - states and rules reference kifs
1488			 */
1489			pf_purge_expired_fragments();
1490			pf_purge_expired_src_nodes();
1491			pf_purge_unlinked_rules();
1492			pfi_kif_purge();
1493		}
1494	}
1495	/* not reached */
1496	CURVNET_RESTORE();
1497}
1498
1499u_int32_t
1500pf_state_expires(const struct pf_state *state)
1501{
1502	u_int32_t	timeout;
1503	u_int32_t	start;
1504	u_int32_t	end;
1505	u_int32_t	states;
1506
1507	/* handle all PFTM_* > PFTM_MAX here */
1508	if (state->timeout == PFTM_PURGE)
1509		return (time_uptime);
1510	KASSERT(state->timeout != PFTM_UNLINKED,
1511	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
1512	KASSERT((state->timeout < PFTM_MAX),
1513	    ("pf_state_expires: timeout > PFTM_MAX"));
1514	timeout = state->rule.ptr->timeout[state->timeout];
1515	if (!timeout)
1516		timeout = V_pf_default_rule.timeout[state->timeout];
1517	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1518	if (start) {
1519		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1520		states = counter_u64_fetch(state->rule.ptr->states_cur);
1521	} else {
1522		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1523		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1524		states = V_pf_status.states;
1525	}
1526	if (end && states > start && start < end) {
1527		if (states < end)
1528			return (state->expire + timeout * (end - states) /
1529			    (end - start));
1530		else
1531			return (time_uptime);
1532	}
1533	return (state->expire + timeout);
1534}
1535
1536void
1537pf_purge_expired_src_nodes()
1538{
1539	struct pf_src_node_list	 freelist;
1540	struct pf_srchash	*sh;
1541	struct pf_src_node	*cur, *next;
1542	int i;
1543
1544	LIST_INIT(&freelist);
1545	for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
1546	    PF_HASHROW_LOCK(sh);
1547	    LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
1548		if (cur->states == 0 && cur->expire <= time_uptime) {
1549			pf_unlink_src_node(cur);
1550			LIST_INSERT_HEAD(&freelist, cur, entry);
1551		} else if (cur->rule.ptr != NULL)
1552			cur->rule.ptr->rule_flag |= PFRULE_REFS;
1553	    PF_HASHROW_UNLOCK(sh);
1554	}
1555
1556	pf_free_src_nodes(&freelist);
1557
1558	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
1559}
1560
1561static void
1562pf_src_tree_remove_state(struct pf_state *s)
1563{
1564	struct pf_src_node *sn;
1565	struct pf_srchash *sh;
1566	uint32_t timeout;
1567
1568	timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
1569	    s->rule.ptr->timeout[PFTM_SRC_NODE] :
1570	    V_pf_default_rule.timeout[PFTM_SRC_NODE];
1571
1572	if (s->src_node != NULL) {
1573		sn = s->src_node;
1574		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
1575	    	PF_HASHROW_LOCK(sh);
1576		if (s->src.tcp_est)
1577			--sn->conn;
1578		if (--sn->states == 0)
1579			sn->expire = time_uptime + timeout;
1580	    	PF_HASHROW_UNLOCK(sh);
1581	}
1582	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1583		sn = s->nat_src_node;
1584		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
1585	    	PF_HASHROW_LOCK(sh);
1586		if (--sn->states == 0)
1587			sn->expire = time_uptime + timeout;
1588	    	PF_HASHROW_UNLOCK(sh);
1589	}
1590	s->src_node = s->nat_src_node = NULL;
1591}
1592
1593/*
1594 * Unlink and potentilly free a state. Function may be
1595 * called with ID hash row locked, but always returns
1596 * unlocked, since it needs to go through key hash locking.
1597 */
1598int
1599pf_unlink_state(struct pf_state *s, u_int flags)
1600{
1601	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];
1602
1603	if ((flags & PF_ENTER_LOCKED) == 0)
1604		PF_HASHROW_LOCK(ih);
1605	else
1606		PF_HASHROW_ASSERT(ih);
1607
1608	if (s->timeout == PFTM_UNLINKED) {
1609		/*
1610		 * State is being processed
1611		 * by pf_unlink_state() in
1612		 * an other thread.
1613		 */
1614		PF_HASHROW_UNLOCK(ih);
1615		return (0);	/* XXXGL: undefined actually */
1616	}
1617
1618	if (s->src.state == PF_TCPS_PROXY_DST) {
1619		/* XXX wire key the right one? */
1620		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
1621		    &s->key[PF_SK_WIRE]->addr[1],
1622		    &s->key[PF_SK_WIRE]->addr[0],
1623		    s->key[PF_SK_WIRE]->port[1],
1624		    s->key[PF_SK_WIRE]->port[0],
1625		    s->src.seqhi, s->src.seqlo + 1,
1626		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
1627	}
1628
1629	LIST_REMOVE(s, entry);
1630	pf_src_tree_remove_state(s);
1631
1632	if (pfsync_delete_state_ptr != NULL)
1633		pfsync_delete_state_ptr(s);
1634
1635	STATE_DEC_COUNTERS(s);
1636
1637	s->timeout = PFTM_UNLINKED;
1638
1639	PF_HASHROW_UNLOCK(ih);
1640
1641	pf_detach_state(s);
1642	refcount_release(&s->refs);
1643
1644	return (pf_release_state(s));
1645}
1646
1647void
1648pf_free_state(struct pf_state *cur)
1649{
1650
1651	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
1652	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
1653	    cur->timeout));
1654
1655	pf_normalize_tcp_cleanup(cur);
1656	uma_zfree(V_pf_state_z, cur);
1657	counter_u64_add(V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
1658}
1659
1660/*
1661 * Called only from pf_purge_thread(), thus serialized.
1662 */
1663static u_int
1664pf_purge_expired_states(u_int i, int maxcheck)
1665{
1666	struct pf_idhash *ih;
1667	struct pf_state *s;
1668
1669	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
1670
1671	/*
1672	 * Go through hash and unlink states that expire now.
1673	 */
1674	while (maxcheck > 0) {
1675
1676		ih = &V_pf_idhash[i];
1677relock:
1678		PF_HASHROW_LOCK(ih);
1679		LIST_FOREACH(s, &ih->states, entry) {
1680			if (pf_state_expires(s) <= time_uptime) {
1681				V_pf_status.states -=
1682				    pf_unlink_state(s, PF_ENTER_LOCKED);
1683				goto relock;
1684			}
1685			s->rule.ptr->rule_flag |= PFRULE_REFS;
1686			if (s->nat_rule.ptr != NULL)
1687				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
1688			if (s->anchor.ptr != NULL)
1689				s->anchor.ptr->rule_flag |= PFRULE_REFS;
1690			s->kif->pfik_flags |= PFI_IFLAG_REFS;
1691			if (s->rt_kif)
1692				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
1693		}
1694		PF_HASHROW_UNLOCK(ih);
1695
1696		/* Return when we hit end of hash. */
1697		if (++i > pf_hashmask) {
1698			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
1699			return (0);
1700		}
1701
1702		maxcheck--;
1703	}
1704
1705	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
1706
1707	return (i);
1708}
1709
1710static void
1711pf_purge_unlinked_rules()
1712{
1713	struct pf_rulequeue tmpq;
1714	struct pf_rule *r, *r1;
1715
1716	/*
1717	 * If we have overloading task pending, then we'd
1718	 * better skip purging this time. There is a tiny
1719	 * probability that overloading task references
1720	 * an already unlinked rule.
1721	 */
1722	PF_OVERLOADQ_LOCK();
1723	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
1724		PF_OVERLOADQ_UNLOCK();
1725		return;
1726	}
1727	PF_OVERLOADQ_UNLOCK();
1728
1729	/*
1730	 * Do naive mark-and-sweep garbage collecting of old rules.
1731	 * Reference flag is raised by pf_purge_expired_states()
1732	 * and pf_purge_expired_src_nodes().
1733	 *
1734	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
1735	 * use a temporary queue.
1736	 */
1737	TAILQ_INIT(&tmpq);
1738	PF_UNLNKDRULES_LOCK();
1739	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
1740		if (!(r->rule_flag & PFRULE_REFS)) {
1741			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
1742			TAILQ_INSERT_TAIL(&tmpq, r, entries);
1743		} else
1744			r->rule_flag &= ~PFRULE_REFS;
1745	}
1746	PF_UNLNKDRULES_UNLOCK();
1747
1748	if (!TAILQ_EMPTY(&tmpq)) {
1749		PF_RULES_WLOCK();
1750		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
1751			TAILQ_REMOVE(&tmpq, r, entries);
1752			pf_free_rule(r);
1753		}
1754		PF_RULES_WUNLOCK();
1755	}
1756}
1757
1758void
1759pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1760{
1761	switch (af) {
1762#ifdef INET
1763	case AF_INET: {
1764		u_int32_t a = ntohl(addr->addr32[0]);
1765		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1766		    (a>>8)&255, a&255);
1767		if (p) {
1768			p = ntohs(p);
1769			printf(":%u", p);
1770		}
1771		break;
1772	}
1773#endif /* INET */
1774#ifdef INET6
1775	case AF_INET6: {
1776		u_int16_t b;
1777		u_int8_t i, curstart, curend, maxstart, maxend;
1778		curstart = curend = maxstart = maxend = 255;
1779		for (i = 0; i < 8; i++) {
1780			if (!addr->addr16[i]) {
1781				if (curstart == 255)
1782					curstart = i;
1783				curend = i;
1784			} else {
1785				if ((curend - curstart) >
1786				    (maxend - maxstart)) {
1787					maxstart = curstart;
1788					maxend = curend;
1789				}
1790				curstart = curend = 255;
1791			}
1792		}
1793		if ((curend - curstart) >
1794		    (maxend - maxstart)) {
1795			maxstart = curstart;
1796			maxend = curend;
1797		}
1798		for (i = 0; i < 8; i++) {
1799			if (i >= maxstart && i <= maxend) {
1800				if (i == 0)
1801					printf(":");
1802				if (i == maxend)
1803					printf(":");
1804			} else {
1805				b = ntohs(addr->addr16[i]);
1806				printf("%x", b);
1807				if (i < 7)
1808					printf(":");
1809			}
1810		}
1811		if (p) {
1812			p = ntohs(p);
1813			printf("[%u]", p);
1814		}
1815		break;
1816	}
1817#endif /* INET6 */
1818	}
1819}
1820
1821void
1822pf_print_state(struct pf_state *s)
1823{
1824	pf_print_state_parts(s, NULL, NULL);
1825}
1826
1827static void
1828pf_print_state_parts(struct pf_state *s,
1829    struct pf_state_key *skwp, struct pf_state_key *sksp)
1830{
1831	struct pf_state_key *skw, *sks;
1832	u_int8_t proto, dir;
1833
1834	/* Do our best to fill these, but they're skipped if NULL */
1835	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
1836	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
1837	proto = skw ? skw->proto : (sks ? sks->proto : 0);
1838	dir = s ? s->direction : 0;
1839
1840	switch (proto) {
1841	case IPPROTO_IPV4:
1842		printf("IPv4");
1843		break;
1844	case IPPROTO_IPV6:
1845		printf("IPv6");
1846		break;
1847	case IPPROTO_TCP:
1848		printf("TCP");
1849		break;
1850	case IPPROTO_UDP:
1851		printf("UDP");
1852		break;
1853	case IPPROTO_ICMP:
1854		printf("ICMP");
1855		break;
1856	case IPPROTO_ICMPV6:
1857		printf("ICMPv6");
1858		break;
1859	default:
1860		printf("%u", skw->proto);
1861		break;
1862	}
1863	switch (dir) {
1864	case PF_IN:
1865		printf(" in");
1866		break;
1867	case PF_OUT:
1868		printf(" out");
1869		break;
1870	}
1871	if (skw) {
1872		printf(" wire: ");
1873		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
1874		printf(" ");
1875		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
1876	}
1877	if (sks) {
1878		printf(" stack: ");
1879		if (sks != skw) {
1880			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
1881			printf(" ");
1882			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
1883		} else
1884			printf("-");
1885	}
1886	if (s) {
1887		if (proto == IPPROTO_TCP) {
1888			printf(" [lo=%u high=%u win=%u modulator=%u",
1889			    s->src.seqlo, s->src.seqhi,
1890			    s->src.max_win, s->src.seqdiff);
1891			if (s->src.wscale && s->dst.wscale)
1892				printf(" wscale=%u",
1893				    s->src.wscale & PF_WSCALE_MASK);
1894			printf("]");
1895			printf(" [lo=%u high=%u win=%u modulator=%u",
1896			    s->dst.seqlo, s->dst.seqhi,
1897			    s->dst.max_win, s->dst.seqdiff);
1898			if (s->src.wscale && s->dst.wscale)
1899				printf(" wscale=%u",
1900				s->dst.wscale & PF_WSCALE_MASK);
1901			printf("]");
1902		}
1903		printf(" %u:%u", s->src.state, s->dst.state);
1904	}
1905}
1906
1907void
1908pf_print_flags(u_int8_t f)
1909{
1910	if (f)
1911		printf(" ");
1912	if (f & TH_FIN)
1913		printf("F");
1914	if (f & TH_SYN)
1915		printf("S");
1916	if (f & TH_RST)
1917		printf("R");
1918	if (f & TH_PUSH)
1919		printf("P");
1920	if (f & TH_ACK)
1921		printf("A");
1922	if (f & TH_URG)
1923		printf("U");
1924	if (f & TH_ECE)
1925		printf("E");
1926	if (f & TH_CWR)
1927		printf("W");
1928}
1929
1930#define	PF_SET_SKIP_STEPS(i)					\
1931	do {							\
1932		while (head[i] != cur) {			\
1933			head[i]->skip[i].ptr = cur;		\
1934			head[i] = TAILQ_NEXT(head[i], entries);	\
1935		}						\
1936	} while (0)
1937
1938void
1939pf_calc_skip_steps(struct pf_rulequeue *rules)
1940{
1941	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1942	int i;
1943
1944	cur = TAILQ_FIRST(rules);
1945	prev = cur;
1946	for (i = 0; i < PF_SKIP_COUNT; ++i)
1947		head[i] = cur;
1948	while (cur != NULL) {
1949
1950		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1951			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1952		if (cur->direction != prev->direction)
1953			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1954		if (cur->af != prev->af)
1955			PF_SET_SKIP_STEPS(PF_SKIP_AF);
1956		if (cur->proto != prev->proto)
1957			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1958		if (cur->src.neg != prev->src.neg ||
1959		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1960			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1961		if (cur->src.port[0] != prev->src.port[0] ||
1962		    cur->src.port[1] != prev->src.port[1] ||
1963		    cur->src.port_op != prev->src.port_op)
1964			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1965		if (cur->dst.neg != prev->dst.neg ||
1966		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1967			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1968		if (cur->dst.port[0] != prev->dst.port[0] ||
1969		    cur->dst.port[1] != prev->dst.port[1] ||
1970		    cur->dst.port_op != prev->dst.port_op)
1971			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1972
1973		prev = cur;
1974		cur = TAILQ_NEXT(cur, entries);
1975	}
1976	for (i = 0; i < PF_SKIP_COUNT; ++i)
1977		PF_SET_SKIP_STEPS(i);
1978}
1979
1980static int
1981pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1982{
1983	if (aw1->type != aw2->type)
1984		return (1);
1985	switch (aw1->type) {
1986	case PF_ADDR_ADDRMASK:
1987	case PF_ADDR_RANGE:
1988		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
1989			return (1);
1990		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
1991			return (1);
1992		return (0);
1993	case PF_ADDR_DYNIFTL:
1994		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1995	case PF_ADDR_NOROUTE:
1996	case PF_ADDR_URPFFAILED:
1997		return (0);
1998	case PF_ADDR_TABLE:
1999		return (aw1->p.tbl != aw2->p.tbl);
2000	default:
2001		printf("invalid address type: %d\n", aw1->type);
2002		return (1);
2003	}
2004}
2005
2006/**
2007 * Checksum updates are a little complicated because the checksum in the TCP/UDP
2008 * header isn't always a full checksum. In some cases (i.e. output) it's a
2009 * pseudo-header checksum, which is a partial checksum over src/dst IP
2010 * addresses, protocol number and length.
2011 *
2012 * That means we have the following cases:
2013 *  * Input or forwarding: we don't have TSO, the checksum fields are full
2014 *  	checksums, we need to update the checksum whenever we change anything.
2015 *  * Output (i.e. the checksum is a pseudo-header checksum):
2016 *  	x The field being updated is src/dst address or affects the length of
2017 *  	the packet. We need to update the pseudo-header checksum (note that this
2018 *  	checksum is not ones' complement).
2019 *  	x Some other field is being modified (e.g. src/dst port numbers): We
2020 *  	don't have to update anything.
2021 **/
2022u_int16_t
2023pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2024{
2025	u_int32_t	l;
2026
2027	if (udp && !cksum)
2028		return (0x0000);
2029	l = cksum + old - new;
2030	l = (l >> 16) + (l & 65535);
2031	l = l & 65535;
2032	if (udp && !l)
2033		return (0xFFFF);
2034	return (l);
2035}
2036
2037u_int16_t
2038pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
2039        u_int16_t new, u_int8_t udp)
2040{
2041	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2042		return (cksum);
2043
2044	return (pf_cksum_fixup(cksum, old, new, udp));
2045}
2046
2047static void
2048pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
2049        u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
2050        sa_family_t af)
2051{
2052	struct pf_addr	ao;
2053	u_int16_t	po = *p;
2054
2055	PF_ACPY(&ao, a, af);
2056	PF_ACPY(a, an, af);
2057
2058	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2059		*pc = ~*pc;
2060
2061	*p = pn;
2062
2063	switch (af) {
2064#ifdef INET
2065	case AF_INET:
2066		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2067		    ao.addr16[0], an->addr16[0], 0),
2068		    ao.addr16[1], an->addr16[1], 0);
2069		*p = pn;
2070
2071		*pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
2072		    ao.addr16[0], an->addr16[0], u),
2073		    ao.addr16[1], an->addr16[1], u);
2074
2075		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2076		break;
2077#endif /* INET */
2078#ifdef INET6
2079	case AF_INET6:
2080		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2081		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2082		    pf_cksum_fixup(pf_cksum_fixup(*pc,
2083		    ao.addr16[0], an->addr16[0], u),
2084		    ao.addr16[1], an->addr16[1], u),
2085		    ao.addr16[2], an->addr16[2], u),
2086		    ao.addr16[3], an->addr16[3], u),
2087		    ao.addr16[4], an->addr16[4], u),
2088		    ao.addr16[5], an->addr16[5], u),
2089		    ao.addr16[6], an->addr16[6], u),
2090		    ao.addr16[7], an->addr16[7], u);
2091
2092		*pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2093		break;
2094#endif /* INET6 */
2095	}
2096
2097	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
2098	    CSUM_DELAY_DATA_IPV6)) {
2099		*pc = ~*pc;
2100		if (! *pc)
2101			*pc = 0xffff;
2102	}
2103}
2104
2105/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
2106void
2107pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2108{
2109	u_int32_t	ao;
2110
2111	memcpy(&ao, a, sizeof(ao));
2112	memcpy(a, &an, sizeof(u_int32_t));
2113	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2114	    ao % 65536, an % 65536, u);
2115}
2116
2117void
2118pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
2119{
2120	u_int32_t	ao;
2121
2122	memcpy(&ao, a, sizeof(ao));
2123	memcpy(a, &an, sizeof(u_int32_t));
2124
2125	*c = pf_proto_cksum_fixup(m,
2126	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
2127	    ao % 65536, an % 65536, udp);
2128}
2129
2130#ifdef INET6
2131static void
2132pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2133{
2134	struct pf_addr	ao;
2135
2136	PF_ACPY(&ao, a, AF_INET6);
2137	PF_ACPY(a, an, AF_INET6);
2138
2139	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2140	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2141	    pf_cksum_fixup(pf_cksum_fixup(*c,
2142	    ao.addr16[0], an->addr16[0], u),
2143	    ao.addr16[1], an->addr16[1], u),
2144	    ao.addr16[2], an->addr16[2], u),
2145	    ao.addr16[3], an->addr16[3], u),
2146	    ao.addr16[4], an->addr16[4], u),
2147	    ao.addr16[5], an->addr16[5], u),
2148	    ao.addr16[6], an->addr16[6], u),
2149	    ao.addr16[7], an->addr16[7], u);
2150}
2151#endif /* INET6 */
2152
2153static void
2154pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2155    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2156    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2157{
2158	struct pf_addr	oia, ooa;
2159
2160	PF_ACPY(&oia, ia, af);
2161	if (oa)
2162		PF_ACPY(&ooa, oa, af);
2163
2164	/* Change inner protocol port, fix inner protocol checksum. */
2165	if (ip != NULL) {
2166		u_int16_t	oip = *ip;
2167		u_int32_t	opc;
2168
2169		if (pc != NULL)
2170			opc = *pc;
2171		*ip = np;
2172		if (pc != NULL)
2173			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
2174		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2175		if (pc != NULL)
2176			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2177	}
2178	/* Change inner ip address, fix inner ip and icmp checksums. */
2179	PF_ACPY(ia, na, af);
2180	switch (af) {
2181#ifdef INET
2182	case AF_INET: {
2183		u_int32_t	 oh2c = *h2c;
2184
2185		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2186		    oia.addr16[0], ia->addr16[0], 0),
2187		    oia.addr16[1], ia->addr16[1], 0);
2188		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2189		    oia.addr16[0], ia->addr16[0], 0),
2190		    oia.addr16[1], ia->addr16[1], 0);
2191		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2192		break;
2193	}
2194#endif /* INET */
2195#ifdef INET6
2196	case AF_INET6:
2197		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2198		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2199		    pf_cksum_fixup(pf_cksum_fixup(*ic,
2200		    oia.addr16[0], ia->addr16[0], u),
2201		    oia.addr16[1], ia->addr16[1], u),
2202		    oia.addr16[2], ia->addr16[2], u),
2203		    oia.addr16[3], ia->addr16[3], u),
2204		    oia.addr16[4], ia->addr16[4], u),
2205		    oia.addr16[5], ia->addr16[5], u),
2206		    oia.addr16[6], ia->addr16[6], u),
2207		    oia.addr16[7], ia->addr16[7], u);
2208		break;
2209#endif /* INET6 */
2210	}
2211	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2212	if (oa) {
2213		PF_ACPY(oa, na, af);
2214		switch (af) {
2215#ifdef INET
2216		case AF_INET:
2217			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2218			    ooa.addr16[0], oa->addr16[0], 0),
2219			    ooa.addr16[1], oa->addr16[1], 0);
2220			break;
2221#endif /* INET */
2222#ifdef INET6
2223		case AF_INET6:
2224			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2225			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2226			    pf_cksum_fixup(pf_cksum_fixup(*ic,
2227			    ooa.addr16[0], oa->addr16[0], u),
2228			    ooa.addr16[1], oa->addr16[1], u),
2229			    ooa.addr16[2], oa->addr16[2], u),
2230			    ooa.addr16[3], oa->addr16[3], u),
2231			    ooa.addr16[4], oa->addr16[4], u),
2232			    ooa.addr16[5], oa->addr16[5], u),
2233			    ooa.addr16[6], oa->addr16[6], u),
2234			    ooa.addr16[7], oa->addr16[7], u);
2235			break;
2236#endif /* INET6 */
2237		}
2238	}
2239}
2240
2241
2242/*
2243 * Need to modulate the sequence numbers in the TCP SACK option
2244 * (credits to Krzysztof Pfaff for report and patch)
2245 */
2246static int
2247pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2248    struct tcphdr *th, struct pf_state_peer *dst)
2249{
2250	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2251	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2252	int copyback = 0, i, olen;
2253	struct sackblk sack;
2254
2255#define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
2256	if (hlen < TCPOLEN_SACKLEN ||
2257	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2258		return 0;
2259
2260	while (hlen >= TCPOLEN_SACKLEN) {
2261		olen = opt[1];
2262		switch (*opt) {
2263		case TCPOPT_EOL:	/* FALLTHROUGH */
2264		case TCPOPT_NOP:
2265			opt++;
2266			hlen--;
2267			break;
2268		case TCPOPT_SACK:
2269			if (olen > hlen)
2270				olen = hlen;
2271			if (olen >= TCPOLEN_SACKLEN) {
2272				for (i = 2; i + TCPOLEN_SACK <= olen;
2273				    i += TCPOLEN_SACK) {
2274					memcpy(&sack, &opt[i], sizeof(sack));
2275					pf_change_proto_a(m, &sack.start, &th->th_sum,
2276					    htonl(ntohl(sack.start) - dst->seqdiff), 0);
2277					pf_change_proto_a(m, &sack.end, &th->th_sum,
2278					    htonl(ntohl(sack.end) - dst->seqdiff), 0);
2279					memcpy(&opt[i], &sack, sizeof(sack));
2280				}
2281				copyback = 1;
2282			}
2283			/* FALLTHROUGH */
2284		default:
2285			if (olen < 2)
2286				olen = 2;
2287			hlen -= olen;
2288			opt += olen;
2289		}
2290	}
2291
2292	if (copyback)
2293		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2294	return (copyback);
2295}
2296
2297static void
2298pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
2299    const struct pf_addr *saddr, const struct pf_addr *daddr,
2300    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2301    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2302    u_int16_t rtag, struct ifnet *ifp)
2303{
2304	struct pf_send_entry *pfse;
2305	struct mbuf	*m;
2306	int		 len, tlen;
2307#ifdef INET
2308	struct ip	*h = NULL;
2309#endif /* INET */
2310#ifdef INET6
2311	struct ip6_hdr	*h6 = NULL;
2312#endif /* INET6 */
2313	struct tcphdr	*th;
2314	char		*opt;
2315	struct pf_mtag  *pf_mtag;
2316
2317	len = 0;
2318	th = NULL;
2319
2320	/* maximum segment size tcp option */
2321	tlen = sizeof(struct tcphdr);
2322	if (mss)
2323		tlen += 4;
2324
2325	switch (af) {
2326#ifdef INET
2327	case AF_INET:
2328		len = sizeof(struct ip) + tlen;
2329		break;
2330#endif /* INET */
2331#ifdef INET6
2332	case AF_INET6:
2333		len = sizeof(struct ip6_hdr) + tlen;
2334		break;
2335#endif /* INET6 */
2336	default:
2337		panic("%s: unsupported af %d", __func__, af);
2338	}
2339
2340	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
2341	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2342	if (pfse == NULL)
2343		return;
2344	m = m_gethdr(M_NOWAIT, MT_DATA);
2345	if (m == NULL) {
2346		free(pfse, M_PFTEMP);
2347		return;
2348	}
2349#ifdef MAC
2350	mac_netinet_firewall_send(m);
2351#endif
2352	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2353		free(pfse, M_PFTEMP);
2354		m_freem(m);
2355		return;
2356	}
2357	if (tag)
2358		m->m_flags |= M_SKIP_FIREWALL;
2359	pf_mtag->tag = rtag;
2360
2361	if (r != NULL && r->rtableid >= 0)
2362		M_SETFIB(m, r->rtableid);
2363
2364#ifdef ALTQ
2365	if (r != NULL && r->qid) {
2366		pf_mtag->qid = r->qid;
2367
2368		/* add hints for ecn */
2369		pf_mtag->hdr = mtod(m, struct ip *);
2370	}
2371#endif /* ALTQ */
2372	m->m_data += max_linkhdr;
2373	m->m_pkthdr.len = m->m_len = len;
2374	m->m_pkthdr.rcvif = NULL;
2375	bzero(m->m_data, len);
2376	switch (af) {
2377#ifdef INET
2378	case AF_INET:
2379		h = mtod(m, struct ip *);
2380
2381		/* IP header fields included in the TCP checksum */
2382		h->ip_p = IPPROTO_TCP;
2383		h->ip_len = htons(tlen);
2384		h->ip_src.s_addr = saddr->v4.s_addr;
2385		h->ip_dst.s_addr = daddr->v4.s_addr;
2386
2387		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2388		break;
2389#endif /* INET */
2390#ifdef INET6
2391	case AF_INET6:
2392		h6 = mtod(m, struct ip6_hdr *);
2393
2394		/* IP header fields included in the TCP checksum */
2395		h6->ip6_nxt = IPPROTO_TCP;
2396		h6->ip6_plen = htons(tlen);
2397		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2398		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2399
2400		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2401		break;
2402#endif /* INET6 */
2403	}
2404
2405	/* TCP header */
2406	th->th_sport = sport;
2407	th->th_dport = dport;
2408	th->th_seq = htonl(seq);
2409	th->th_ack = htonl(ack);
2410	th->th_off = tlen >> 2;
2411	th->th_flags = flags;
2412	th->th_win = htons(win);
2413
2414	if (mss) {
2415		opt = (char *)(th + 1);
2416		opt[0] = TCPOPT_MAXSEG;
2417		opt[1] = 4;
2418		HTONS(mss);
2419		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2420	}
2421
2422	switch (af) {
2423#ifdef INET
2424	case AF_INET:
2425		/* TCP checksum */
2426		th->th_sum = in_cksum(m, len);
2427
2428		/* Finish the IP header */
2429		h->ip_v = 4;
2430		h->ip_hl = sizeof(*h) >> 2;
2431		h->ip_tos = IPTOS_LOWDELAY;
2432		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
2433		h->ip_len = htons(len);
2434		h->ip_ttl = ttl ? ttl : V_ip_defttl;
2435		h->ip_sum = 0;
2436
2437		pfse->pfse_type = PFSE_IP;
2438		break;
2439#endif /* INET */
2440#ifdef INET6
2441	case AF_INET6:
2442		/* TCP checksum */
2443		th->th_sum = in6_cksum(m, IPPROTO_TCP,
2444		    sizeof(struct ip6_hdr), tlen);
2445
2446		h6->ip6_vfc |= IPV6_VERSION;
2447		h6->ip6_hlim = IPV6_DEFHLIM;
2448
2449		pfse->pfse_type = PFSE_IP6;
2450		break;
2451#endif /* INET6 */
2452	}
2453	pfse->pfse_m = m;
2454	pf_send(pfse);
2455}
2456
2457static void
2458pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2459    struct pf_rule *r)
2460{
2461	struct pf_send_entry *pfse;
2462	struct mbuf *m0;
2463	struct pf_mtag *pf_mtag;
2464
2465	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
2466	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2467	if (pfse == NULL)
2468		return;
2469
2470	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2471		free(pfse, M_PFTEMP);
2472		return;
2473	}
2474
2475	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
2476		free(pfse, M_PFTEMP);
2477		return;
2478	}
2479	/* XXX: revisit */
2480	m0->m_flags |= M_SKIP_FIREWALL;
2481
2482	if (r->rtableid >= 0)
2483		M_SETFIB(m0, r->rtableid);
2484
2485#ifdef ALTQ
2486	if (r->qid) {
2487		pf_mtag->qid = r->qid;
2488		/* add hints for ecn */
2489		pf_mtag->hdr = mtod(m0, struct ip *);
2490	}
2491#endif /* ALTQ */
2492
2493	switch (af) {
2494#ifdef INET
2495	case AF_INET:
2496		pfse->pfse_type = PFSE_ICMP;
2497		break;
2498#endif /* INET */
2499#ifdef INET6
2500	case AF_INET6:
2501		pfse->pfse_type = PFSE_ICMP6;
2502		break;
2503#endif /* INET6 */
2504	}
2505	pfse->pfse_m = m0;
2506	pfse->pfse_icmp_type = type;
2507	pfse->pfse_icmp_code = code;
2508	pf_send(pfse);
2509}
2510
2511/*
2512 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2513 * If n is 0, they match if they are equal. If n is != 0, they match if they
2514 * are different.
2515 */
2516int
2517pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2518    struct pf_addr *b, sa_family_t af)
2519{
2520	int	match = 0;
2521
2522	switch (af) {
2523#ifdef INET
2524	case AF_INET:
2525		if ((a->addr32[0] & m->addr32[0]) ==
2526		    (b->addr32[0] & m->addr32[0]))
2527			match++;
2528		break;
2529#endif /* INET */
2530#ifdef INET6
2531	case AF_INET6:
2532		if (((a->addr32[0] & m->addr32[0]) ==
2533		     (b->addr32[0] & m->addr32[0])) &&
2534		    ((a->addr32[1] & m->addr32[1]) ==
2535		     (b->addr32[1] & m->addr32[1])) &&
2536		    ((a->addr32[2] & m->addr32[2]) ==
2537		     (b->addr32[2] & m->addr32[2])) &&
2538		    ((a->addr32[3] & m->addr32[3]) ==
2539		     (b->addr32[3] & m->addr32[3])))
2540			match++;
2541		break;
2542#endif /* INET6 */
2543	}
2544	if (match) {
2545		if (n)
2546			return (0);
2547		else
2548			return (1);
2549	} else {
2550		if (n)
2551			return (1);
2552		else
2553			return (0);
2554	}
2555}
2556
2557/*
2558 * Return 1 if b <= a <= e, otherwise return 0.
2559 */
2560int
2561pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2562    struct pf_addr *a, sa_family_t af)
2563{
2564	switch (af) {
2565#ifdef INET
2566	case AF_INET:
2567		if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2568		    (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2569			return (0);
2570		break;
2571#endif /* INET */
2572#ifdef INET6
2573	case AF_INET6: {
2574		int	i;
2575
2576		/* check a >= b */
2577		for (i = 0; i < 4; ++i)
2578			if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2579				break;
2580			else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2581				return (0);
2582		/* check a <= e */
2583		for (i = 0; i < 4; ++i)
2584			if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2585				break;
2586			else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
2587				return (0);
2588		break;
2589	}
2590#endif /* INET6 */
2591	}
2592	return (1);
2593}
2594
2595static int
2596pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2597{
2598	switch (op) {
2599	case PF_OP_IRG:
2600		return ((p > a1) && (p < a2));
2601	case PF_OP_XRG:
2602		return ((p < a1) || (p > a2));
2603	case PF_OP_RRG:
2604		return ((p >= a1) && (p <= a2));
2605	case PF_OP_EQ:
2606		return (p == a1);
2607	case PF_OP_NE:
2608		return (p != a1);
2609	case PF_OP_LT:
2610		return (p < a1);
2611	case PF_OP_LE:
2612		return (p <= a1);
2613	case PF_OP_GT:
2614		return (p > a1);
2615	case PF_OP_GE:
2616		return (p >= a1);
2617	}
2618	return (0); /* never reached */
2619}
2620
2621int
2622pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2623{
2624	NTOHS(a1);
2625	NTOHS(a2);
2626	NTOHS(p);
2627	return (pf_match(op, a1, a2, p));
2628}
2629
2630static int
2631pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2632{
2633	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2634		return (0);
2635	return (pf_match(op, a1, a2, u));
2636}
2637
2638static int
2639pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2640{
2641	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2642		return (0);
2643	return (pf_match(op, a1, a2, g));
2644}
2645
2646int
2647pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2648{
2649	if (*tag == -1)
2650		*tag = mtag;
2651
2652	return ((!r->match_tag_not && r->match_tag == *tag) ||
2653	    (r->match_tag_not && r->match_tag != *tag));
2654}
2655
2656int
2657pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2658{
2659
2660	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2661
2662	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2663		return (ENOMEM);
2664
2665	pd->pf_mtag->tag = tag;
2666
2667	return (0);
2668}
2669
2670#define	PF_ANCHOR_STACKSIZE	32
2671struct pf_anchor_stackframe {
2672	struct pf_ruleset	*rs;
2673	struct pf_rule		*r;	/* XXX: + match bit */
2674	struct pf_anchor	*child;
2675};
2676
2677/*
2678 * XXX: We rely on malloc(9) returning pointer aligned addresses.
2679 */
2680#define	PF_ANCHORSTACK_MATCH	0x00000001
2681#define	PF_ANCHORSTACK_MASK	(PF_ANCHORSTACK_MATCH)
2682
2683#define	PF_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2684#define	PF_ANCHOR_RULE(f)	(struct pf_rule *)			\
2685				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2686#define	PF_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *) 			\
2687				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
2688} while (0)
2689
2690void
2691pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2692    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2693    int *match)
2694{
2695	struct pf_anchor_stackframe	*f;
2696
2697	PF_RULES_RASSERT();
2698
2699	if (match)
2700		*match = 0;
2701	if (*depth >= PF_ANCHOR_STACKSIZE) {
2702		printf("%s: anchor stack overflow on %s\n",
2703		    __func__, (*r)->anchor->name);
2704		*r = TAILQ_NEXT(*r, entries);
2705		return;
2706	} else if (*depth == 0 && a != NULL)
2707		*a = *r;
2708	f = stack + (*depth)++;
2709	f->rs = *rs;
2710	f->r = *r;
2711	if ((*r)->anchor_wildcard) {
2712		struct pf_anchor_node *parent = &(*r)->anchor->children;
2713
2714		if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2715			*r = NULL;
2716			return;
2717		}
2718		*rs = &f->child->ruleset;
2719	} else {
2720		f->child = NULL;
2721		*rs = &(*r)->anchor->ruleset;
2722	}
2723	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2724}
2725
2726int
2727pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2728    struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2729    int *match)
2730{
2731	struct pf_anchor_stackframe	*f;
2732	struct pf_rule *fr;
2733	int quick = 0;
2734
2735	PF_RULES_RASSERT();
2736
2737	do {
2738		if (*depth <= 0)
2739			break;
2740		f = stack + *depth - 1;
2741		fr = PF_ANCHOR_RULE(f);
2742		if (f->child != NULL) {
2743			struct pf_anchor_node *parent;
2744
2745			/*
2746			 * This block traverses through
2747			 * a wildcard anchor.
2748			 */
2749			parent = &fr->anchor->children;
2750			if (match != NULL && *match) {
2751				/*
2752				 * If any of "*" matched, then
2753				 * "foo/ *" matched, mark frame
2754				 * appropriately.
2755				 */
2756				PF_ANCHOR_SET_MATCH(f);
2757				*match = 0;
2758			}
2759			f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2760			if (f->child != NULL) {
2761				*rs = &f->child->ruleset;
2762				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2763				if (*r == NULL)
2764					continue;
2765				else
2766					break;
2767			}
2768		}
2769		(*depth)--;
2770		if (*depth == 0 && a != NULL)
2771			*a = NULL;
2772		*rs = f->rs;
2773		if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2774			quick = fr->quick;
2775		*r = TAILQ_NEXT(fr, entries);
2776	} while (*r == NULL);
2777
2778	return (quick);
2779}
2780
2781#ifdef INET6
2782void
2783pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2784    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2785{
2786	switch (af) {
2787#ifdef INET
2788	case AF_INET:
2789		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2790		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2791		break;
2792#endif /* INET */
2793	case AF_INET6:
2794		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2795		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2796		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2797		((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2798		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2799		((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2800		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2801		((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2802		break;
2803	}
2804}
2805
2806void
2807pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2808{
2809	switch (af) {
2810#ifdef INET
2811	case AF_INET:
2812		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2813		break;
2814#endif /* INET */
2815	case AF_INET6:
2816		if (addr->addr32[3] == 0xffffffff) {
2817			addr->addr32[3] = 0;
2818			if (addr->addr32[2] == 0xffffffff) {
2819				addr->addr32[2] = 0;
2820				if (addr->addr32[1] == 0xffffffff) {
2821					addr->addr32[1] = 0;
2822					addr->addr32[0] =
2823					    htonl(ntohl(addr->addr32[0]) + 1);
2824				} else
2825					addr->addr32[1] =
2826					    htonl(ntohl(addr->addr32[1]) + 1);
2827			} else
2828				addr->addr32[2] =
2829				    htonl(ntohl(addr->addr32[2]) + 1);
2830		} else
2831			addr->addr32[3] =
2832			    htonl(ntohl(addr->addr32[3]) + 1);
2833		break;
2834	}
2835}
2836#endif /* INET6 */
2837
2838int
2839pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2840{
2841	struct pf_addr		*saddr, *daddr;
2842	u_int16_t		 sport, dport;
2843	struct inpcbinfo	*pi;
2844	struct inpcb		*inp;
2845
2846	pd->lookup.uid = UID_MAX;
2847	pd->lookup.gid = GID_MAX;
2848
2849	switch (pd->proto) {
2850	case IPPROTO_TCP:
2851		if (pd->hdr.tcp == NULL)
2852			return (-1);
2853		sport = pd->hdr.tcp->th_sport;
2854		dport = pd->hdr.tcp->th_dport;
2855		pi = &V_tcbinfo;
2856		break;
2857	case IPPROTO_UDP:
2858		if (pd->hdr.udp == NULL)
2859			return (-1);
2860		sport = pd->hdr.udp->uh_sport;
2861		dport = pd->hdr.udp->uh_dport;
2862		pi = &V_udbinfo;
2863		break;
2864	default:
2865		return (-1);
2866	}
2867	if (direction == PF_IN) {
2868		saddr = pd->src;
2869		daddr = pd->dst;
2870	} else {
2871		u_int16_t	p;
2872
2873		p = sport;
2874		sport = dport;
2875		dport = p;
2876		saddr = pd->dst;
2877		daddr = pd->src;
2878	}
2879	switch (pd->af) {
2880#ifdef INET
2881	case AF_INET:
2882		inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2883		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
2884		if (inp == NULL) {
2885			inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2886			   daddr->v4, dport, INPLOOKUP_WILDCARD |
2887			   INPLOOKUP_RLOCKPCB, NULL, m);
2888			if (inp == NULL)
2889				return (-1);
2890		}
2891		break;
2892#endif /* INET */
2893#ifdef INET6
2894	case AF_INET6:
2895		inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2896		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
2897		if (inp == NULL) {
2898			inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2899			    &daddr->v6, dport, INPLOOKUP_WILDCARD |
2900			    INPLOOKUP_RLOCKPCB, NULL, m);
2901			if (inp == NULL)
2902				return (-1);
2903		}
2904		break;
2905#endif /* INET6 */
2906
2907	default:
2908		return (-1);
2909	}
2910	INP_RLOCK_ASSERT(inp);
2911	pd->lookup.uid = inp->inp_cred->cr_uid;
2912	pd->lookup.gid = inp->inp_cred->cr_groups[0];
2913	INP_RUNLOCK(inp);
2914
2915	return (1);
2916}
2917
2918static u_int8_t
2919pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2920{
2921	int		 hlen;
2922	u_int8_t	 hdr[60];
2923	u_int8_t	*opt, optlen;
2924	u_int8_t	 wscale = 0;
2925
2926	hlen = th_off << 2;		/* hlen <= sizeof(hdr) */
2927	if (hlen <= sizeof(struct tcphdr))
2928		return (0);
2929	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2930		return (0);
2931	opt = hdr + sizeof(struct tcphdr);
2932	hlen -= sizeof(struct tcphdr);
2933	while (hlen >= 3) {
2934		switch (*opt) {
2935		case TCPOPT_EOL:
2936		case TCPOPT_NOP:
2937			++opt;
2938			--hlen;
2939			break;
2940		case TCPOPT_WINDOW:
2941			wscale = opt[2];
2942			if (wscale > TCP_MAX_WINSHIFT)
2943				wscale = TCP_MAX_WINSHIFT;
2944			wscale |= PF_WSCALE_FLAG;
2945			/* FALLTHROUGH */
2946		default:
2947			optlen = opt[1];
2948			if (optlen < 2)
2949				optlen = 2;
2950			hlen -= optlen;
2951			opt += optlen;
2952			break;
2953		}
2954	}
2955	return (wscale);
2956}
2957
2958static u_int16_t
2959pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2960{
2961	int		 hlen;
2962	u_int8_t	 hdr[60];
2963	u_int8_t	*opt, optlen;
2964	u_int16_t	 mss = V_tcp_mssdflt;
2965
2966	hlen = th_off << 2;	/* hlen <= sizeof(hdr) */
2967	if (hlen <= sizeof(struct tcphdr))
2968		return (0);
2969	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2970		return (0);
2971	opt = hdr + sizeof(struct tcphdr);
2972	hlen -= sizeof(struct tcphdr);
2973	while (hlen >= TCPOLEN_MAXSEG) {
2974		switch (*opt) {
2975		case TCPOPT_EOL:
2976		case TCPOPT_NOP:
2977			++opt;
2978			--hlen;
2979			break;
2980		case TCPOPT_MAXSEG:
2981			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
2982			NTOHS(mss);
2983			/* FALLTHROUGH */
2984		default:
2985			optlen = opt[1];
2986			if (optlen < 2)
2987				optlen = 2;
2988			hlen -= optlen;
2989			opt += optlen;
2990			break;
2991		}
2992	}
2993	return (mss);
2994}
2995
2996static u_int16_t
2997pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
2998{
2999#ifdef INET
3000	struct sockaddr_in	*dst;
3001	struct route		 ro;
3002#endif /* INET */
3003#ifdef INET6
3004	struct sockaddr_in6	*dst6;
3005	struct route_in6	 ro6;
3006#endif /* INET6 */
3007	struct rtentry		*rt = NULL;
3008	int			 hlen = 0;
3009	u_int16_t		 mss = V_tcp_mssdflt;
3010
3011	switch (af) {
3012#ifdef INET
3013	case AF_INET:
3014		hlen = sizeof(struct ip);
3015		bzero(&ro, sizeof(ro));
3016		dst = (struct sockaddr_in *)&ro.ro_dst;
3017		dst->sin_family = AF_INET;
3018		dst->sin_len = sizeof(*dst);
3019		dst->sin_addr = addr->v4;
3020		in_rtalloc_ign(&ro, 0, rtableid);
3021		rt = ro.ro_rt;
3022		break;
3023#endif /* INET */
3024#ifdef INET6
3025	case AF_INET6:
3026		hlen = sizeof(struct ip6_hdr);
3027		bzero(&ro6, sizeof(ro6));
3028		dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
3029		dst6->sin6_family = AF_INET6;
3030		dst6->sin6_len = sizeof(*dst6);
3031		dst6->sin6_addr = addr->v6;
3032		in6_rtalloc_ign(&ro6, 0, rtableid);
3033		rt = ro6.ro_rt;
3034		break;
3035#endif /* INET6 */
3036	}
3037
3038	if (rt && rt->rt_ifp) {
3039		mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
3040		mss = max(V_tcp_mssdflt, mss);
3041		RTFREE(rt);
3042	}
3043	mss = min(mss, offer);
3044	mss = max(mss, 64);		/* sanity - at least max opt space */
3045	return (mss);
3046}
3047
3048static u_int32_t
3049pf_tcp_iss(struct pf_pdesc *pd)
3050{
3051	MD5_CTX ctx;
3052	u_int32_t digest[4];
3053
3054	if (V_pf_tcp_secret_init == 0) {
3055		read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3056		MD5Init(&V_pf_tcp_secret_ctx);
3057		MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3058		    sizeof(V_pf_tcp_secret));
3059		V_pf_tcp_secret_init = 1;
3060	}
3061
3062	ctx = V_pf_tcp_secret_ctx;
3063
3064	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
3065	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
3066	if (pd->af == AF_INET6) {
3067		MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3068		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3069	} else {
3070		MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3071		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3072	}
3073	MD5Final((u_char *)digest, &ctx);
3074	V_pf_tcp_iss_off += 4096;
3075#define	ISN_RANDOM_INCREMENT (4096 - 1)
3076	return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3077	    V_pf_tcp_iss_off);
3078#undef	ISN_RANDOM_INCREMENT
3079}
3080
3081static int
3082pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
3083    struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
3084    struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
3085{
3086	struct pf_rule		*nr = NULL;
3087	struct pf_addr		* const saddr = pd->src;
3088	struct pf_addr		* const daddr = pd->dst;
3089	sa_family_t		 af = pd->af;
3090	struct pf_rule		*r, *a = NULL;
3091	struct pf_ruleset	*ruleset = NULL;
3092	struct pf_src_node	*nsn = NULL;
3093	struct tcphdr		*th = pd->hdr.tcp;
3094	struct pf_state_key	*sk = NULL, *nk = NULL;
3095	u_short			 reason;
3096	int			 rewrite = 0, hdrlen = 0;
3097	int			 tag = -1, rtableid = -1;
3098	int			 asd = 0;
3099	int			 match = 0;
3100	int			 state_icmp = 0;
3101	u_int16_t		 sport = 0, dport = 0;
3102	u_int16_t		 bproto_sum = 0, bip_sum = 0;
3103	u_int8_t		 icmptype = 0, icmpcode = 0;
3104	struct pf_anchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
3105
3106	PF_RULES_RASSERT();
3107
3108	if (inp != NULL) {
3109		INP_LOCK_ASSERT(inp);
3110		pd->lookup.uid = inp->inp_cred->cr_uid;
3111		pd->lookup.gid = inp->inp_cred->cr_groups[0];
3112		pd->lookup.done = 1;
3113	}
3114
3115	switch (pd->proto) {
3116	case IPPROTO_TCP:
3117		sport = th->th_sport;
3118		dport = th->th_dport;
3119		hdrlen = sizeof(*th);
3120		break;
3121	case IPPROTO_UDP:
3122		sport = pd->hdr.udp->uh_sport;
3123		dport = pd->hdr.udp->uh_dport;
3124		hdrlen = sizeof(*pd->hdr.udp);
3125		break;
3126#ifdef INET
3127	case IPPROTO_ICMP:
3128		if (pd->af != AF_INET)
3129			break;
3130		sport = dport = pd->hdr.icmp->icmp_id;
3131		hdrlen = sizeof(*pd->hdr.icmp);
3132		icmptype = pd->hdr.icmp->icmp_type;
3133		icmpcode = pd->hdr.icmp->icmp_code;
3134
3135		if (icmptype == ICMP_UNREACH ||
3136		    icmptype == ICMP_SOURCEQUENCH ||
3137		    icmptype == ICMP_REDIRECT ||
3138		    icmptype == ICMP_TIMXCEED ||
3139		    icmptype == ICMP_PARAMPROB)
3140			state_icmp++;
3141		break;
3142#endif /* INET */
3143#ifdef INET6
3144	case IPPROTO_ICMPV6:
3145		if (af != AF_INET6)
3146			break;
3147		sport = dport = pd->hdr.icmp6->icmp6_id;
3148		hdrlen = sizeof(*pd->hdr.icmp6);
3149		icmptype = pd->hdr.icmp6->icmp6_type;
3150		icmpcode = pd->hdr.icmp6->icmp6_code;
3151
3152		if (icmptype == ICMP6_DST_UNREACH ||
3153		    icmptype == ICMP6_PACKET_TOO_BIG ||
3154		    icmptype == ICMP6_TIME_EXCEEDED ||
3155		    icmptype == ICMP6_PARAM_PROB)
3156			state_icmp++;
3157		break;
3158#endif /* INET6 */
3159	default:
3160		sport = dport = hdrlen = 0;
3161		break;
3162	}
3163
3164	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3165
3166	/* check packet for BINAT/NAT/RDR */
3167	if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3168	    &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3169		KASSERT(sk != NULL, ("%s: null sk", __func__));
3170		KASSERT(nk != NULL, ("%s: null nk", __func__));
3171
3172		if (pd->ip_sum)
3173			bip_sum = *pd->ip_sum;
3174
3175		switch (pd->proto) {
3176		case IPPROTO_TCP:
3177			bproto_sum = th->th_sum;
3178			pd->proto_sum = &th->th_sum;
3179
3180			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3181			    nk->port[pd->sidx] != sport) {
3182				pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
3183				    &th->th_sum, &nk->addr[pd->sidx],
3184				    nk->port[pd->sidx], 0, af);
3185				pd->sport = &th->th_sport;
3186				sport = th->th_sport;
3187			}
3188
3189			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3190			    nk->port[pd->didx] != dport) {
3191				pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
3192				    &th->th_sum, &nk->addr[pd->didx],
3193				    nk->port[pd->didx], 0, af);
3194				dport = th->th_dport;
3195				pd->dport = &th->th_dport;
3196			}
3197			rewrite++;
3198			break;
3199		case IPPROTO_UDP:
3200			bproto_sum = pd->hdr.udp->uh_sum;
3201			pd->proto_sum = &pd->hdr.udp->uh_sum;
3202
3203			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3204			    nk->port[pd->sidx] != sport) {
3205				pf_change_ap(m, saddr, &pd->hdr.udp->uh_sport,
3206				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3207				    &nk->addr[pd->sidx],
3208				    nk->port[pd->sidx], 1, af);
3209				sport = pd->hdr.udp->uh_sport;
3210				pd->sport = &pd->hdr.udp->uh_sport;
3211			}
3212
3213			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3214			    nk->port[pd->didx] != dport) {
3215				pf_change_ap(m, daddr, &pd->hdr.udp->uh_dport,
3216				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3217				    &nk->addr[pd->didx],
3218				    nk->port[pd->didx], 1, af);
3219				dport = pd->hdr.udp->uh_dport;
3220				pd->dport = &pd->hdr.udp->uh_dport;
3221			}
3222			rewrite++;
3223			break;
3224#ifdef INET
3225		case IPPROTO_ICMP:
3226			nk->port[0] = nk->port[1];
3227			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3228				pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3229				    nk->addr[pd->sidx].v4.s_addr, 0);
3230
3231			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3232				pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3233				    nk->addr[pd->didx].v4.s_addr, 0);
3234
3235			if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3236				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3237				    pd->hdr.icmp->icmp_cksum, sport,
3238				    nk->port[1], 0);
3239				pd->hdr.icmp->icmp_id = nk->port[1];
3240				pd->sport = &pd->hdr.icmp->icmp_id;
3241			}
3242			m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3243			break;
3244#endif /* INET */
3245#ifdef INET6
3246		case IPPROTO_ICMPV6:
3247			nk->port[0] = nk->port[1];
3248			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3249				pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3250				    &nk->addr[pd->sidx], 0);
3251
3252			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3253				pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3254				    &nk->addr[pd->didx], 0);
3255			rewrite++;
3256			break;
3257#endif /* INET */
3258		default:
3259			switch (af) {
3260#ifdef INET
3261			case AF_INET:
3262				if (PF_ANEQ(saddr,
3263				    &nk->addr[pd->sidx], AF_INET))
3264					pf_change_a(&saddr->v4.s_addr,
3265					    pd->ip_sum,
3266					    nk->addr[pd->sidx].v4.s_addr, 0);
3267
3268				if (PF_ANEQ(daddr,
3269				    &nk->addr[pd->didx], AF_INET))
3270					pf_change_a(&daddr->v4.s_addr,
3271					    pd->ip_sum,
3272					    nk->addr[pd->didx].v4.s_addr, 0);
3273				break;
3274#endif /* INET */
3275#ifdef INET6
3276			case AF_INET6:
3277				if (PF_ANEQ(saddr,
3278				    &nk->addr[pd->sidx], AF_INET6))
3279					PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3280
3281				if (PF_ANEQ(daddr,
3282				    &nk->addr[pd->didx], AF_INET6))
3283					PF_ACPY(saddr, &nk->addr[pd->didx], af);
3284				break;
3285#endif /* INET */
3286			}
3287			break;
3288		}
3289		if (nr->natpass)
3290			r = NULL;
3291		pd->nat_rule = nr;
3292	}
3293
3294	while (r != NULL) {
3295		r->evaluations++;
3296		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3297			r = r->skip[PF_SKIP_IFP].ptr;
3298		else if (r->direction && r->direction != direction)
3299			r = r->skip[PF_SKIP_DIR].ptr;
3300		else if (r->af && r->af != af)
3301			r = r->skip[PF_SKIP_AF].ptr;
3302		else if (r->proto && r->proto != pd->proto)
3303			r = r->skip[PF_SKIP_PROTO].ptr;
3304		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3305		    r->src.neg, kif, M_GETFIB(m)))
3306			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3307		/* tcp/udp only. port_op always 0 in other cases */
3308		else if (r->src.port_op && !pf_match_port(r->src.port_op,
3309		    r->src.port[0], r->src.port[1], sport))
3310			r = r->skip[PF_SKIP_SRC_PORT].ptr;
3311		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3312		    r->dst.neg, NULL, M_GETFIB(m)))
3313			r = r->skip[PF_SKIP_DST_ADDR].ptr;
3314		/* tcp/udp only. port_op always 0 in other cases */
3315		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3316		    r->dst.port[0], r->dst.port[1], dport))
3317			r = r->skip[PF_SKIP_DST_PORT].ptr;
3318		/* icmp only. type always 0 in other cases */
3319		else if (r->type && r->type != icmptype + 1)
3320			r = TAILQ_NEXT(r, entries);
3321		/* icmp only. type always 0 in other cases */
3322		else if (r->code && r->code != icmpcode + 1)
3323			r = TAILQ_NEXT(r, entries);
3324		else if (r->tos && !(r->tos == pd->tos))
3325			r = TAILQ_NEXT(r, entries);
3326		else if (r->rule_flag & PFRULE_FRAGMENT)
3327			r = TAILQ_NEXT(r, entries);
3328		else if (pd->proto == IPPROTO_TCP &&
3329		    (r->flagset & th->th_flags) != r->flags)
3330			r = TAILQ_NEXT(r, entries);
3331		/* tcp/udp only. uid.op always 0 in other cases */
3332		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3333		    pf_socket_lookup(direction, pd, m), 1)) &&
3334		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3335		    pd->lookup.uid))
3336			r = TAILQ_NEXT(r, entries);
3337		/* tcp/udp only. gid.op always 0 in other cases */
3338		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3339		    pf_socket_lookup(direction, pd, m), 1)) &&
3340		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3341		    pd->lookup.gid))
3342			r = TAILQ_NEXT(r, entries);
3343		else if (r->prob &&
3344		    r->prob <= arc4random())
3345			r = TAILQ_NEXT(r, entries);
3346		else if (r->match_tag && !pf_match_tag(m, r, &tag,
3347		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
3348			r = TAILQ_NEXT(r, entries);
3349		else if (r->os_fingerprint != PF_OSFP_ANY &&
3350		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3351		    pf_osfp_fingerprint(pd, m, off, th),
3352		    r->os_fingerprint)))
3353			r = TAILQ_NEXT(r, entries);
3354		else {
3355			if (r->tag)
3356				tag = r->tag;
3357			if (r->rtableid >= 0)
3358				rtableid = r->rtableid;
3359			if (r->anchor == NULL) {
3360				match = 1;
3361				*rm = r;
3362				*am = a;
3363				*rsm = ruleset;
3364				if ((*rm)->quick)
3365					break;
3366				r = TAILQ_NEXT(r, entries);
3367			} else
3368				pf_step_into_anchor(anchor_stack, &asd,
3369				    &ruleset, PF_RULESET_FILTER, &r, &a,
3370				    &match);
3371		}
3372		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3373		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3374			break;
3375	}
3376	r = *rm;
3377	a = *am;
3378	ruleset = *rsm;
3379
3380	REASON_SET(&reason, PFRES_MATCH);
3381
3382	if (r->log || (nr != NULL && nr->log)) {
3383		if (rewrite)
3384			m_copyback(m, off, hdrlen, pd->hdr.any);
3385		PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3386		    ruleset, pd, 1);
3387	}
3388
3389	if ((r->action == PF_DROP) &&
3390	    ((r->rule_flag & PFRULE_RETURNRST) ||
3391	    (r->rule_flag & PFRULE_RETURNICMP) ||
3392	    (r->rule_flag & PFRULE_RETURN))) {
3393		/* undo NAT changes, if they have taken place */
3394		if (nr != NULL) {
3395			PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3396			PF_ACPY(daddr, &sk->addr[pd->didx], af);
3397			if (pd->sport)
3398				*pd->sport = sk->port[pd->sidx];
3399			if (pd->dport)
3400				*pd->dport = sk->port[pd->didx];
3401			if (pd->proto_sum)
3402				*pd->proto_sum = bproto_sum;
3403			if (pd->ip_sum)
3404				*pd->ip_sum = bip_sum;
3405			m_copyback(m, off, hdrlen, pd->hdr.any);
3406		}
3407		if (pd->proto == IPPROTO_TCP &&
3408		    ((r->rule_flag & PFRULE_RETURNRST) ||
3409		    (r->rule_flag & PFRULE_RETURN)) &&
3410		    !(th->th_flags & TH_RST)) {
3411			u_int32_t	 ack = ntohl(th->th_seq) + pd->p_len;
3412			int		 len = 0;
3413#ifdef INET
3414			struct ip	*h4;
3415#endif
3416#ifdef INET6
3417			struct ip6_hdr	*h6;
3418#endif
3419
3420			switch (af) {
3421#ifdef INET
3422			case AF_INET:
3423				h4 = mtod(m, struct ip *);
3424				len = ntohs(h4->ip_len) - off;
3425				break;
3426#endif
3427#ifdef INET6
3428			case AF_INET6:
3429				h6 = mtod(m, struct ip6_hdr *);
3430				len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3431				break;
3432#endif
3433			}
3434
3435			if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3436				REASON_SET(&reason, PFRES_PROTCKSUM);
3437			else {
3438				if (th->th_flags & TH_SYN)
3439					ack++;
3440				if (th->th_flags & TH_FIN)
3441					ack++;
3442				pf_send_tcp(m, r, af, pd->dst,
3443				    pd->src, th->th_dport, th->th_sport,
3444				    ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3445				    r->return_ttl, 1, 0, kif->pfik_ifp);
3446			}
3447		} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3448		    r->return_icmp)
3449			pf_send_icmp(m, r->return_icmp >> 8,
3450			    r->return_icmp & 255, af, r);
3451		else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3452		    r->return_icmp6)
3453			pf_send_icmp(m, r->return_icmp6 >> 8,
3454			    r->return_icmp6 & 255, af, r);
3455	}
3456
3457	if (r->action == PF_DROP)
3458		goto cleanup;
3459
3460	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3461		REASON_SET(&reason, PFRES_MEMORY);
3462		goto cleanup;
3463	}
3464	if (rtableid >= 0)
3465		M_SETFIB(m, rtableid);
3466
3467	if (!state_icmp && (r->keep_state || nr != NULL ||
3468	    (pd->flags & PFDESC_TCP_NORM))) {
3469		int action;
3470		action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3471		    sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3472		    hdrlen);
3473		if (action != PF_PASS)
3474			return (action);
3475	} else {
3476		if (sk != NULL)
3477			uma_zfree(V_pf_state_key_z, sk);
3478		if (nk != NULL)
3479			uma_zfree(V_pf_state_key_z, nk);
3480	}
3481
3482	/* copy back packet headers if we performed NAT operations */
3483	if (rewrite)
3484		m_copyback(m, off, hdrlen, pd->hdr.any);
3485
3486	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3487	    direction == PF_OUT &&
3488	    pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3489		/*
3490		 * We want the state created, but we dont
3491		 * want to send this in case a partner
3492		 * firewall has to know about it to allow
3493		 * replies through it.
3494		 */
3495		return (PF_DEFER);
3496
3497	return (PF_PASS);
3498
3499cleanup:
3500	if (sk != NULL)
3501		uma_zfree(V_pf_state_key_z, sk);
3502	if (nk != NULL)
3503		uma_zfree(V_pf_state_key_z, nk);
3504	return (PF_DROP);
3505}
3506
3507static int
3508pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3509    struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3510    struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3511    u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3512    int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3513{
3514	struct pf_state		*s = NULL;
3515	struct pf_src_node	*sn = NULL;
3516	struct tcphdr		*th = pd->hdr.tcp;
3517	u_int16_t		 mss = V_tcp_mssdflt;
3518	u_short			 reason;
3519
3520	/* check maximums */
3521	if (r->max_states &&
3522	    (counter_u64_fetch(r->states_cur) >= r->max_states)) {
3523		counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
3524		REASON_SET(&reason, PFRES_MAXSTATES);
3525		goto csfailed;
3526	}
3527	/* src node for filter rule */
3528	if ((r->rule_flag & PFRULE_SRCTRACK ||
3529	    r->rpool.opts & PF_POOL_STICKYADDR) &&
3530	    pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3531		REASON_SET(&reason, PFRES_SRCLIMIT);
3532		goto csfailed;
3533	}
3534	/* src node for translation rule */
3535	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3536	    pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3537		REASON_SET(&reason, PFRES_SRCLIMIT);
3538		goto csfailed;
3539	}
3540	s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3541	if (s == NULL) {
3542		REASON_SET(&reason, PFRES_MEMORY);
3543		goto csfailed;
3544	}
3545	s->rule.ptr = r;
3546	s->nat_rule.ptr = nr;
3547	s->anchor.ptr = a;
3548	STATE_INC_COUNTERS(s);
3549	if (r->allow_opts)
3550		s->state_flags |= PFSTATE_ALLOWOPTS;
3551	if (r->rule_flag & PFRULE_STATESLOPPY)
3552		s->state_flags |= PFSTATE_SLOPPY;
3553	s->log = r->log & PF_LOG_ALL;
3554	s->sync_state = PFSYNC_S_NONE;
3555	if (nr != NULL)
3556		s->log |= nr->log & PF_LOG_ALL;
3557	switch (pd->proto) {
3558	case IPPROTO_TCP:
3559		s->src.seqlo = ntohl(th->th_seq);
3560		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3561		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3562		    r->keep_state == PF_STATE_MODULATE) {
3563			/* Generate sequence number modulator */
3564			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3565			    0)
3566				s->src.seqdiff = 1;
3567			pf_change_proto_a(m, &th->th_seq, &th->th_sum,
3568			    htonl(s->src.seqlo + s->src.seqdiff), 0);
3569			*rewrite = 1;
3570		} else
3571			s->src.seqdiff = 0;
3572		if (th->th_flags & TH_SYN) {
3573			s->src.seqhi++;
3574			s->src.wscale = pf_get_wscale(m, off,
3575			    th->th_off, pd->af);
3576		}
3577		s->src.max_win = MAX(ntohs(th->th_win), 1);
3578		if (s->src.wscale & PF_WSCALE_MASK) {
3579			/* Remove scale factor from initial window */
3580			int win = s->src.max_win;
3581			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3582			s->src.max_win = (win - 1) >>
3583			    (s->src.wscale & PF_WSCALE_MASK);
3584		}
3585		if (th->th_flags & TH_FIN)
3586			s->src.seqhi++;
3587		s->dst.seqhi = 1;
3588		s->dst.max_win = 1;
3589		s->src.state = TCPS_SYN_SENT;
3590		s->dst.state = TCPS_CLOSED;
3591		s->timeout = PFTM_TCP_FIRST_PACKET;
3592		break;
3593	case IPPROTO_UDP:
3594		s->src.state = PFUDPS_SINGLE;
3595		s->dst.state = PFUDPS_NO_TRAFFIC;
3596		s->timeout = PFTM_UDP_FIRST_PACKET;
3597		break;
3598	case IPPROTO_ICMP:
3599#ifdef INET6
3600	case IPPROTO_ICMPV6:
3601#endif
3602		s->timeout = PFTM_ICMP_FIRST_PACKET;
3603		break;
3604	default:
3605		s->src.state = PFOTHERS_SINGLE;
3606		s->dst.state = PFOTHERS_NO_TRAFFIC;
3607		s->timeout = PFTM_OTHER_FIRST_PACKET;
3608	}
3609
3610	if (r->rt && r->rt != PF_FASTROUTE) {
3611		if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
3612			REASON_SET(&reason, PFRES_BADSTATE);
3613			pf_src_tree_remove_state(s);
3614			STATE_DEC_COUNTERS(s);
3615			uma_zfree(V_pf_state_z, s);
3616			goto csfailed;
3617		}
3618		s->rt_kif = r->rpool.cur->kif;
3619	}
3620
3621	s->creation = time_uptime;
3622	s->expire = time_uptime;
3623
3624	if (sn != NULL)
3625		s->src_node = sn;
3626	if (nsn != NULL) {
3627		/* XXX We only modify one side for now. */
3628		PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3629		s->nat_src_node = nsn;
3630	}
3631	if (pd->proto == IPPROTO_TCP) {
3632		if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3633		    off, pd, th, &s->src, &s->dst)) {
3634			REASON_SET(&reason, PFRES_MEMORY);
3635			pf_src_tree_remove_state(s);
3636			STATE_DEC_COUNTERS(s);
3637			uma_zfree(V_pf_state_z, s);
3638			return (PF_DROP);
3639		}
3640		if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3641		    pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3642		    &s->src, &s->dst, rewrite)) {
3643			/* This really shouldn't happen!!! */
3644			DPFPRINTF(PF_DEBUG_URGENT,
3645			    ("pf_normalize_tcp_stateful failed on first pkt"));
3646			pf_normalize_tcp_cleanup(s);
3647			pf_src_tree_remove_state(s);
3648			STATE_DEC_COUNTERS(s);
3649			uma_zfree(V_pf_state_z, s);
3650			return (PF_DROP);
3651		}
3652	}
3653	s->direction = pd->dir;
3654
3655	/*
3656	 * sk/nk could already been setup by pf_get_translation().
3657	 */
3658	if (nr == NULL) {
3659		KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3660		    __func__, nr, sk, nk));
3661		sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3662		if (sk == NULL)
3663			goto csfailed;
3664		nk = sk;
3665	} else
3666		KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3667		    __func__, nr, sk, nk));
3668
3669	/* Swap sk/nk for PF_OUT. */
3670	if (pf_state_insert(BOUND_IFACE(r, kif),
3671	    (pd->dir == PF_IN) ? sk : nk,
3672	    (pd->dir == PF_IN) ? nk : sk, s)) {
3673		if (pd->proto == IPPROTO_TCP)
3674			pf_normalize_tcp_cleanup(s);
3675		REASON_SET(&reason, PFRES_STATEINS);
3676		pf_src_tree_remove_state(s);
3677		STATE_DEC_COUNTERS(s);
3678		uma_zfree(V_pf_state_z, s);
3679		return (PF_DROP);
3680	} else
3681		*sm = s;
3682
3683	if (tag > 0)
3684		s->tag = tag;
3685	if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3686	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3687		s->src.state = PF_TCPS_PROXY_SRC;
3688		/* undo NAT changes, if they have taken place */
3689		if (nr != NULL) {
3690			struct pf_state_key *skt = s->key[PF_SK_WIRE];
3691			if (pd->dir == PF_OUT)
3692				skt = s->key[PF_SK_STACK];
3693			PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3694			PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3695			if (pd->sport)
3696				*pd->sport = skt->port[pd->sidx];
3697			if (pd->dport)
3698				*pd->dport = skt->port[pd->didx];
3699			if (pd->proto_sum)
3700				*pd->proto_sum = bproto_sum;
3701			if (pd->ip_sum)
3702				*pd->ip_sum = bip_sum;
3703			m_copyback(m, off, hdrlen, pd->hdr.any);
3704		}
3705		s->src.seqhi = htonl(arc4random());
3706		/* Find mss option */
3707		int rtid = M_GETFIB(m);
3708		mss = pf_get_mss(m, off, th->th_off, pd->af);
3709		mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3710		mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3711		s->src.mss = mss;
3712		pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3713		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3714		    TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3715		REASON_SET(&reason, PFRES_SYNPROXY);
3716		return (PF_SYNPROXY_DROP);
3717	}
3718
3719	return (PF_PASS);
3720
3721csfailed:
3722	if (sk != NULL)
3723		uma_zfree(V_pf_state_key_z, sk);
3724	if (nk != NULL)
3725		uma_zfree(V_pf_state_key_z, nk);
3726
3727	if (sn != NULL) {
3728		struct pf_srchash *sh;
3729
3730		sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
3731		PF_HASHROW_LOCK(sh);
3732		if (--sn->states == 0 && sn->expire == 0) {
3733			pf_unlink_src_node(sn);
3734			uma_zfree(V_pf_sources_z, sn);
3735			counter_u64_add(
3736			    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3737		}
3738		PF_HASHROW_UNLOCK(sh);
3739	}
3740
3741	if (nsn != sn && nsn != NULL) {
3742		struct pf_srchash *sh;
3743
3744		sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
3745		PF_HASHROW_LOCK(sh);
3746		if (--nsn->states == 0 && nsn->expire == 0) {
3747			pf_unlink_src_node(nsn);
3748			uma_zfree(V_pf_sources_z, nsn);
3749			counter_u64_add(
3750			    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
3751		}
3752		PF_HASHROW_UNLOCK(sh);
3753	}
3754
3755	return (PF_DROP);
3756}
3757
3758static int
3759pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3760    struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3761    struct pf_ruleset **rsm)
3762{
3763	struct pf_rule		*r, *a = NULL;
3764	struct pf_ruleset	*ruleset = NULL;
3765	sa_family_t		 af = pd->af;
3766	u_short			 reason;
3767	int			 tag = -1;
3768	int			 asd = 0;
3769	int			 match = 0;
3770	struct pf_anchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
3771
3772	PF_RULES_RASSERT();
3773
3774	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3775	while (r != NULL) {
3776		r->evaluations++;
3777		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3778			r = r->skip[PF_SKIP_IFP].ptr;
3779		else if (r->direction && r->direction != direction)
3780			r = r->skip[PF_SKIP_DIR].ptr;
3781		else if (r->af && r->af != af)
3782			r = r->skip[PF_SKIP_AF].ptr;
3783		else if (r->proto && r->proto != pd->proto)
3784			r = r->skip[PF_SKIP_PROTO].ptr;
3785		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3786		    r->src.neg, kif, M_GETFIB(m)))
3787			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3788		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3789		    r->dst.neg, NULL, M_GETFIB(m)))
3790			r = r->skip[PF_SKIP_DST_ADDR].ptr;
3791		else if (r->tos && !(r->tos == pd->tos))
3792			r = TAILQ_NEXT(r, entries);
3793		else if (r->os_fingerprint != PF_OSFP_ANY)
3794			r = TAILQ_NEXT(r, entries);
3795		else if (pd->proto == IPPROTO_UDP &&
3796		    (r->src.port_op || r->dst.port_op))
3797			r = TAILQ_NEXT(r, entries);
3798		else if (pd->proto == IPPROTO_TCP &&
3799		    (r->src.port_op || r->dst.port_op || r->flagset))
3800			r = TAILQ_NEXT(r, entries);
3801		else if ((pd->proto == IPPROTO_ICMP ||
3802		    pd->proto == IPPROTO_ICMPV6) &&
3803		    (r->type || r->code))
3804			r = TAILQ_NEXT(r, entries);
3805		else if (r->prob && r->prob <=
3806		    (arc4random() % (UINT_MAX - 1) + 1))
3807			r = TAILQ_NEXT(r, entries);
3808		else if (r->match_tag && !pf_match_tag(m, r, &tag,
3809		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
3810			r = TAILQ_NEXT(r, entries);
3811		else {
3812			if (r->anchor == NULL) {
3813				match = 1;
3814				*rm = r;
3815				*am = a;
3816				*rsm = ruleset;
3817				if ((*rm)->quick)
3818					break;
3819				r = TAILQ_NEXT(r, entries);
3820			} else
3821				pf_step_into_anchor(anchor_stack, &asd,
3822				    &ruleset, PF_RULESET_FILTER, &r, &a,
3823				    &match);
3824		}
3825		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3826		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3827			break;
3828	}
3829	r = *rm;
3830	a = *am;
3831	ruleset = *rsm;
3832
3833	REASON_SET(&reason, PFRES_MATCH);
3834
3835	if (r->log)
3836		PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3837		    1);
3838
3839	if (r->action != PF_PASS)
3840		return (PF_DROP);
3841
3842	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3843		REASON_SET(&reason, PFRES_MEMORY);
3844		return (PF_DROP);
3845	}
3846
3847	return (PF_PASS);
3848}
3849
3850static int
3851pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3852	struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3853	struct pf_pdesc *pd, u_short *reason, int *copyback)
3854{
3855	struct tcphdr		*th = pd->hdr.tcp;
3856	u_int16_t		 win = ntohs(th->th_win);
3857	u_int32_t		 ack, end, seq, orig_seq;
3858	u_int8_t		 sws, dws;
3859	int			 ackskew;
3860
3861	if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3862		sws = src->wscale & PF_WSCALE_MASK;
3863		dws = dst->wscale & PF_WSCALE_MASK;
3864	} else
3865		sws = dws = 0;
3866
3867	/*
3868	 * Sequence tracking algorithm from Guido van Rooij's paper:
3869	 *   http://www.madison-gurkha.com/publications/tcp_filtering/
3870	 *	tcp_filtering.ps
3871	 */
3872
3873	orig_seq = seq = ntohl(th->th_seq);
3874	if (src->seqlo == 0) {
3875		/* First packet from this end. Set its state */
3876
3877		if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3878		    src->scrub == NULL) {
3879			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3880				REASON_SET(reason, PFRES_MEMORY);
3881				return (PF_DROP);
3882			}
3883		}
3884
3885		/* Deferred generation of sequence number modulator */
3886		if (dst->seqdiff && !src->seqdiff) {
3887			/* use random iss for the TCP server */
3888			while ((src->seqdiff = arc4random() - seq) == 0)
3889				;
3890			ack = ntohl(th->th_ack) - dst->seqdiff;
3891			pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3892			    src->seqdiff), 0);
3893			pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3894			*copyback = 1;
3895		} else {
3896			ack = ntohl(th->th_ack);
3897		}
3898
3899		end = seq + pd->p_len;
3900		if (th->th_flags & TH_SYN) {
3901			end++;
3902			if (dst->wscale & PF_WSCALE_FLAG) {
3903				src->wscale = pf_get_wscale(m, off, th->th_off,
3904				    pd->af);
3905				if (src->wscale & PF_WSCALE_FLAG) {
3906					/* Remove scale factor from initial
3907					 * window */
3908					sws = src->wscale & PF_WSCALE_MASK;
3909					win = ((u_int32_t)win + (1 << sws) - 1)
3910					    >> sws;
3911					dws = dst->wscale & PF_WSCALE_MASK;
3912				} else {
3913					/* fixup other window */
3914					dst->max_win <<= dst->wscale &
3915					    PF_WSCALE_MASK;
3916					/* in case of a retrans SYN|ACK */
3917					dst->wscale = 0;
3918				}
3919			}
3920		}
3921		if (th->th_flags & TH_FIN)
3922			end++;
3923
3924		src->seqlo = seq;
3925		if (src->state < TCPS_SYN_SENT)
3926			src->state = TCPS_SYN_SENT;
3927
3928		/*
3929		 * May need to slide the window (seqhi may have been set by
3930		 * the crappy stack check or if we picked up the connection
3931		 * after establishment)
3932		 */
3933		if (src->seqhi == 1 ||
3934		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3935			src->seqhi = end + MAX(1, dst->max_win << dws);
3936		if (win > src->max_win)
3937			src->max_win = win;
3938
3939	} else {
3940		ack = ntohl(th->th_ack) - dst->seqdiff;
3941		if (src->seqdiff) {
3942			/* Modulate sequence numbers */
3943			pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
3944			    src->seqdiff), 0);
3945			pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
3946			*copyback = 1;
3947		}
3948		end = seq + pd->p_len;
3949		if (th->th_flags & TH_SYN)
3950			end++;
3951		if (th->th_flags & TH_FIN)
3952			end++;
3953	}
3954
3955	if ((th->th_flags & TH_ACK) == 0) {
3956		/* Let it pass through the ack skew check */
3957		ack = dst->seqlo;
3958	} else if ((ack == 0 &&
3959	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3960	    /* broken tcp stacks do not set ack */
3961	    (dst->state < TCPS_SYN_SENT)) {
3962		/*
3963		 * Many stacks (ours included) will set the ACK number in an
3964		 * FIN|ACK if the SYN times out -- no sequence to ACK.
3965		 */
3966		ack = dst->seqlo;
3967	}
3968
3969	if (seq == end) {
3970		/* Ease sequencing restrictions on no data packets */
3971		seq = src->seqlo;
3972		end = seq;
3973	}
3974
3975	ackskew = dst->seqlo - ack;
3976
3977
3978	/*
3979	 * Need to demodulate the sequence numbers in any TCP SACK options
3980	 * (Selective ACK). We could optionally validate the SACK values
3981	 * against the current ACK window, either forwards or backwards, but
3982	 * I'm not confident that SACK has been implemented properly
3983	 * everywhere. It wouldn't surprise me if several stacks accidently
3984	 * SACK too far backwards of previously ACKed data. There really aren't
3985	 * any security implications of bad SACKing unless the target stack
3986	 * doesn't validate the option length correctly. Someone trying to
3987	 * spoof into a TCP connection won't bother blindly sending SACK
3988	 * options anyway.
3989	 */
3990	if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
3991		if (pf_modulate_sack(m, off, pd, th, dst))
3992			*copyback = 1;
3993	}
3994
3995
3996#define	MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
3997	if (SEQ_GEQ(src->seqhi, end) &&
3998	    /* Last octet inside other's window space */
3999	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
4000	    /* Retrans: not more than one window back */
4001	    (ackskew >= -MAXACKWINDOW) &&
4002	    /* Acking not more than one reassembled fragment backwards */
4003	    (ackskew <= (MAXACKWINDOW << sws)) &&
4004	    /* Acking not more than one window forward */
4005	    ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
4006	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
4007	    (pd->flags & PFDESC_IP_REAS) == 0)) {
4008	    /* Require an exact/+1 sequence match on resets when possible */
4009
4010		if (dst->scrub || src->scrub) {
4011			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4012			    *state, src, dst, copyback))
4013				return (PF_DROP);
4014		}
4015
4016		/* update max window */
4017		if (src->max_win < win)
4018			src->max_win = win;
4019		/* synchronize sequencing */
4020		if (SEQ_GT(end, src->seqlo))
4021			src->seqlo = end;
4022		/* slide the window of what the other end can send */
4023		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4024			dst->seqhi = ack + MAX((win << sws), 1);
4025
4026
4027		/* update states */
4028		if (th->th_flags & TH_SYN)
4029			if (src->state < TCPS_SYN_SENT)
4030				src->state = TCPS_SYN_SENT;
4031		if (th->th_flags & TH_FIN)
4032			if (src->state < TCPS_CLOSING)
4033				src->state = TCPS_CLOSING;
4034		if (th->th_flags & TH_ACK) {
4035			if (dst->state == TCPS_SYN_SENT) {
4036				dst->state = TCPS_ESTABLISHED;
4037				if (src->state == TCPS_ESTABLISHED &&
4038				    (*state)->src_node != NULL &&
4039				    pf_src_connlimit(state)) {
4040					REASON_SET(reason, PFRES_SRCLIMIT);
4041					return (PF_DROP);
4042				}
4043			} else if (dst->state == TCPS_CLOSING)
4044				dst->state = TCPS_FIN_WAIT_2;
4045		}
4046		if (th->th_flags & TH_RST)
4047			src->state = dst->state = TCPS_TIME_WAIT;
4048
4049		/* update expire time */
4050		(*state)->expire = time_uptime;
4051		if (src->state >= TCPS_FIN_WAIT_2 &&
4052		    dst->state >= TCPS_FIN_WAIT_2)
4053			(*state)->timeout = PFTM_TCP_CLOSED;
4054		else if (src->state >= TCPS_CLOSING &&
4055		    dst->state >= TCPS_CLOSING)
4056			(*state)->timeout = PFTM_TCP_FIN_WAIT;
4057		else if (src->state < TCPS_ESTABLISHED ||
4058		    dst->state < TCPS_ESTABLISHED)
4059			(*state)->timeout = PFTM_TCP_OPENING;
4060		else if (src->state >= TCPS_CLOSING ||
4061		    dst->state >= TCPS_CLOSING)
4062			(*state)->timeout = PFTM_TCP_CLOSING;
4063		else
4064			(*state)->timeout = PFTM_TCP_ESTABLISHED;
4065
4066		/* Fall through to PASS packet */
4067
4068	} else if ((dst->state < TCPS_SYN_SENT ||
4069		dst->state >= TCPS_FIN_WAIT_2 ||
4070		src->state >= TCPS_FIN_WAIT_2) &&
4071	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
4072	    /* Within a window forward of the originating packet */
4073	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
4074	    /* Within a window backward of the originating packet */
4075
4076		/*
4077		 * This currently handles three situations:
4078		 *  1) Stupid stacks will shotgun SYNs before their peer
4079		 *     replies.
4080		 *  2) When PF catches an already established stream (the
4081		 *     firewall rebooted, the state table was flushed, routes
4082		 *     changed...)
4083		 *  3) Packets get funky immediately after the connection
4084		 *     closes (this should catch Solaris spurious ACK|FINs
4085		 *     that web servers like to spew after a close)
4086		 *
4087		 * This must be a little more careful than the above code
4088		 * since packet floods will also be caught here. We don't
4089		 * update the TTL here to mitigate the damage of a packet
4090		 * flood and so the same code can handle awkward establishment
4091		 * and a loosened connection close.
4092		 * In the establishment case, a correct peer response will
4093		 * validate the connection, go through the normal state code
4094		 * and keep updating the state TTL.
4095		 */
4096
4097		if (V_pf_status.debug >= PF_DEBUG_MISC) {
4098			printf("pf: loose state match: ");
4099			pf_print_state(*state);
4100			pf_print_flags(th->th_flags);
4101			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4102			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
4103			    pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
4104			    (unsigned long long)(*state)->packets[1],
4105			    pd->dir == PF_IN ? "in" : "out",
4106			    pd->dir == (*state)->direction ? "fwd" : "rev");
4107		}
4108
4109		if (dst->scrub || src->scrub) {
4110			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4111			    *state, src, dst, copyback))
4112				return (PF_DROP);
4113		}
4114
4115		/* update max window */
4116		if (src->max_win < win)
4117			src->max_win = win;
4118		/* synchronize sequencing */
4119		if (SEQ_GT(end, src->seqlo))
4120			src->seqlo = end;
4121		/* slide the window of what the other end can send */
4122		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
4123			dst->seqhi = ack + MAX((win << sws), 1);
4124
4125		/*
4126		 * Cannot set dst->seqhi here since this could be a shotgunned
4127		 * SYN and not an already established connection.
4128		 */
4129
4130		if (th->th_flags & TH_FIN)
4131			if (src->state < TCPS_CLOSING)
4132				src->state = TCPS_CLOSING;
4133		if (th->th_flags & TH_RST)
4134			src->state = dst->state = TCPS_TIME_WAIT;
4135
4136		/* Fall through to PASS packet */
4137
4138	} else {
4139		if ((*state)->dst.state == TCPS_SYN_SENT &&
4140		    (*state)->src.state == TCPS_SYN_SENT) {
4141			/* Send RST for state mismatches during handshake */
4142			if (!(th->th_flags & TH_RST))
4143				pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4144				    pd->dst, pd->src, th->th_dport,
4145				    th->th_sport, ntohl(th->th_ack), 0,
4146				    TH_RST, 0, 0,
4147				    (*state)->rule.ptr->return_ttl, 1, 0,
4148				    kif->pfik_ifp);
4149			src->seqlo = 0;
4150			src->seqhi = 1;
4151			src->max_win = 1;
4152		} else if (V_pf_status.debug >= PF_DEBUG_MISC) {
4153			printf("pf: BAD state: ");
4154			pf_print_state(*state);
4155			pf_print_flags(th->th_flags);
4156			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
4157			    "pkts=%llu:%llu dir=%s,%s\n",
4158			    seq, orig_seq, ack, pd->p_len, ackskew,
4159			    (unsigned long long)(*state)->packets[0],
4160			    (unsigned long long)(*state)->packets[1],
4161			    pd->dir == PF_IN ? "in" : "out",
4162			    pd->dir == (*state)->direction ? "fwd" : "rev");
4163			printf("pf: State failure on: %c %c %c %c | %c %c\n",
4164			    SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4165			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4166			    ' ': '2',
4167			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4168			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4169			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4170			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4171		}
4172		REASON_SET(reason, PFRES_BADSTATE);
4173		return (PF_DROP);
4174	}
4175
4176	return (PF_PASS);
4177}
4178
4179static int
4180pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4181	struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4182{
4183	struct tcphdr		*th = pd->hdr.tcp;
4184
4185	if (th->th_flags & TH_SYN)
4186		if (src->state < TCPS_SYN_SENT)
4187			src->state = TCPS_SYN_SENT;
4188	if (th->th_flags & TH_FIN)
4189		if (src->state < TCPS_CLOSING)
4190			src->state = TCPS_CLOSING;
4191	if (th->th_flags & TH_ACK) {
4192		if (dst->state == TCPS_SYN_SENT) {
4193			dst->state = TCPS_ESTABLISHED;
4194			if (src->state == TCPS_ESTABLISHED &&
4195			    (*state)->src_node != NULL &&
4196			    pf_src_connlimit(state)) {
4197				REASON_SET(reason, PFRES_SRCLIMIT);
4198				return (PF_DROP);
4199			}
4200		} else if (dst->state == TCPS_CLOSING) {
4201			dst->state = TCPS_FIN_WAIT_2;
4202		} else if (src->state == TCPS_SYN_SENT &&
4203		    dst->state < TCPS_SYN_SENT) {
4204			/*
4205			 * Handle a special sloppy case where we only see one
4206			 * half of the connection. If there is a ACK after
4207			 * the initial SYN without ever seeing a packet from
4208			 * the destination, set the connection to established.
4209			 */
4210			dst->state = src->state = TCPS_ESTABLISHED;
4211			if ((*state)->src_node != NULL &&
4212			    pf_src_connlimit(state)) {
4213				REASON_SET(reason, PFRES_SRCLIMIT);
4214				return (PF_DROP);
4215			}
4216		} else if (src->state == TCPS_CLOSING &&
4217		    dst->state == TCPS_ESTABLISHED &&
4218		    dst->seqlo == 0) {
4219			/*
4220			 * Handle the closing of half connections where we
4221			 * don't see the full bidirectional FIN/ACK+ACK
4222			 * handshake.
4223			 */
4224			dst->state = TCPS_CLOSING;
4225		}
4226	}
4227	if (th->th_flags & TH_RST)
4228		src->state = dst->state = TCPS_TIME_WAIT;
4229
4230	/* update expire time */
4231	(*state)->expire = time_uptime;
4232	if (src->state >= TCPS_FIN_WAIT_2 &&
4233	    dst->state >= TCPS_FIN_WAIT_2)
4234		(*state)->timeout = PFTM_TCP_CLOSED;
4235	else if (src->state >= TCPS_CLOSING &&
4236	    dst->state >= TCPS_CLOSING)
4237		(*state)->timeout = PFTM_TCP_FIN_WAIT;
4238	else if (src->state < TCPS_ESTABLISHED ||
4239	    dst->state < TCPS_ESTABLISHED)
4240		(*state)->timeout = PFTM_TCP_OPENING;
4241	else if (src->state >= TCPS_CLOSING ||
4242	    dst->state >= TCPS_CLOSING)
4243		(*state)->timeout = PFTM_TCP_CLOSING;
4244	else
4245		(*state)->timeout = PFTM_TCP_ESTABLISHED;
4246
4247	return (PF_PASS);
4248}
4249
4250static int
4251pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4252    struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4253    u_short *reason)
4254{
4255	struct pf_state_key_cmp	 key;
4256	struct tcphdr		*th = pd->hdr.tcp;
4257	int			 copyback = 0;
4258	struct pf_state_peer	*src, *dst;
4259	struct pf_state_key	*sk;
4260
4261	bzero(&key, sizeof(key));
4262	key.af = pd->af;
4263	key.proto = IPPROTO_TCP;
4264	if (direction == PF_IN)	{	/* wire side, straight */
4265		PF_ACPY(&key.addr[0], pd->src, key.af);
4266		PF_ACPY(&key.addr[1], pd->dst, key.af);
4267		key.port[0] = th->th_sport;
4268		key.port[1] = th->th_dport;
4269	} else {			/* stack side, reverse */
4270		PF_ACPY(&key.addr[1], pd->src, key.af);
4271		PF_ACPY(&key.addr[0], pd->dst, key.af);
4272		key.port[1] = th->th_sport;
4273		key.port[0] = th->th_dport;
4274	}
4275
4276	STATE_LOOKUP(kif, &key, direction, *state, pd);
4277
4278	if (direction == (*state)->direction) {
4279		src = &(*state)->src;
4280		dst = &(*state)->dst;
4281	} else {
4282		src = &(*state)->dst;
4283		dst = &(*state)->src;
4284	}
4285
4286	sk = (*state)->key[pd->didx];
4287
4288	if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4289		if (direction != (*state)->direction) {
4290			REASON_SET(reason, PFRES_SYNPROXY);
4291			return (PF_SYNPROXY_DROP);
4292		}
4293		if (th->th_flags & TH_SYN) {
4294			if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4295				REASON_SET(reason, PFRES_SYNPROXY);
4296				return (PF_DROP);
4297			}
4298			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4299			    pd->src, th->th_dport, th->th_sport,
4300			    (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4301			    TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4302			REASON_SET(reason, PFRES_SYNPROXY);
4303			return (PF_SYNPROXY_DROP);
4304		} else if (!(th->th_flags & TH_ACK) ||
4305		    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4306		    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4307			REASON_SET(reason, PFRES_SYNPROXY);
4308			return (PF_DROP);
4309		} else if ((*state)->src_node != NULL &&
4310		    pf_src_connlimit(state)) {
4311			REASON_SET(reason, PFRES_SRCLIMIT);
4312			return (PF_DROP);
4313		} else
4314			(*state)->src.state = PF_TCPS_PROXY_DST;
4315	}
4316	if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4317		if (direction == (*state)->direction) {
4318			if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4319			    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4320			    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4321				REASON_SET(reason, PFRES_SYNPROXY);
4322				return (PF_DROP);
4323			}
4324			(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4325			if ((*state)->dst.seqhi == 1)
4326				(*state)->dst.seqhi = htonl(arc4random());
4327			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4328			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4329			    sk->port[pd->sidx], sk->port[pd->didx],
4330			    (*state)->dst.seqhi, 0, TH_SYN, 0,
4331			    (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4332			REASON_SET(reason, PFRES_SYNPROXY);
4333			return (PF_SYNPROXY_DROP);
4334		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4335		    (TH_SYN|TH_ACK)) ||
4336		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4337			REASON_SET(reason, PFRES_SYNPROXY);
4338			return (PF_DROP);
4339		} else {
4340			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4341			(*state)->dst.seqlo = ntohl(th->th_seq);
4342			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4343			    pd->src, th->th_dport, th->th_sport,
4344			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4345			    TH_ACK, (*state)->src.max_win, 0, 0, 0,
4346			    (*state)->tag, NULL);
4347			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4348			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4349			    sk->port[pd->sidx], sk->port[pd->didx],
4350			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4351			    TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4352			(*state)->src.seqdiff = (*state)->dst.seqhi -
4353			    (*state)->src.seqlo;
4354			(*state)->dst.seqdiff = (*state)->src.seqhi -
4355			    (*state)->dst.seqlo;
4356			(*state)->src.seqhi = (*state)->src.seqlo +
4357			    (*state)->dst.max_win;
4358			(*state)->dst.seqhi = (*state)->dst.seqlo +
4359			    (*state)->src.max_win;
4360			(*state)->src.wscale = (*state)->dst.wscale = 0;
4361			(*state)->src.state = (*state)->dst.state =
4362			    TCPS_ESTABLISHED;
4363			REASON_SET(reason, PFRES_SYNPROXY);
4364			return (PF_SYNPROXY_DROP);
4365		}
4366	}
4367
4368	if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4369	    dst->state >= TCPS_FIN_WAIT_2 &&
4370	    src->state >= TCPS_FIN_WAIT_2) {
4371		if (V_pf_status.debug >= PF_DEBUG_MISC) {
4372			printf("pf: state reuse ");
4373			pf_print_state(*state);
4374			pf_print_flags(th->th_flags);
4375			printf("\n");
4376		}
4377		/* XXX make sure it's the same direction ?? */
4378		(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4379		pf_unlink_state(*state, PF_ENTER_LOCKED);
4380		*state = NULL;
4381		return (PF_DROP);
4382	}
4383
4384	if ((*state)->state_flags & PFSTATE_SLOPPY) {
4385		if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4386			return (PF_DROP);
4387	} else {
4388		if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4389		    &copyback) == PF_DROP)
4390			return (PF_DROP);
4391	}
4392
4393	/* translate source/destination address, if necessary */
4394	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4395		struct pf_state_key *nk = (*state)->key[pd->didx];
4396
4397		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4398		    nk->port[pd->sidx] != th->th_sport)
4399			pf_change_ap(m, pd->src, &th->th_sport,
4400			    pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
4401			    nk->port[pd->sidx], 0, pd->af);
4402
4403		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4404		    nk->port[pd->didx] != th->th_dport)
4405			pf_change_ap(m, pd->dst, &th->th_dport,
4406			    pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
4407			    nk->port[pd->didx], 0, pd->af);
4408		copyback = 1;
4409	}
4410
4411	/* Copyback sequence modulation or stateful scrub changes if needed */
4412	if (copyback)
4413		m_copyback(m, off, sizeof(*th), (caddr_t)th);
4414
4415	return (PF_PASS);
4416}
4417
4418static int
4419pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4420    struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4421{
4422	struct pf_state_peer	*src, *dst;
4423	struct pf_state_key_cmp	 key;
4424	struct udphdr		*uh = pd->hdr.udp;
4425
4426	bzero(&key, sizeof(key));
4427	key.af = pd->af;
4428	key.proto = IPPROTO_UDP;
4429	if (direction == PF_IN)	{	/* wire side, straight */
4430		PF_ACPY(&key.addr[0], pd->src, key.af);
4431		PF_ACPY(&key.addr[1], pd->dst, key.af);
4432		key.port[0] = uh->uh_sport;
4433		key.port[1] = uh->uh_dport;
4434	} else {			/* stack side, reverse */
4435		PF_ACPY(&key.addr[1], pd->src, key.af);
4436		PF_ACPY(&key.addr[0], pd->dst, key.af);
4437		key.port[1] = uh->uh_sport;
4438		key.port[0] = uh->uh_dport;
4439	}
4440
4441	STATE_LOOKUP(kif, &key, direction, *state, pd);
4442
4443	if (direction == (*state)->direction) {
4444		src = &(*state)->src;
4445		dst = &(*state)->dst;
4446	} else {
4447		src = &(*state)->dst;
4448		dst = &(*state)->src;
4449	}
4450
4451	/* update states */
4452	if (src->state < PFUDPS_SINGLE)
4453		src->state = PFUDPS_SINGLE;
4454	if (dst->state == PFUDPS_SINGLE)
4455		dst->state = PFUDPS_MULTIPLE;
4456
4457	/* update expire time */
4458	(*state)->expire = time_uptime;
4459	if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4460		(*state)->timeout = PFTM_UDP_MULTIPLE;
4461	else
4462		(*state)->timeout = PFTM_UDP_SINGLE;
4463
4464	/* translate source/destination address, if necessary */
4465	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4466		struct pf_state_key *nk = (*state)->key[pd->didx];
4467
4468		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4469		    nk->port[pd->sidx] != uh->uh_sport)
4470			pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
4471			    &uh->uh_sum, &nk->addr[pd->sidx],
4472			    nk->port[pd->sidx], 1, pd->af);
4473
4474		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4475		    nk->port[pd->didx] != uh->uh_dport)
4476			pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
4477			    &uh->uh_sum, &nk->addr[pd->didx],
4478			    nk->port[pd->didx], 1, pd->af);
4479		m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4480	}
4481
4482	return (PF_PASS);
4483}
4484
4485static int
4486pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4487    struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4488{
4489	struct pf_addr  *saddr = pd->src, *daddr = pd->dst;
4490	u_int16_t	 icmpid = 0, *icmpsum;
4491	u_int8_t	 icmptype;
4492	int		 state_icmp = 0;
4493	struct pf_state_key_cmp key;
4494
4495	bzero(&key, sizeof(key));
4496	switch (pd->proto) {
4497#ifdef INET
4498	case IPPROTO_ICMP:
4499		icmptype = pd->hdr.icmp->icmp_type;
4500		icmpid = pd->hdr.icmp->icmp_id;
4501		icmpsum = &pd->hdr.icmp->icmp_cksum;
4502
4503		if (icmptype == ICMP_UNREACH ||
4504		    icmptype == ICMP_SOURCEQUENCH ||
4505		    icmptype == ICMP_REDIRECT ||
4506		    icmptype == ICMP_TIMXCEED ||
4507		    icmptype == ICMP_PARAMPROB)
4508			state_icmp++;
4509		break;
4510#endif /* INET */
4511#ifdef INET6
4512	case IPPROTO_ICMPV6:
4513		icmptype = pd->hdr.icmp6->icmp6_type;
4514		icmpid = pd->hdr.icmp6->icmp6_id;
4515		icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4516
4517		if (icmptype == ICMP6_DST_UNREACH ||
4518		    icmptype == ICMP6_PACKET_TOO_BIG ||
4519		    icmptype == ICMP6_TIME_EXCEEDED ||
4520		    icmptype == ICMP6_PARAM_PROB)
4521			state_icmp++;
4522		break;
4523#endif /* INET6 */
4524	}
4525
4526	if (!state_icmp) {
4527
4528		/*
4529		 * ICMP query/reply message not related to a TCP/UDP packet.
4530		 * Search for an ICMP state.
4531		 */
4532		key.af = pd->af;
4533		key.proto = pd->proto;
4534		key.port[0] = key.port[1] = icmpid;
4535		if (direction == PF_IN)	{	/* wire side, straight */
4536			PF_ACPY(&key.addr[0], pd->src, key.af);
4537			PF_ACPY(&key.addr[1], pd->dst, key.af);
4538		} else {			/* stack side, reverse */
4539			PF_ACPY(&key.addr[1], pd->src, key.af);
4540			PF_ACPY(&key.addr[0], pd->dst, key.af);
4541		}
4542
4543		STATE_LOOKUP(kif, &key, direction, *state, pd);
4544
4545		(*state)->expire = time_uptime;
4546		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4547
4548		/* translate source/destination address, if necessary */
4549		if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4550			struct pf_state_key *nk = (*state)->key[pd->didx];
4551
4552			switch (pd->af) {
4553#ifdef INET
4554			case AF_INET:
4555				if (PF_ANEQ(pd->src,
4556				    &nk->addr[pd->sidx], AF_INET))
4557					pf_change_a(&saddr->v4.s_addr,
4558					    pd->ip_sum,
4559					    nk->addr[pd->sidx].v4.s_addr, 0);
4560
4561				if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4562				    AF_INET))
4563					pf_change_a(&daddr->v4.s_addr,
4564					    pd->ip_sum,
4565					    nk->addr[pd->didx].v4.s_addr, 0);
4566
4567				if (nk->port[0] !=
4568				    pd->hdr.icmp->icmp_id) {
4569					pd->hdr.icmp->icmp_cksum =
4570					    pf_cksum_fixup(
4571					    pd->hdr.icmp->icmp_cksum, icmpid,
4572					    nk->port[pd->sidx], 0);
4573					pd->hdr.icmp->icmp_id =
4574					    nk->port[pd->sidx];
4575				}
4576
4577				m_copyback(m, off, ICMP_MINLEN,
4578				    (caddr_t )pd->hdr.icmp);
4579				break;
4580#endif /* INET */
4581#ifdef INET6
4582			case AF_INET6:
4583				if (PF_ANEQ(pd->src,
4584				    &nk->addr[pd->sidx], AF_INET6))
4585					pf_change_a6(saddr,
4586					    &pd->hdr.icmp6->icmp6_cksum,
4587					    &nk->addr[pd->sidx], 0);
4588
4589				if (PF_ANEQ(pd->dst,
4590				    &nk->addr[pd->didx], AF_INET6))
4591					pf_change_a6(daddr,
4592					    &pd->hdr.icmp6->icmp6_cksum,
4593					    &nk->addr[pd->didx], 0);
4594
4595				m_copyback(m, off, sizeof(struct icmp6_hdr),
4596				    (caddr_t )pd->hdr.icmp6);
4597				break;
4598#endif /* INET6 */
4599			}
4600		}
4601		return (PF_PASS);
4602
4603	} else {
4604		/*
4605		 * ICMP error message in response to a TCP/UDP packet.
4606		 * Extract the inner TCP/UDP header and search for that state.
4607		 */
4608
4609		struct pf_pdesc	pd2;
4610		bzero(&pd2, sizeof pd2);
4611#ifdef INET
4612		struct ip	h2;
4613#endif /* INET */
4614#ifdef INET6
4615		struct ip6_hdr	h2_6;
4616		int		terminal = 0;
4617#endif /* INET6 */
4618		int		ipoff2 = 0;
4619		int		off2 = 0;
4620
4621		pd2.af = pd->af;
4622		/* Payload packet is from the opposite direction. */
4623		pd2.sidx = (direction == PF_IN) ? 1 : 0;
4624		pd2.didx = (direction == PF_IN) ? 0 : 1;
4625		switch (pd->af) {
4626#ifdef INET
4627		case AF_INET:
4628			/* offset of h2 in mbuf chain */
4629			ipoff2 = off + ICMP_MINLEN;
4630
4631			if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4632			    NULL, reason, pd2.af)) {
4633				DPFPRINTF(PF_DEBUG_MISC,
4634				    ("pf: ICMP error message too short "
4635				    "(ip)\n"));
4636				return (PF_DROP);
4637			}
4638			/*
4639			 * ICMP error messages don't refer to non-first
4640			 * fragments
4641			 */
4642			if (h2.ip_off & htons(IP_OFFMASK)) {
4643				REASON_SET(reason, PFRES_FRAG);
4644				return (PF_DROP);
4645			}
4646
4647			/* offset of protocol header that follows h2 */
4648			off2 = ipoff2 + (h2.ip_hl << 2);
4649
4650			pd2.proto = h2.ip_p;
4651			pd2.src = (struct pf_addr *)&h2.ip_src;
4652			pd2.dst = (struct pf_addr *)&h2.ip_dst;
4653			pd2.ip_sum = &h2.ip_sum;
4654			break;
4655#endif /* INET */
4656#ifdef INET6
4657		case AF_INET6:
4658			ipoff2 = off + sizeof(struct icmp6_hdr);
4659
4660			if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4661			    NULL, reason, pd2.af)) {
4662				DPFPRINTF(PF_DEBUG_MISC,
4663				    ("pf: ICMP error message too short "
4664				    "(ip6)\n"));
4665				return (PF_DROP);
4666			}
4667			pd2.proto = h2_6.ip6_nxt;
4668			pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4669			pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4670			pd2.ip_sum = NULL;
4671			off2 = ipoff2 + sizeof(h2_6);
4672			do {
4673				switch (pd2.proto) {
4674				case IPPROTO_FRAGMENT:
4675					/*
4676					 * ICMPv6 error messages for
4677					 * non-first fragments
4678					 */
4679					REASON_SET(reason, PFRES_FRAG);
4680					return (PF_DROP);
4681				case IPPROTO_AH:
4682				case IPPROTO_HOPOPTS:
4683				case IPPROTO_ROUTING:
4684				case IPPROTO_DSTOPTS: {
4685					/* get next header and header length */
4686					struct ip6_ext opt6;
4687
4688					if (!pf_pull_hdr(m, off2, &opt6,
4689					    sizeof(opt6), NULL, reason,
4690					    pd2.af)) {
4691						DPFPRINTF(PF_DEBUG_MISC,
4692						    ("pf: ICMPv6 short opt\n"));
4693						return (PF_DROP);
4694					}
4695					if (pd2.proto == IPPROTO_AH)
4696						off2 += (opt6.ip6e_len + 2) * 4;
4697					else
4698						off2 += (opt6.ip6e_len + 1) * 8;
4699					pd2.proto = opt6.ip6e_nxt;
4700					/* goto the next header */
4701					break;
4702				}
4703				default:
4704					terminal++;
4705					break;
4706				}
4707			} while (!terminal);
4708			break;
4709#endif /* INET6 */
4710		}
4711
4712		switch (pd2.proto) {
4713		case IPPROTO_TCP: {
4714			struct tcphdr		 th;
4715			u_int32_t		 seq;
4716			struct pf_state_peer	*src, *dst;
4717			u_int8_t		 dws;
4718			int			 copyback = 0;
4719
4720			/*
4721			 * Only the first 8 bytes of the TCP header can be
4722			 * expected. Don't access any TCP header fields after
4723			 * th_seq, an ackskew test is not possible.
4724			 */
4725			if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4726			    pd2.af)) {
4727				DPFPRINTF(PF_DEBUG_MISC,
4728				    ("pf: ICMP error message too short "
4729				    "(tcp)\n"));
4730				return (PF_DROP);
4731			}
4732
4733			key.af = pd2.af;
4734			key.proto = IPPROTO_TCP;
4735			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4736			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4737			key.port[pd2.sidx] = th.th_sport;
4738			key.port[pd2.didx] = th.th_dport;
4739
4740			STATE_LOOKUP(kif, &key, direction, *state, pd);
4741
4742			if (direction == (*state)->direction) {
4743				src = &(*state)->dst;
4744				dst = &(*state)->src;
4745			} else {
4746				src = &(*state)->src;
4747				dst = &(*state)->dst;
4748			}
4749
4750			if (src->wscale && dst->wscale)
4751				dws = dst->wscale & PF_WSCALE_MASK;
4752			else
4753				dws = 0;
4754
4755			/* Demodulate sequence number */
4756			seq = ntohl(th.th_seq) - src->seqdiff;
4757			if (src->seqdiff) {
4758				pf_change_a(&th.th_seq, icmpsum,
4759				    htonl(seq), 0);
4760				copyback = 1;
4761			}
4762
4763			if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4764			    (!SEQ_GEQ(src->seqhi, seq) ||
4765			    !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4766				if (V_pf_status.debug >= PF_DEBUG_MISC) {
4767					printf("pf: BAD ICMP %d:%d ",
4768					    icmptype, pd->hdr.icmp->icmp_code);
4769					pf_print_host(pd->src, 0, pd->af);
4770					printf(" -> ");
4771					pf_print_host(pd->dst, 0, pd->af);
4772					printf(" state: ");
4773					pf_print_state(*state);
4774					printf(" seq=%u\n", seq);
4775				}
4776				REASON_SET(reason, PFRES_BADSTATE);
4777				return (PF_DROP);
4778			} else {
4779				if (V_pf_status.debug >= PF_DEBUG_MISC) {
4780					printf("pf: OK ICMP %d:%d ",
4781					    icmptype, pd->hdr.icmp->icmp_code);
4782					pf_print_host(pd->src, 0, pd->af);
4783					printf(" -> ");
4784					pf_print_host(pd->dst, 0, pd->af);
4785					printf(" state: ");
4786					pf_print_state(*state);
4787					printf(" seq=%u\n", seq);
4788				}
4789			}
4790
4791			/* translate source/destination address, if necessary */
4792			if ((*state)->key[PF_SK_WIRE] !=
4793			    (*state)->key[PF_SK_STACK]) {
4794				struct pf_state_key *nk =
4795				    (*state)->key[pd->didx];
4796
4797				if (PF_ANEQ(pd2.src,
4798				    &nk->addr[pd2.sidx], pd2.af) ||
4799				    nk->port[pd2.sidx] != th.th_sport)
4800					pf_change_icmp(pd2.src, &th.th_sport,
4801					    daddr, &nk->addr[pd2.sidx],
4802					    nk->port[pd2.sidx], NULL,
4803					    pd2.ip_sum, icmpsum,
4804					    pd->ip_sum, 0, pd2.af);
4805
4806				if (PF_ANEQ(pd2.dst,
4807				    &nk->addr[pd2.didx], pd2.af) ||
4808				    nk->port[pd2.didx] != th.th_dport)
4809					pf_change_icmp(pd2.dst, &th.th_dport,
4810					    saddr, &nk->addr[pd2.didx],
4811					    nk->port[pd2.didx], NULL,
4812					    pd2.ip_sum, icmpsum,
4813					    pd->ip_sum, 0, pd2.af);
4814				copyback = 1;
4815			}
4816
4817			if (copyback) {
4818				switch (pd2.af) {
4819#ifdef INET
4820				case AF_INET:
4821					m_copyback(m, off, ICMP_MINLEN,
4822					    (caddr_t )pd->hdr.icmp);
4823					m_copyback(m, ipoff2, sizeof(h2),
4824					    (caddr_t )&h2);
4825					break;
4826#endif /* INET */
4827#ifdef INET6
4828				case AF_INET6:
4829					m_copyback(m, off,
4830					    sizeof(struct icmp6_hdr),
4831					    (caddr_t )pd->hdr.icmp6);
4832					m_copyback(m, ipoff2, sizeof(h2_6),
4833					    (caddr_t )&h2_6);
4834					break;
4835#endif /* INET6 */
4836				}
4837				m_copyback(m, off2, 8, (caddr_t)&th);
4838			}
4839
4840			return (PF_PASS);
4841			break;
4842		}
4843		case IPPROTO_UDP: {
4844			struct udphdr		uh;
4845
4846			if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4847			    NULL, reason, pd2.af)) {
4848				DPFPRINTF(PF_DEBUG_MISC,
4849				    ("pf: ICMP error message too short "
4850				    "(udp)\n"));
4851				return (PF_DROP);
4852			}
4853
4854			key.af = pd2.af;
4855			key.proto = IPPROTO_UDP;
4856			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4857			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4858			key.port[pd2.sidx] = uh.uh_sport;
4859			key.port[pd2.didx] = uh.uh_dport;
4860
4861			STATE_LOOKUP(kif, &key, direction, *state, pd);
4862
4863			/* translate source/destination address, if necessary */
4864			if ((*state)->key[PF_SK_WIRE] !=
4865			    (*state)->key[PF_SK_STACK]) {
4866				struct pf_state_key *nk =
4867				    (*state)->key[pd->didx];
4868
4869				if (PF_ANEQ(pd2.src,
4870				    &nk->addr[pd2.sidx], pd2.af) ||
4871				    nk->port[pd2.sidx] != uh.uh_sport)
4872					pf_change_icmp(pd2.src, &uh.uh_sport,
4873					    daddr, &nk->addr[pd2.sidx],
4874					    nk->port[pd2.sidx], &uh.uh_sum,
4875					    pd2.ip_sum, icmpsum,
4876					    pd->ip_sum, 1, pd2.af);
4877
4878				if (PF_ANEQ(pd2.dst,
4879				    &nk->addr[pd2.didx], pd2.af) ||
4880				    nk->port[pd2.didx] != uh.uh_dport)
4881					pf_change_icmp(pd2.dst, &uh.uh_dport,
4882					    saddr, &nk->addr[pd2.didx],
4883					    nk->port[pd2.didx], &uh.uh_sum,
4884					    pd2.ip_sum, icmpsum,
4885					    pd->ip_sum, 1, pd2.af);
4886
4887				switch (pd2.af) {
4888#ifdef INET
4889				case AF_INET:
4890					m_copyback(m, off, ICMP_MINLEN,
4891					    (caddr_t )pd->hdr.icmp);
4892					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4893					break;
4894#endif /* INET */
4895#ifdef INET6
4896				case AF_INET6:
4897					m_copyback(m, off,
4898					    sizeof(struct icmp6_hdr),
4899					    (caddr_t )pd->hdr.icmp6);
4900					m_copyback(m, ipoff2, sizeof(h2_6),
4901					    (caddr_t )&h2_6);
4902					break;
4903#endif /* INET6 */
4904				}
4905				m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4906			}
4907			return (PF_PASS);
4908			break;
4909		}
4910#ifdef INET
4911		case IPPROTO_ICMP: {
4912			struct icmp		iih;
4913
4914			if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4915			    NULL, reason, pd2.af)) {
4916				DPFPRINTF(PF_DEBUG_MISC,
4917				    ("pf: ICMP error message too short i"
4918				    "(icmp)\n"));
4919				return (PF_DROP);
4920			}
4921
4922			key.af = pd2.af;
4923			key.proto = IPPROTO_ICMP;
4924			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4925			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4926			key.port[0] = key.port[1] = iih.icmp_id;
4927
4928			STATE_LOOKUP(kif, &key, direction, *state, pd);
4929
4930			/* translate source/destination address, if necessary */
4931			if ((*state)->key[PF_SK_WIRE] !=
4932			    (*state)->key[PF_SK_STACK]) {
4933				struct pf_state_key *nk =
4934				    (*state)->key[pd->didx];
4935
4936				if (PF_ANEQ(pd2.src,
4937				    &nk->addr[pd2.sidx], pd2.af) ||
4938				    nk->port[pd2.sidx] != iih.icmp_id)
4939					pf_change_icmp(pd2.src, &iih.icmp_id,
4940					    daddr, &nk->addr[pd2.sidx],
4941					    nk->port[pd2.sidx], NULL,
4942					    pd2.ip_sum, icmpsum,
4943					    pd->ip_sum, 0, AF_INET);
4944
4945				if (PF_ANEQ(pd2.dst,
4946				    &nk->addr[pd2.didx], pd2.af) ||
4947				    nk->port[pd2.didx] != iih.icmp_id)
4948					pf_change_icmp(pd2.dst, &iih.icmp_id,
4949					    saddr, &nk->addr[pd2.didx],
4950					    nk->port[pd2.didx], NULL,
4951					    pd2.ip_sum, icmpsum,
4952					    pd->ip_sum, 0, AF_INET);
4953
4954				m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4955				m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4956				m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4957			}
4958			return (PF_PASS);
4959			break;
4960		}
4961#endif /* INET */
4962#ifdef INET6
4963		case IPPROTO_ICMPV6: {
4964			struct icmp6_hdr	iih;
4965
4966			if (!pf_pull_hdr(m, off2, &iih,
4967			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
4968				DPFPRINTF(PF_DEBUG_MISC,
4969				    ("pf: ICMP error message too short "
4970				    "(icmp6)\n"));
4971				return (PF_DROP);
4972			}
4973
4974			key.af = pd2.af;
4975			key.proto = IPPROTO_ICMPV6;
4976			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4977			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4978			key.port[0] = key.port[1] = iih.icmp6_id;
4979
4980			STATE_LOOKUP(kif, &key, direction, *state, pd);
4981
4982			/* translate source/destination address, if necessary */
4983			if ((*state)->key[PF_SK_WIRE] !=
4984			    (*state)->key[PF_SK_STACK]) {
4985				struct pf_state_key *nk =
4986				    (*state)->key[pd->didx];
4987
4988				if (PF_ANEQ(pd2.src,
4989				    &nk->addr[pd2.sidx], pd2.af) ||
4990				    nk->port[pd2.sidx] != iih.icmp6_id)
4991					pf_change_icmp(pd2.src, &iih.icmp6_id,
4992					    daddr, &nk->addr[pd2.sidx],
4993					    nk->port[pd2.sidx], NULL,
4994					    pd2.ip_sum, icmpsum,
4995					    pd->ip_sum, 0, AF_INET6);
4996
4997				if (PF_ANEQ(pd2.dst,
4998				    &nk->addr[pd2.didx], pd2.af) ||
4999				    nk->port[pd2.didx] != iih.icmp6_id)
5000					pf_change_icmp(pd2.dst, &iih.icmp6_id,
5001					    saddr, &nk->addr[pd2.didx],
5002					    nk->port[pd2.didx], NULL,
5003					    pd2.ip_sum, icmpsum,
5004					    pd->ip_sum, 0, AF_INET6);
5005
5006				m_copyback(m, off, sizeof(struct icmp6_hdr),
5007				    (caddr_t)pd->hdr.icmp6);
5008				m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
5009				m_copyback(m, off2, sizeof(struct icmp6_hdr),
5010				    (caddr_t)&iih);
5011			}
5012			return (PF_PASS);
5013			break;
5014		}
5015#endif /* INET6 */
5016		default: {
5017			key.af = pd2.af;
5018			key.proto = pd2.proto;
5019			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5020			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5021			key.port[0] = key.port[1] = 0;
5022
5023			STATE_LOOKUP(kif, &key, direction, *state, pd);
5024
5025			/* translate source/destination address, if necessary */
5026			if ((*state)->key[PF_SK_WIRE] !=
5027			    (*state)->key[PF_SK_STACK]) {
5028				struct pf_state_key *nk =
5029				    (*state)->key[pd->didx];
5030
5031				if (PF_ANEQ(pd2.src,
5032				    &nk->addr[pd2.sidx], pd2.af))
5033					pf_change_icmp(pd2.src, NULL, daddr,
5034					    &nk->addr[pd2.sidx], 0, NULL,
5035					    pd2.ip_sum, icmpsum,
5036					    pd->ip_sum, 0, pd2.af);
5037
5038				if (PF_ANEQ(pd2.dst,
5039				    &nk->addr[pd2.didx], pd2.af))
5040					pf_change_icmp(pd2.dst, NULL, saddr,
5041					    &nk->addr[pd2.didx], 0, NULL,
5042					    pd2.ip_sum, icmpsum,
5043					    pd->ip_sum, 0, pd2.af);
5044
5045				switch (pd2.af) {
5046#ifdef INET
5047				case AF_INET:
5048					m_copyback(m, off, ICMP_MINLEN,
5049					    (caddr_t)pd->hdr.icmp);
5050					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5051					break;
5052#endif /* INET */
5053#ifdef INET6
5054				case AF_INET6:
5055					m_copyback(m, off,
5056					    sizeof(struct icmp6_hdr),
5057					    (caddr_t )pd->hdr.icmp6);
5058					m_copyback(m, ipoff2, sizeof(h2_6),
5059					    (caddr_t )&h2_6);
5060					break;
5061#endif /* INET6 */
5062				}
5063			}
5064			return (PF_PASS);
5065			break;
5066		}
5067		}
5068	}
5069}
5070
5071static int
5072pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
5073    struct mbuf *m, struct pf_pdesc *pd)
5074{
5075	struct pf_state_peer	*src, *dst;
5076	struct pf_state_key_cmp	 key;
5077
5078	bzero(&key, sizeof(key));
5079	key.af = pd->af;
5080	key.proto = pd->proto;
5081	if (direction == PF_IN)	{
5082		PF_ACPY(&key.addr[0], pd->src, key.af);
5083		PF_ACPY(&key.addr[1], pd->dst, key.af);
5084		key.port[0] = key.port[1] = 0;
5085	} else {
5086		PF_ACPY(&key.addr[1], pd->src, key.af);
5087		PF_ACPY(&key.addr[0], pd->dst, key.af);
5088		key.port[1] = key.port[0] = 0;
5089	}
5090
5091	STATE_LOOKUP(kif, &key, direction, *state, pd);
5092
5093	if (direction == (*state)->direction) {
5094		src = &(*state)->src;
5095		dst = &(*state)->dst;
5096	} else {
5097		src = &(*state)->dst;
5098		dst = &(*state)->src;
5099	}
5100
5101	/* update states */
5102	if (src->state < PFOTHERS_SINGLE)
5103		src->state = PFOTHERS_SINGLE;
5104	if (dst->state == PFOTHERS_SINGLE)
5105		dst->state = PFOTHERS_MULTIPLE;
5106
5107	/* update expire time */
5108	(*state)->expire = time_uptime;
5109	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
5110		(*state)->timeout = PFTM_OTHER_MULTIPLE;
5111	else
5112		(*state)->timeout = PFTM_OTHER_SINGLE;
5113
5114	/* translate source/destination address, if necessary */
5115	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5116		struct pf_state_key *nk = (*state)->key[pd->didx];
5117
5118		KASSERT(nk, ("%s: nk is null", __func__));
5119		KASSERT(pd, ("%s: pd is null", __func__));
5120		KASSERT(pd->src, ("%s: pd->src is null", __func__));
5121		KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
5122		switch (pd->af) {
5123#ifdef INET
5124		case AF_INET:
5125			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5126				pf_change_a(&pd->src->v4.s_addr,
5127				    pd->ip_sum,
5128				    nk->addr[pd->sidx].v4.s_addr,
5129				    0);
5130
5131
5132			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5133				pf_change_a(&pd->dst->v4.s_addr,
5134				    pd->ip_sum,
5135				    nk->addr[pd->didx].v4.s_addr,
5136				    0);
5137
5138				break;
5139#endif /* INET */
5140#ifdef INET6
5141		case AF_INET6:
5142			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
5143				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
5144
5145			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
5146				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
5147#endif /* INET6 */
5148		}
5149	}
5150	return (PF_PASS);
5151}
5152
5153/*
5154 * ipoff and off are measured from the start of the mbuf chain.
5155 * h must be at "ipoff" on the mbuf chain.
5156 */
5157void *
5158pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
5159    u_short *actionp, u_short *reasonp, sa_family_t af)
5160{
5161	switch (af) {
5162#ifdef INET
5163	case AF_INET: {
5164		struct ip	*h = mtod(m, struct ip *);
5165		u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
5166
5167		if (fragoff) {
5168			if (fragoff >= len)
5169				ACTION_SET(actionp, PF_PASS);
5170			else {
5171				ACTION_SET(actionp, PF_DROP);
5172				REASON_SET(reasonp, PFRES_FRAG);
5173			}
5174			return (NULL);
5175		}
5176		if (m->m_pkthdr.len < off + len ||
5177		    ntohs(h->ip_len) < off + len) {
5178			ACTION_SET(actionp, PF_DROP);
5179			REASON_SET(reasonp, PFRES_SHORT);
5180			return (NULL);
5181		}
5182		break;
5183	}
5184#endif /* INET */
5185#ifdef INET6
5186	case AF_INET6: {
5187		struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);
5188
5189		if (m->m_pkthdr.len < off + len ||
5190		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5191		    (unsigned)(off + len)) {
5192			ACTION_SET(actionp, PF_DROP);
5193			REASON_SET(reasonp, PFRES_SHORT);
5194			return (NULL);
5195		}
5196		break;
5197	}
5198#endif /* INET6 */
5199	}
5200	m_copydata(m, off, len, p);
5201	return (p);
5202}
5203
5204int
5205pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5206    int rtableid)
5207{
5208#ifdef RADIX_MPATH
5209	struct radix_node_head	*rnh;
5210#endif
5211	struct sockaddr_in	*dst;
5212	int			 ret = 1;
5213	int			 check_mpath;
5214#ifdef INET6
5215	struct sockaddr_in6	*dst6;
5216	struct route_in6	 ro;
5217#else
5218	struct route		 ro;
5219#endif
5220	struct radix_node	*rn;
5221	struct rtentry		*rt;
5222	struct ifnet		*ifp;
5223
5224	check_mpath = 0;
5225#ifdef RADIX_MPATH
5226	/* XXX: stick to table 0 for now */
5227	rnh = rt_tables_get_rnh(0, af);
5228	if (rnh != NULL && rn_mpath_capable(rnh))
5229		check_mpath = 1;
5230#endif
5231	bzero(&ro, sizeof(ro));
5232	switch (af) {
5233	case AF_INET:
5234		dst = satosin(&ro.ro_dst);
5235		dst->sin_family = AF_INET;
5236		dst->sin_len = sizeof(*dst);
5237		dst->sin_addr = addr->v4;
5238		break;
5239#ifdef INET6
5240	case AF_INET6:
5241		/*
5242		 * Skip check for addresses with embedded interface scope,
5243		 * as they would always match anyway.
5244		 */
5245		if (IN6_IS_SCOPE_EMBED(&addr->v6))
5246			goto out;
5247		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5248		dst6->sin6_family = AF_INET6;
5249		dst6->sin6_len = sizeof(*dst6);
5250		dst6->sin6_addr = addr->v6;
5251		break;
5252#endif /* INET6 */
5253	default:
5254		return (0);
5255	}
5256
5257	/* Skip checks for ipsec interfaces */
5258	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5259		goto out;
5260
5261	switch (af) {
5262#ifdef INET6
5263	case AF_INET6:
5264		in6_rtalloc_ign(&ro, 0, rtableid);
5265		break;
5266#endif
5267#ifdef INET
5268	case AF_INET:
5269		in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5270		break;
5271#endif
5272	default:
5273		rtalloc_ign((struct route *)&ro, 0);	/* No/default FIB. */
5274		break;
5275	}
5276
5277	if (ro.ro_rt != NULL) {
5278		/* No interface given, this is a no-route check */
5279		if (kif == NULL)
5280			goto out;
5281
5282		if (kif->pfik_ifp == NULL) {
5283			ret = 0;
5284			goto out;
5285		}
5286
5287		/* Perform uRPF check if passed input interface */
5288		ret = 0;
5289		rn = (struct radix_node *)ro.ro_rt;
5290		do {
5291			rt = (struct rtentry *)rn;
5292			ifp = rt->rt_ifp;
5293
5294			if (kif->pfik_ifp == ifp)
5295				ret = 1;
5296#ifdef RADIX_MPATH
5297			rn = rn_mpath_next(rn);
5298#endif
5299		} while (check_mpath == 1 && rn != NULL && ret == 0);
5300	} else
5301		ret = 0;
5302out:
5303	if (ro.ro_rt != NULL)
5304		RTFREE(ro.ro_rt);
5305	return (ret);
5306}
5307
5308#ifdef INET
5309static void
5310pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5311    struct pf_state *s, struct pf_pdesc *pd)
5312{
5313	struct mbuf		*m0, *m1;
5314	struct sockaddr_in	dst;
5315	struct ip		*ip;
5316	struct ifnet		*ifp = NULL;
5317	struct pf_addr		 naddr;
5318	struct pf_src_node	*sn = NULL;
5319	int			 error = 0;
5320	uint16_t		 ip_len, ip_off;
5321
5322	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5323	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5324	    __func__));
5325
5326	if ((pd->pf_mtag == NULL &&
5327	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5328	    pd->pf_mtag->routed++ > 3) {
5329		m0 = *m;
5330		*m = NULL;
5331		goto bad_locked;
5332	}
5333
5334	if (r->rt == PF_DUPTO) {
5335		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5336			if (s)
5337				PF_STATE_UNLOCK(s);
5338			return;
5339		}
5340	} else {
5341		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5342			if (s)
5343				PF_STATE_UNLOCK(s);
5344			return;
5345		}
5346		m0 = *m;
5347	}
5348
5349	ip = mtod(m0, struct ip *);
5350
5351	bzero(&dst, sizeof(dst));
5352	dst.sin_family = AF_INET;
5353	dst.sin_len = sizeof(dst);
5354	dst.sin_addr = ip->ip_dst;
5355
5356	if (r->rt == PF_FASTROUTE) {
5357		struct rtentry *rt;
5358
5359		if (s)
5360			PF_STATE_UNLOCK(s);
5361		rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0));
5362		if (rt == NULL) {
5363			KMOD_IPSTAT_INC(ips_noroute);
5364			error = EHOSTUNREACH;
5365			goto bad;
5366		}
5367
5368		ifp = rt->rt_ifp;
5369		counter_u64_add(rt->rt_pksent, 1);
5370
5371		if (rt->rt_flags & RTF_GATEWAY)
5372			bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst));
5373		RTFREE_LOCKED(rt);
5374	} else {
5375		if (TAILQ_EMPTY(&r->rpool.list)) {
5376			DPFPRINTF(PF_DEBUG_URGENT,
5377			    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5378			goto bad_locked;
5379		}
5380		if (s == NULL) {
5381			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5382			    &naddr, NULL, &sn);
5383			if (!PF_AZERO(&naddr, AF_INET))
5384				dst.sin_addr.s_addr = naddr.v4.s_addr;
5385			ifp = r->rpool.cur->kif ?
5386			    r->rpool.cur->kif->pfik_ifp : NULL;
5387		} else {
5388			if (!PF_AZERO(&s->rt_addr, AF_INET))
5389				dst.sin_addr.s_addr =
5390				    s->rt_addr.v4.s_addr;
5391			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5392			PF_STATE_UNLOCK(s);
5393		}
5394	}
5395	if (ifp == NULL)
5396		goto bad;
5397
5398	if (oifp != ifp) {
5399		if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5400			goto bad;
5401		else if (m0 == NULL)
5402			goto done;
5403		if (m0->m_len < sizeof(struct ip)) {
5404			DPFPRINTF(PF_DEBUG_URGENT,
5405			    ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5406			goto bad;
5407		}
5408		ip = mtod(m0, struct ip *);
5409	}
5410
5411	if (ifp->if_flags & IFF_LOOPBACK)
5412		m0->m_flags |= M_SKIP_FIREWALL;
5413
5414	ip_len = ntohs(ip->ip_len);
5415	ip_off = ntohs(ip->ip_off);
5416
5417	/* Copied from FreeBSD 10.0-CURRENT ip_output. */
5418	m0->m_pkthdr.csum_flags |= CSUM_IP;
5419	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5420		in_delayed_cksum(m0);
5421		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5422	}
5423#ifdef SCTP
5424	if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5425		sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2));
5426		m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5427	}
5428#endif
5429
5430	/*
5431	 * If small enough for interface, or the interface will take
5432	 * care of the fragmentation for us, we can just send directly.
5433	 */
5434	if (ip_len <= ifp->if_mtu ||
5435	    (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
5436	    ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
5437		ip->ip_sum = 0;
5438		if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5439			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5440			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5441		}
5442		m_clrprotoflags(m0);	/* Avoid confusing lower layers. */
5443		error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5444		goto done;
5445	}
5446
5447	/* Balk when DF bit is set or the interface didn't support TSO. */
5448	if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5449		error = EMSGSIZE;
5450		KMOD_IPSTAT_INC(ips_cantfrag);
5451		if (r->rt != PF_DUPTO) {
5452			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5453			    ifp->if_mtu);
5454			goto done;
5455		} else
5456			goto bad;
5457	}
5458
5459	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5460	if (error)
5461		goto bad;
5462
5463	for (; m0; m0 = m1) {
5464		m1 = m0->m_nextpkt;
5465		m0->m_nextpkt = NULL;
5466		if (error == 0) {
5467			m_clrprotoflags(m0);
5468			error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5469		} else
5470			m_freem(m0);
5471	}
5472
5473	if (error == 0)
5474		KMOD_IPSTAT_INC(ips_fragmented);
5475
5476done:
5477	if (r->rt != PF_DUPTO)
5478		*m = NULL;
5479	return;
5480
5481bad_locked:
5482	if (s)
5483		PF_STATE_UNLOCK(s);
5484bad:
5485	m_freem(m0);
5486	goto done;
5487}
5488#endif /* INET */
5489
5490#ifdef INET6
5491static void
5492pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5493    struct pf_state *s, struct pf_pdesc *pd)
5494{
5495	struct mbuf		*m0;
5496	struct sockaddr_in6	dst;
5497	struct ip6_hdr		*ip6;
5498	struct ifnet		*ifp = NULL;
5499	struct pf_addr		 naddr;
5500	struct pf_src_node	*sn = NULL;
5501
5502	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5503	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5504	    __func__));
5505
5506	if ((pd->pf_mtag == NULL &&
5507	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5508	    pd->pf_mtag->routed++ > 3) {
5509		m0 = *m;
5510		*m = NULL;
5511		goto bad_locked;
5512	}
5513
5514	if (r->rt == PF_DUPTO) {
5515		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5516			if (s)
5517				PF_STATE_UNLOCK(s);
5518			return;
5519		}
5520	} else {
5521		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5522			if (s)
5523				PF_STATE_UNLOCK(s);
5524			return;
5525		}
5526		m0 = *m;
5527	}
5528
5529	ip6 = mtod(m0, struct ip6_hdr *);
5530
5531	bzero(&dst, sizeof(dst));
5532	dst.sin6_family = AF_INET6;
5533	dst.sin6_len = sizeof(dst);
5534	dst.sin6_addr = ip6->ip6_dst;
5535
5536	/* Cheat. XXX why only in the v6 case??? */
5537	if (r->rt == PF_FASTROUTE) {
5538		if (s)
5539			PF_STATE_UNLOCK(s);
5540		m0->m_flags |= M_SKIP_FIREWALL;
5541		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5542		*m = NULL;
5543		return;
5544	}
5545
5546	if (TAILQ_EMPTY(&r->rpool.list)) {
5547		DPFPRINTF(PF_DEBUG_URGENT,
5548		    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5549		goto bad_locked;
5550	}
5551	if (s == NULL) {
5552		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5553		    &naddr, NULL, &sn);
5554		if (!PF_AZERO(&naddr, AF_INET6))
5555			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5556			    &naddr, AF_INET6);
5557		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5558	} else {
5559		if (!PF_AZERO(&s->rt_addr, AF_INET6))
5560			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5561			    &s->rt_addr, AF_INET6);
5562		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5563	}
5564
5565	if (s)
5566		PF_STATE_UNLOCK(s);
5567
5568	if (ifp == NULL)
5569		goto bad;
5570
5571	if (oifp != ifp) {
5572		if (pf_test6(PF_FWD, ifp, &m0, NULL) != PF_PASS)
5573			goto bad;
5574		else if (m0 == NULL)
5575			goto done;
5576		if (m0->m_len < sizeof(struct ip6_hdr)) {
5577			DPFPRINTF(PF_DEBUG_URGENT,
5578			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5579			    __func__));
5580			goto bad;
5581		}
5582		ip6 = mtod(m0, struct ip6_hdr *);
5583	}
5584
5585	if (ifp->if_flags & IFF_LOOPBACK)
5586		m0->m_flags |= M_SKIP_FIREWALL;
5587
5588	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
5589	    ~ifp->if_hwassist) {
5590		uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
5591		in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
5592		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
5593	}
5594
5595	/*
5596	 * If the packet is too large for the outgoing interface,
5597	 * send back an icmp6 error.
5598	 */
5599	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5600		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5601	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5602		nd6_output(ifp, ifp, m0, &dst, NULL);
5603	else {
5604		in6_ifstat_inc(ifp, ifs6_in_toobig);
5605		if (r->rt != PF_DUPTO)
5606			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5607		else
5608			goto bad;
5609	}
5610
5611done:
5612	if (r->rt != PF_DUPTO)
5613		*m = NULL;
5614	return;
5615
5616bad_locked:
5617	if (s)
5618		PF_STATE_UNLOCK(s);
5619bad:
5620	m_freem(m0);
5621	goto done;
5622}
5623#endif /* INET6 */
5624
5625/*
5626 * FreeBSD supports cksum offloads for the following drivers.
5627 *  em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5628 *   ti(4), txp(4), xl(4)
5629 *
5630 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5631 *  network driver performed cksum including pseudo header, need to verify
5632 *   csum_data
5633 * CSUM_DATA_VALID :
5634 *  network driver performed cksum, needs to additional pseudo header
5635 *  cksum computation with partial csum_data(i.e. lack of H/W support for
5636 *  pseudo header, for instance hme(4), sk(4) and possibly gem(4))
5637 *
5638 * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
5639 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
5640 * TCP/UDP layer.
5641 * Also, set csum_data to 0xffff to force cksum validation.
5642 */
5643static int
5644pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5645{
5646	u_int16_t sum = 0;
5647	int hw_assist = 0;
5648	struct ip *ip;
5649
5650	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5651		return (1);
5652	if (m->m_pkthdr.len < off + len)
5653		return (1);
5654
5655	switch (p) {
5656	case IPPROTO_TCP:
5657		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5658			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5659				sum = m->m_pkthdr.csum_data;
5660			} else {
5661				ip = mtod(m, struct ip *);
5662				sum = in_pseudo(ip->ip_src.s_addr,
5663				ip->ip_dst.s_addr, htonl((u_short)len +
5664				m->m_pkthdr.csum_data + IPPROTO_TCP));
5665			}
5666			sum ^= 0xffff;
5667			++hw_assist;
5668		}
5669		break;
5670	case IPPROTO_UDP:
5671		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5672			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5673				sum = m->m_pkthdr.csum_data;
5674			} else {
5675				ip = mtod(m, struct ip *);
5676				sum = in_pseudo(ip->ip_src.s_addr,
5677				ip->ip_dst.s_addr, htonl((u_short)len +
5678				m->m_pkthdr.csum_data + IPPROTO_UDP));
5679			}
5680			sum ^= 0xffff;
5681			++hw_assist;
5682		}
5683		break;
5684	case IPPROTO_ICMP:
5685#ifdef INET6
5686	case IPPROTO_ICMPV6:
5687#endif /* INET6 */
5688		break;
5689	default:
5690		return (1);
5691	}
5692
5693	if (!hw_assist) {
5694		switch (af) {
5695		case AF_INET:
5696			if (p == IPPROTO_ICMP) {
5697				if (m->m_len < off)
5698					return (1);
5699				m->m_data += off;
5700				m->m_len -= off;
5701				sum = in_cksum(m, len);
5702				m->m_data -= off;
5703				m->m_len += off;
5704			} else {
5705				if (m->m_len < sizeof(struct ip))
5706					return (1);
5707				sum = in4_cksum(m, p, off, len);
5708			}
5709			break;
5710#ifdef INET6
5711		case AF_INET6:
5712			if (m->m_len < sizeof(struct ip6_hdr))
5713				return (1);
5714			sum = in6_cksum(m, p, off, len);
5715			break;
5716#endif /* INET6 */
5717		default:
5718			return (1);
5719		}
5720	}
5721	if (sum) {
5722		switch (p) {
5723		case IPPROTO_TCP:
5724		    {
5725			KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5726			break;
5727		    }
5728		case IPPROTO_UDP:
5729		    {
5730			KMOD_UDPSTAT_INC(udps_badsum);
5731			break;
5732		    }
5733#ifdef INET
5734		case IPPROTO_ICMP:
5735		    {
5736			KMOD_ICMPSTAT_INC(icps_checksum);
5737			break;
5738		    }
5739#endif
5740#ifdef INET6
5741		case IPPROTO_ICMPV6:
5742		    {
5743			KMOD_ICMP6STAT_INC(icp6s_checksum);
5744			break;
5745		    }
5746#endif /* INET6 */
5747		}
5748		return (1);
5749	} else {
5750		if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5751			m->m_pkthdr.csum_flags |=
5752			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5753			m->m_pkthdr.csum_data = 0xffff;
5754		}
5755	}
5756	return (0);
5757}
5758
5759
5760#ifdef INET
5761int
5762pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5763{
5764	struct pfi_kif		*kif;
5765	u_short			 action, reason = 0, log = 0;
5766	struct mbuf		*m = *m0;
5767	struct ip		*h = NULL;
5768	struct m_tag		*ipfwtag;
5769	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5770	struct pf_state		*s = NULL;
5771	struct pf_ruleset	*ruleset = NULL;
5772	struct pf_pdesc		 pd;
5773	int			 off, dirndx, pqid = 0;
5774
5775	M_ASSERTPKTHDR(m);
5776
5777	if (!V_pf_status.running)
5778		return (PF_PASS);
5779
5780	memset(&pd, 0, sizeof(pd));
5781
5782	kif = (struct pfi_kif *)ifp->if_pf_kif;
5783
5784	if (kif == NULL) {
5785		DPFPRINTF(PF_DEBUG_URGENT,
5786		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5787		return (PF_DROP);
5788	}
5789	if (kif->pfik_flags & PFI_IFLAG_SKIP)
5790		return (PF_PASS);
5791
5792	if (m->m_flags & M_SKIP_FIREWALL)
5793		return (PF_PASS);
5794
5795	pd.pf_mtag = pf_find_mtag(m);
5796
5797	PF_RULES_RLOCK();
5798
5799	if (ip_divert_ptr != NULL &&
5800	    ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5801		struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5802		if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5803			if (pd.pf_mtag == NULL &&
5804			    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5805				action = PF_DROP;
5806				goto done;
5807			}
5808			pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5809			m_tag_delete(m, ipfwtag);
5810		}
5811		if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5812			m->m_flags |= M_FASTFWD_OURS;
5813			pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5814		}
5815	} else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5816		/* We do IP header normalization and packet reassembly here */
5817		action = PF_DROP;
5818		goto done;
5819	}
5820	m = *m0;	/* pf_normalize messes with m0 */
5821	h = mtod(m, struct ip *);
5822
5823	off = h->ip_hl << 2;
5824	if (off < (int)sizeof(struct ip)) {
5825		action = PF_DROP;
5826		REASON_SET(&reason, PFRES_SHORT);
5827		log = 1;
5828		goto done;
5829	}
5830
5831	pd.src = (struct pf_addr *)&h->ip_src;
5832	pd.dst = (struct pf_addr *)&h->ip_dst;
5833	pd.sport = pd.dport = NULL;
5834	pd.ip_sum = &h->ip_sum;
5835	pd.proto_sum = NULL;
5836	pd.proto = h->ip_p;
5837	pd.dir = dir;
5838	pd.sidx = (dir == PF_IN) ? 0 : 1;
5839	pd.didx = (dir == PF_IN) ? 1 : 0;
5840	pd.af = AF_INET;
5841	pd.tos = h->ip_tos;
5842	pd.tot_len = ntohs(h->ip_len);
5843
5844	/* handle fragments that didn't get reassembled by normalization */
5845	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5846		action = pf_test_fragment(&r, dir, kif, m, h,
5847		    &pd, &a, &ruleset);
5848		goto done;
5849	}
5850
5851	switch (h->ip_p) {
5852
5853	case IPPROTO_TCP: {
5854		struct tcphdr	th;
5855
5856		pd.hdr.tcp = &th;
5857		if (!pf_pull_hdr(m, off, &th, sizeof(th),
5858		    &action, &reason, AF_INET)) {
5859			log = action != PF_PASS;
5860			goto done;
5861		}
5862		pd.p_len = pd.tot_len - off - (th.th_off << 2);
5863		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5864			pqid = 1;
5865		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5866		if (action == PF_DROP)
5867			goto done;
5868		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5869		    &reason);
5870		if (action == PF_PASS) {
5871			if (pfsync_update_state_ptr != NULL)
5872				pfsync_update_state_ptr(s);
5873			r = s->rule.ptr;
5874			a = s->anchor.ptr;
5875			log = s->log;
5876		} else if (s == NULL)
5877			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5878			    &a, &ruleset, inp);
5879		break;
5880	}
5881
5882	case IPPROTO_UDP: {
5883		struct udphdr	uh;
5884
5885		pd.hdr.udp = &uh;
5886		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5887		    &action, &reason, AF_INET)) {
5888			log = action != PF_PASS;
5889			goto done;
5890		}
5891		if (uh.uh_dport == 0 ||
5892		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5893		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5894			action = PF_DROP;
5895			REASON_SET(&reason, PFRES_SHORT);
5896			goto done;
5897		}
5898		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5899		if (action == PF_PASS) {
5900			if (pfsync_update_state_ptr != NULL)
5901				pfsync_update_state_ptr(s);
5902			r = s->rule.ptr;
5903			a = s->anchor.ptr;
5904			log = s->log;
5905		} else if (s == NULL)
5906			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5907			    &a, &ruleset, inp);
5908		break;
5909	}
5910
5911	case IPPROTO_ICMP: {
5912		struct icmp	ih;
5913
5914		pd.hdr.icmp = &ih;
5915		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5916		    &action, &reason, AF_INET)) {
5917			log = action != PF_PASS;
5918			goto done;
5919		}
5920		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
5921		    &reason);
5922		if (action == PF_PASS) {
5923			if (pfsync_update_state_ptr != NULL)
5924				pfsync_update_state_ptr(s);
5925			r = s->rule.ptr;
5926			a = s->anchor.ptr;
5927			log = s->log;
5928		} else if (s == NULL)
5929			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5930			    &a, &ruleset, inp);
5931		break;
5932	}
5933
5934#ifdef INET6
5935	case IPPROTO_ICMPV6: {
5936		action = PF_DROP;
5937		DPFPRINTF(PF_DEBUG_MISC,
5938		    ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
5939		goto done;
5940	}
5941#endif
5942
5943	default:
5944		action = pf_test_state_other(&s, dir, kif, m, &pd);
5945		if (action == PF_PASS) {
5946			if (pfsync_update_state_ptr != NULL)
5947				pfsync_update_state_ptr(s);
5948			r = s->rule.ptr;
5949			a = s->anchor.ptr;
5950			log = s->log;
5951		} else if (s == NULL)
5952			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5953			    &a, &ruleset, inp);
5954		break;
5955	}
5956
5957done:
5958	PF_RULES_RUNLOCK();
5959	if (action == PF_PASS && h->ip_hl > 5 &&
5960	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
5961		action = PF_DROP;
5962		REASON_SET(&reason, PFRES_IPOPTIONS);
5963		log = r->log;
5964		DPFPRINTF(PF_DEBUG_MISC,
5965		    ("pf: dropping packet with ip options\n"));
5966	}
5967
5968	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
5969		action = PF_DROP;
5970		REASON_SET(&reason, PFRES_MEMORY);
5971	}
5972	if (r->rtableid >= 0)
5973		M_SETFIB(m, r->rtableid);
5974
5975#ifdef ALTQ
5976	if (action == PF_PASS && r->qid) {
5977		if (pd.pf_mtag == NULL &&
5978		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5979			action = PF_DROP;
5980			REASON_SET(&reason, PFRES_MEMORY);
5981		} else {
5982			if (s != NULL)
5983				pd.pf_mtag->qid_hash = pf_state_hash(s);
5984			if (pqid || (pd.tos & IPTOS_LOWDELAY))
5985				pd.pf_mtag->qid = r->pqid;
5986			else
5987				pd.pf_mtag->qid = r->qid;
5988			/* Add hints for ecn. */
5989			pd.pf_mtag->hdr = h;
5990		}
5991
5992	}
5993#endif /* ALTQ */
5994
5995	/*
5996	 * connections redirected to loopback should not match sockets
5997	 * bound specifically to loopback due to security implications,
5998	 * see tcp_input() and in_pcblookup_listen().
5999	 */
6000	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6001	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6002	    (s->nat_rule.ptr->action == PF_RDR ||
6003	    s->nat_rule.ptr->action == PF_BINAT) &&
6004	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
6005		m->m_flags |= M_SKIP_FIREWALL;
6006
6007	if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
6008	    !PACKET_LOOPED(&pd)) {
6009
6010		ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
6011		    sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
6012		if (ipfwtag != NULL) {
6013			((struct ipfw_rule_ref *)(ipfwtag+1))->info =
6014			    ntohs(r->divert.port);
6015			((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
6016
6017			if (s)
6018				PF_STATE_UNLOCK(s);
6019
6020			m_tag_prepend(m, ipfwtag);
6021			if (m->m_flags & M_FASTFWD_OURS) {
6022				if (pd.pf_mtag == NULL &&
6023				    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6024					action = PF_DROP;
6025					REASON_SET(&reason, PFRES_MEMORY);
6026					log = 1;
6027					DPFPRINTF(PF_DEBUG_MISC,
6028					    ("pf: failed to allocate tag\n"));
6029				} else {
6030					pd.pf_mtag->flags |=
6031					    PF_FASTFWD_OURS_PRESENT;
6032					m->m_flags &= ~M_FASTFWD_OURS;
6033				}
6034			}
6035			ip_divert_ptr(*m0, dir ==  PF_IN ? DIR_IN : DIR_OUT);
6036			*m0 = NULL;
6037
6038			return (action);
6039		} else {
6040			/* XXX: ipfw has the same behaviour! */
6041			action = PF_DROP;
6042			REASON_SET(&reason, PFRES_MEMORY);
6043			log = 1;
6044			DPFPRINTF(PF_DEBUG_MISC,
6045			    ("pf: failed to allocate divert tag\n"));
6046		}
6047	}
6048
6049	if (log) {
6050		struct pf_rule *lr;
6051
6052		if (s != NULL && s->nat_rule.ptr != NULL &&
6053		    s->nat_rule.ptr->log & PF_LOG_ALL)
6054			lr = s->nat_rule.ptr;
6055		else
6056			lr = r;
6057		PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
6058		    (s == NULL));
6059	}
6060
6061	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6062	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
6063
6064	if (action == PF_PASS || r->action == PF_DROP) {
6065		dirndx = (dir == PF_OUT);
6066		r->packets[dirndx]++;
6067		r->bytes[dirndx] += pd.tot_len;
6068		if (a != NULL) {
6069			a->packets[dirndx]++;
6070			a->bytes[dirndx] += pd.tot_len;
6071		}
6072		if (s != NULL) {
6073			if (s->nat_rule.ptr != NULL) {
6074				s->nat_rule.ptr->packets[dirndx]++;
6075				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6076			}
6077			if (s->src_node != NULL) {
6078				s->src_node->packets[dirndx]++;
6079				s->src_node->bytes[dirndx] += pd.tot_len;
6080			}
6081			if (s->nat_src_node != NULL) {
6082				s->nat_src_node->packets[dirndx]++;
6083				s->nat_src_node->bytes[dirndx] += pd.tot_len;
6084			}
6085			dirndx = (dir == s->direction) ? 0 : 1;
6086			s->packets[dirndx]++;
6087			s->bytes[dirndx] += pd.tot_len;
6088		}
6089		tr = r;
6090		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6091		if (nr != NULL && r == &V_pf_default_rule)
6092			tr = nr;
6093		if (tr->src.addr.type == PF_ADDR_TABLE)
6094			pfr_update_stats(tr->src.addr.p.tbl,
6095			    (s == NULL) ? pd.src :
6096			    &s->key[(s->direction == PF_IN)]->
6097				addr[(s->direction == PF_OUT)],
6098			    pd.af, pd.tot_len, dir == PF_OUT,
6099			    r->action == PF_PASS, tr->src.neg);
6100		if (tr->dst.addr.type == PF_ADDR_TABLE)
6101			pfr_update_stats(tr->dst.addr.p.tbl,
6102			    (s == NULL) ? pd.dst :
6103			    &s->key[(s->direction == PF_IN)]->
6104				addr[(s->direction == PF_IN)],
6105			    pd.af, pd.tot_len, dir == PF_OUT,
6106			    r->action == PF_PASS, tr->dst.neg);
6107	}
6108
6109	switch (action) {
6110	case PF_SYNPROXY_DROP:
6111		m_freem(*m0);
6112	case PF_DEFER:
6113		*m0 = NULL;
6114		action = PF_PASS;
6115		break;
6116	case PF_DROP:
6117		m_freem(*m0);
6118		*m0 = NULL;
6119		break;
6120	default:
6121		/* pf_route() returns unlocked. */
6122		if (r->rt) {
6123			pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
6124			return (action);
6125		}
6126		break;
6127	}
6128	if (s)
6129		PF_STATE_UNLOCK(s);
6130
6131	return (action);
6132}
6133#endif /* INET */
6134
6135#ifdef INET6
6136int
6137pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
6138{
6139	struct pfi_kif		*kif;
6140	u_short			 action, reason = 0, log = 0;
6141	struct mbuf		*m = *m0, *n = NULL;
6142	struct m_tag		*mtag;
6143	struct ip6_hdr		*h = NULL;
6144	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
6145	struct pf_state		*s = NULL;
6146	struct pf_ruleset	*ruleset = NULL;
6147	struct pf_pdesc		 pd;
6148	int			 off, terminal = 0, dirndx, rh_cnt = 0;
6149	int			 fwdir = dir;
6150
6151	M_ASSERTPKTHDR(m);
6152
6153	/* Detect packet forwarding.
6154	 * If the input interface is different from the output interface we're
6155	 * forwarding.
6156	 * We do need to be careful about bridges. If the
6157	 * net.link.bridge.pfil_bridge sysctl is set we can be filtering on a
6158	 * bridge, so if the input interface is a bridge member and the output
6159	 * interface is its bridge or a member of the same bridge we're not
6160	 * actually forwarding but bridging.
6161	 */
6162	if (dir == PF_OUT && m->m_pkthdr.rcvif && ifp != m->m_pkthdr.rcvif &&
6163	    (m->m_pkthdr.rcvif->if_bridge == NULL ||
6164	    (m->m_pkthdr.rcvif->if_bridge != ifp->if_softc &&
6165	    m->m_pkthdr.rcvif->if_bridge != ifp->if_bridge)))
6166		fwdir = PF_FWD;
6167
6168	if (dir == PF_FWD)
6169		dir = PF_OUT;
6170
6171	if (!V_pf_status.running)
6172		return (PF_PASS);
6173
6174	memset(&pd, 0, sizeof(pd));
6175	pd.pf_mtag = pf_find_mtag(m);
6176
6177	if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
6178		return (PF_PASS);
6179
6180	kif = (struct pfi_kif *)ifp->if_pf_kif;
6181	if (kif == NULL) {
6182		DPFPRINTF(PF_DEBUG_URGENT,
6183		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
6184		return (PF_DROP);
6185	}
6186	if (kif->pfik_flags & PFI_IFLAG_SKIP)
6187		return (PF_PASS);
6188
6189	if (m->m_flags & M_SKIP_FIREWALL)
6190		return (PF_PASS);
6191
6192	PF_RULES_RLOCK();
6193
6194	/* We do IP header normalization and packet reassembly here */
6195	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
6196		action = PF_DROP;
6197		goto done;
6198	}
6199	m = *m0;	/* pf_normalize messes with m0 */
6200	h = mtod(m, struct ip6_hdr *);
6201
6202#if 1
6203	/*
6204	 * we do not support jumbogram yet.  if we keep going, zero ip6_plen
6205	 * will do something bad, so drop the packet for now.
6206	 */
6207	if (htons(h->ip6_plen) == 0) {
6208		action = PF_DROP;
6209		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
6210		goto done;
6211	}
6212#endif
6213
6214	pd.src = (struct pf_addr *)&h->ip6_src;
6215	pd.dst = (struct pf_addr *)&h->ip6_dst;
6216	pd.sport = pd.dport = NULL;
6217	pd.ip_sum = NULL;
6218	pd.proto_sum = NULL;
6219	pd.dir = dir;
6220	pd.sidx = (dir == PF_IN) ? 0 : 1;
6221	pd.didx = (dir == PF_IN) ? 1 : 0;
6222	pd.af = AF_INET6;
6223	pd.tos = 0;
6224	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6225
6226	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6227	pd.proto = h->ip6_nxt;
6228	do {
6229		switch (pd.proto) {
6230		case IPPROTO_FRAGMENT:
6231			action = pf_test_fragment(&r, dir, kif, m, h,
6232			    &pd, &a, &ruleset);
6233			if (action == PF_DROP)
6234				REASON_SET(&reason, PFRES_FRAG);
6235			goto done;
6236		case IPPROTO_ROUTING: {
6237			struct ip6_rthdr rthdr;
6238
6239			if (rh_cnt++) {
6240				DPFPRINTF(PF_DEBUG_MISC,
6241				    ("pf: IPv6 more than one rthdr\n"));
6242				action = PF_DROP;
6243				REASON_SET(&reason, PFRES_IPOPTIONS);
6244				log = 1;
6245				goto done;
6246			}
6247			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6248			    &reason, pd.af)) {
6249				DPFPRINTF(PF_DEBUG_MISC,
6250				    ("pf: IPv6 short rthdr\n"));
6251				action = PF_DROP;
6252				REASON_SET(&reason, PFRES_SHORT);
6253				log = 1;
6254				goto done;
6255			}
6256			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6257				DPFPRINTF(PF_DEBUG_MISC,
6258				    ("pf: IPv6 rthdr0\n"));
6259				action = PF_DROP;
6260				REASON_SET(&reason, PFRES_IPOPTIONS);
6261				log = 1;
6262				goto done;
6263			}
6264			/* FALLTHROUGH */
6265		}
6266		case IPPROTO_AH:
6267		case IPPROTO_HOPOPTS:
6268		case IPPROTO_DSTOPTS: {
6269			/* get next header and header length */
6270			struct ip6_ext	opt6;
6271
6272			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6273			    NULL, &reason, pd.af)) {
6274				DPFPRINTF(PF_DEBUG_MISC,
6275				    ("pf: IPv6 short opt\n"));
6276				action = PF_DROP;
6277				log = 1;
6278				goto done;
6279			}
6280			if (pd.proto == IPPROTO_AH)
6281				off += (opt6.ip6e_len + 2) * 4;
6282			else
6283				off += (opt6.ip6e_len + 1) * 8;
6284			pd.proto = opt6.ip6e_nxt;
6285			/* goto the next header */
6286			break;
6287		}
6288		default:
6289			terminal++;
6290			break;
6291		}
6292	} while (!terminal);
6293
6294	/* if there's no routing header, use unmodified mbuf for checksumming */
6295	if (!n)
6296		n = m;
6297
6298	switch (pd.proto) {
6299
6300	case IPPROTO_TCP: {
6301		struct tcphdr	th;
6302
6303		pd.hdr.tcp = &th;
6304		if (!pf_pull_hdr(m, off, &th, sizeof(th),
6305		    &action, &reason, AF_INET6)) {
6306			log = action != PF_PASS;
6307			goto done;
6308		}
6309		pd.p_len = pd.tot_len - off - (th.th_off << 2);
6310		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6311		if (action == PF_DROP)
6312			goto done;
6313		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6314		    &reason);
6315		if (action == PF_PASS) {
6316			if (pfsync_update_state_ptr != NULL)
6317				pfsync_update_state_ptr(s);
6318			r = s->rule.ptr;
6319			a = s->anchor.ptr;
6320			log = s->log;
6321		} else if (s == NULL)
6322			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6323			    &a, &ruleset, inp);
6324		break;
6325	}
6326
6327	case IPPROTO_UDP: {
6328		struct udphdr	uh;
6329
6330		pd.hdr.udp = &uh;
6331		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6332		    &action, &reason, AF_INET6)) {
6333			log = action != PF_PASS;
6334			goto done;
6335		}
6336		if (uh.uh_dport == 0 ||
6337		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6338		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6339			action = PF_DROP;
6340			REASON_SET(&reason, PFRES_SHORT);
6341			goto done;
6342		}
6343		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6344		if (action == PF_PASS) {
6345			if (pfsync_update_state_ptr != NULL)
6346				pfsync_update_state_ptr(s);
6347			r = s->rule.ptr;
6348			a = s->anchor.ptr;
6349			log = s->log;
6350		} else if (s == NULL)
6351			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6352			    &a, &ruleset, inp);
6353		break;
6354	}
6355
6356	case IPPROTO_ICMP: {
6357		action = PF_DROP;
6358		DPFPRINTF(PF_DEBUG_MISC,
6359		    ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6360		goto done;
6361	}
6362
6363	case IPPROTO_ICMPV6: {
6364		struct icmp6_hdr	ih;
6365
6366		pd.hdr.icmp6 = &ih;
6367		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6368		    &action, &reason, AF_INET6)) {
6369			log = action != PF_PASS;
6370			goto done;
6371		}
6372		action = pf_test_state_icmp(&s, dir, kif,
6373		    m, off, h, &pd, &reason);
6374		if (action == PF_PASS) {
6375			if (pfsync_update_state_ptr != NULL)
6376				pfsync_update_state_ptr(s);
6377			r = s->rule.ptr;
6378			a = s->anchor.ptr;
6379			log = s->log;
6380		} else if (s == NULL)
6381			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6382			    &a, &ruleset, inp);
6383		break;
6384	}
6385
6386	default:
6387		action = pf_test_state_other(&s, dir, kif, m, &pd);
6388		if (action == PF_PASS) {
6389			if (pfsync_update_state_ptr != NULL)
6390				pfsync_update_state_ptr(s);
6391			r = s->rule.ptr;
6392			a = s->anchor.ptr;
6393			log = s->log;
6394		} else if (s == NULL)
6395			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6396			    &a, &ruleset, inp);
6397		break;
6398	}
6399
6400done:
6401	PF_RULES_RUNLOCK();
6402	if (n != m) {
6403		m_freem(n);
6404		n = NULL;
6405	}
6406
6407	/* handle dangerous IPv6 extension headers. */
6408	if (action == PF_PASS && rh_cnt &&
6409	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6410		action = PF_DROP;
6411		REASON_SET(&reason, PFRES_IPOPTIONS);
6412		log = r->log;
6413		DPFPRINTF(PF_DEBUG_MISC,
6414		    ("pf: dropping packet with dangerous v6 headers\n"));
6415	}
6416
6417	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6418		action = PF_DROP;
6419		REASON_SET(&reason, PFRES_MEMORY);
6420	}
6421	if (r->rtableid >= 0)
6422		M_SETFIB(m, r->rtableid);
6423
6424#ifdef ALTQ
6425	if (action == PF_PASS && r->qid) {
6426		if (pd.pf_mtag == NULL &&
6427		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6428			action = PF_DROP;
6429			REASON_SET(&reason, PFRES_MEMORY);
6430		} else {
6431			if (s != NULL)
6432				pd.pf_mtag->qid_hash = pf_state_hash(s);
6433			if (pd.tos & IPTOS_LOWDELAY)
6434				pd.pf_mtag->qid = r->pqid;
6435			else
6436				pd.pf_mtag->qid = r->qid;
6437			/* Add hints for ecn. */
6438			pd.pf_mtag->hdr = h;
6439		}
6440	}
6441#endif /* ALTQ */
6442
6443	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6444	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6445	    (s->nat_rule.ptr->action == PF_RDR ||
6446	    s->nat_rule.ptr->action == PF_BINAT) &&
6447	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6448		m->m_flags |= M_SKIP_FIREWALL;
6449
6450	/* XXX: Anybody working on it?! */
6451	if (r->divert.port)
6452		printf("pf: divert(9) is not supported for IPv6\n");
6453
6454	if (log) {
6455		struct pf_rule *lr;
6456
6457		if (s != NULL && s->nat_rule.ptr != NULL &&
6458		    s->nat_rule.ptr->log & PF_LOG_ALL)
6459			lr = s->nat_rule.ptr;
6460		else
6461			lr = r;
6462		PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6463		    &pd, (s == NULL));
6464	}
6465
6466	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6467	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6468
6469	if (action == PF_PASS || r->action == PF_DROP) {
6470		dirndx = (dir == PF_OUT);
6471		r->packets[dirndx]++;
6472		r->bytes[dirndx] += pd.tot_len;
6473		if (a != NULL) {
6474			a->packets[dirndx]++;
6475			a->bytes[dirndx] += pd.tot_len;
6476		}
6477		if (s != NULL) {
6478			if (s->nat_rule.ptr != NULL) {
6479				s->nat_rule.ptr->packets[dirndx]++;
6480				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6481			}
6482			if (s->src_node != NULL) {
6483				s->src_node->packets[dirndx]++;
6484				s->src_node->bytes[dirndx] += pd.tot_len;
6485			}
6486			if (s->nat_src_node != NULL) {
6487				s->nat_src_node->packets[dirndx]++;
6488				s->nat_src_node->bytes[dirndx] += pd.tot_len;
6489			}
6490			dirndx = (dir == s->direction) ? 0 : 1;
6491			s->packets[dirndx]++;
6492			s->bytes[dirndx] += pd.tot_len;
6493		}
6494		tr = r;
6495		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6496		if (nr != NULL && r == &V_pf_default_rule)
6497			tr = nr;
6498		if (tr->src.addr.type == PF_ADDR_TABLE)
6499			pfr_update_stats(tr->src.addr.p.tbl,
6500			    (s == NULL) ? pd.src :
6501			    &s->key[(s->direction == PF_IN)]->addr[0],
6502			    pd.af, pd.tot_len, dir == PF_OUT,
6503			    r->action == PF_PASS, tr->src.neg);
6504		if (tr->dst.addr.type == PF_ADDR_TABLE)
6505			pfr_update_stats(tr->dst.addr.p.tbl,
6506			    (s == NULL) ? pd.dst :
6507			    &s->key[(s->direction == PF_IN)]->addr[1],
6508			    pd.af, pd.tot_len, dir == PF_OUT,
6509			    r->action == PF_PASS, tr->dst.neg);
6510	}
6511
6512	switch (action) {
6513	case PF_SYNPROXY_DROP:
6514		m_freem(*m0);
6515	case PF_DEFER:
6516		*m0 = NULL;
6517		action = PF_PASS;
6518		break;
6519	case PF_DROP:
6520		m_freem(*m0);
6521		*m0 = NULL;
6522		break;
6523	default:
6524		/* pf_route6() returns unlocked. */
6525		if (r->rt) {
6526			pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6527			return (action);
6528		}
6529		break;
6530	}
6531
6532	if (s)
6533		PF_STATE_UNLOCK(s);
6534
6535	/* If reassembled packet passed, create new fragments. */
6536	if (action == PF_PASS && *m0 && fwdir == PF_FWD &&
6537	    (mtag = m_tag_find(m, PF_REASSEMBLED, NULL)) != NULL)
6538		action = pf_refragment6(ifp, m0, mtag);
6539
6540	return (action);
6541}
6542#endif /* INET6 */
6543