/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*	$apfw: pf_table.c,v 1.4 2008/08/27 00:01:32 jhw Exp $ */
/*	$OpenBSD: pf_table.c,v 1.68 2006/05/02 10:08:45 dhartmei Exp $	*/

/*
 * Copyright (c) 2002 Cedric Berger
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/radix.h>
#include <net/pfvar.h>

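/*
 * Helper macros: ACCEPT_FLAGS rejects any ioctl flags outside the given
 * set; COPYIN/COPYOUT move records across the user-kernel boundary with
 * copyin/copyout when the request comes from a user ioctl
 * (PFR_FLAG_USERIOCTL) and fall back to a plain bcopy for
 * kernel-internal callers.
 */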
#define ACCEPT_FLAGS(flags, oklist)		\
	do {					\
		if ((flags & ~(oklist)) &	\
		    PFR_FLAG_ALLMASK)		\
			return (EINVAL);	\
	} while (0)

#define COPYIN(from, to, size, flags)				\
	((flags & PFR_FLAG_USERIOCTL) ?				\
	copyin((from), (to), (size)) :				\
	(bcopy((void *)(uintptr_t)(from), (to), (size)), 0))

#define COPYOUT(from, to, size, flags)				\
	((flags & PFR_FLAG_USERIOCTL) ?				\
	copyout((from), (to), (size)) :				\
	(bcopy((from), (void *)(uintptr_t)(to), (size)), 0))

#define	FILLIN_SIN(sin, addr)			\
	do {					\
		(sin).sin_len = sizeof (sin);	\
		(sin).sin_family = AF_INET;	\
		(sin).sin_addr = (addr);	\
	} while (0)

#define	FILLIN_SIN6(sin6, addr)				\
	do {						\
		(sin6).sin6_len = sizeof (sin6);	\
		(sin6).sin6_family = AF_INET6;		\
		(sin6).sin6_addr = (addr);		\
	} while (0)

#define SWAP(type, a1, a2)			\
	do {					\
		type tmp = a1;			\
		a1 = a2;			\
		a2 = tmp;			\
	} while (0)

#define SUNION2PF(su, af) (((af) == AF_INET) ?	\
	(struct pf_addr *)&(su)->sin.sin_addr :	\
	(struct pf_addr *)&(su)->sin6.sin6_addr)

#define	AF_BITS(af)		(((af) == AF_INET) ? 32 : 128)
#define	ADDR_NETWORK(ad)	((ad)->pfra_net < AF_BITS((ad)->pfra_af))
#define	KENTRY_NETWORK(ke)	((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
#define KENTRY_RNF_ROOT(ke) \
		((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0)

#define NO_ADDRESSES		(-1)
#define ENQUEUE_UNMARKED_ONLY	(1)
#define INVERT_NEG_FLAG		(1)

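/*
 * Context passed to pfr_walktree() via rnh_walktree(): pfrw_op selects
 * what the walker does at each radix node (mark, sweep, enqueue, copy
 * out addresses or stats, pick a pool entry, or refresh a dynaddr), and
 * the union carries the per-operation cursor or result.
 */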
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	}	 pfrw_op;
	union {
		user_addr_t		 pfrw1_addr;
		user_addr_t		 pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	}	 pfrw_1;
	int	 pfrw_free;
	int	 pfrw_flags;
};
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free

#define senderr(e)	do { rv = (e); goto _bad; } while (0)

struct pool		 pfr_ktable_pl;
struct pool		 pfr_kentry_pl;

static struct pool		pfr_kentry_pl2;
static struct sockaddr_in	pfr_sin;
static struct sockaddr_in6	pfr_sin6;
static union sockaddr_union	pfr_mask;
static struct pf_addr		pfr_ffaddr;

static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke);
static int pfr_validate_addr(struct pfr_addr *);
static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *,
    int *, int);
static void pfr_mark_addrs(struct pfr_ktable *);
static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
    struct pfr_addr *, int);
static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int);
static void pfr_destroy_kentries(struct pfr_kentryworkq *);
static void pfr_destroy_kentry(struct pfr_kentry *);
static void pfr_insert_kentries(struct pfr_ktable *,
    struct pfr_kentryworkq *, u_int64_t);
static void pfr_remove_kentries(struct pfr_ktable *, struct pfr_kentryworkq *);
static void pfr_clstats_kentries(struct pfr_kentryworkq *, u_int64_t, int);
static void pfr_reset_feedback(user_addr_t, int, int);
static void pfr_prepare_network(union sockaddr_union *, int, int);
static int pfr_route_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_unroute_kentry(struct pfr_ktable *, struct pfr_kentry *);
static int pfr_walktree(struct radix_node *, void *);
static int pfr_validate_table(struct pfr_table *, int, int);
static int pfr_fix_anchor(char *);
static void pfr_commit_ktable(struct pfr_ktable *, u_int64_t);
static void pfr_insert_ktables(struct pfr_ktableworkq *);
static void pfr_insert_ktable(struct pfr_ktable *);
static void pfr_setflags_ktables(struct pfr_ktableworkq *);
static void pfr_setflags_ktable(struct pfr_ktable *, int);
static void pfr_clstats_ktables(struct pfr_ktableworkq *, u_int64_t, int);
static void pfr_clstats_ktable(struct pfr_ktable *, u_int64_t, int);
static struct pfr_ktable *pfr_create_ktable(struct pfr_table *, u_int64_t, int);
static void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
static void pfr_destroy_ktable(struct pfr_ktable *, int);
static int pfr_ktable_compare(struct pfr_ktable *, struct pfr_ktable *);
static struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
static void pfr_clean_node_mask(struct pfr_ktable *, struct pfr_kentryworkq *);
static int pfr_table_count(struct pfr_table *, int);
static int pfr_skip_table(struct pfr_table *, struct pfr_ktable *, int);
static struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);

RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree,
    pfr_ktable_compare);
RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);

static struct pfr_ktablehead	pfr_ktables;
static struct pfr_table		pfr_nulltable;
static int			pfr_ktable_cnt;

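/*
 * One-time setup: initialize the pools backing tables and entries and
 * the preformatted sockaddr templates used for radix matching.
 */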
void
pfr_initialize(void)
{
	pool_init(&pfr_ktable_pl, sizeof (struct pfr_ktable), 0, 0, 0,
	    "pfrktable", NULL);
	pool_init(&pfr_kentry_pl, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry", NULL);
	pool_init(&pfr_kentry_pl2, sizeof (struct pfr_kentry), 0, 0, 0,
	    "pfrkentry2", NULL);

	pfr_sin.sin_len = sizeof (pfr_sin);
	pfr_sin.sin_family = AF_INET;
	pfr_sin6.sin6_len = sizeof (pfr_sin6);
	pfr_sin6.sin6_family = AF_INET6;

	memset(&pfr_ffaddr, 0xff, sizeof (pfr_ffaddr));
}

#if 0
void
pfr_destroy(void)
{
	pool_destroy(&pfr_ktable_pl);
	pool_destroy(&pfr_kentry_pl);
	pool_destroy(&pfr_kentry_pl2);
}
#endif

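/*
 * Flush every address from a table: enqueue all entries, then (unless
 * this is a dummy run) unroute and destroy them, reporting the count
 * through *ndel.
 */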
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		if (kt->pfrkt_cnt) {
			printf("pfr_clr_addrs: corruption detected (%d).\n",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}

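/*
 * Add a user-supplied array of addresses to a table. A throwaway table
 * (tmpkt) detects duplicates within the input itself; with
 * PFR_FLAG_FEEDBACK, each input record is copied back annotated with
 * the action taken (added, duplicate, conflict, none).
 */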
int
pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	user_addr_t		 addr = _addr;
	u_int64_t		 tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		q = pfr_lookup_addr(tmpkt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ad.pfra_fback = PFR_FB_ADDED;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else
				ad.pfra_fback = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
				xadd++;
			}
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &workq, tzero);
	} else
		pfr_destroy_kentries(&workq);
	if (nadd != NULL)
		*nadd = xadd;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &workq);
	pfr_destroy_kentries(&workq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

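/*
 * Delete a user-supplied array of addresses from a table, using the
 * mark bit on each entry so duplicate input records are detected and
 * only matching, unmarked entries end up on the deletion workq.
 */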
int
pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	user_addr_t		 addr = _addr;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * There are two algorithms to choose from here, with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n',
	 * one is O(n*log(N)) and is better for small 'n'.
	 *
	 * The following code tries to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++, addr += sizeof (ad)) {
			if (COPYIN(addr, &ad, sizeof (ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_mark = 0;
		}
	}
	SLIST_INIT(&workq);
	for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if (p->pfrke_not != ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_mark)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL && p->pfrke_not == ad.pfra_not &&
		    !p->pfrke_mark) {
			p->pfrke_mark = 1;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	return (rv);
}

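/*
 * Replace the contents of a table with a user-supplied address list:
 * entries present in both survive (with their negation flag toggled if
 * it changed), missing ones are deleted, new ones added. With
 * PFR_FLAG_FEEDBACK the deleted entries are appended to the caller's
 * buffer, *size2 reporting the space needed.
 */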
int
pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	user_addr_t		 addr = _addr;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0);
	if (tmpkt == NULL)
		return (ENOMEM);
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_mark) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_mark = 1;
			if (p->pfrke_not != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad,
			    !(flags & PFR_FLAG_USERIOCTL));
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
			} else {
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_ADDED;
				xadd++;
			}
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		addr = _addr + size;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
			addr += sizeof (ad);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}

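/*
 * Test a list of single host addresses (networks are rejected) against
 * a table, counting positive matches and annotating each record with
 * match/no-match feedback; PFR_FLAG_REPLACE additionally rewrites each
 * record with the entry that matched it.
 */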
int
pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size,
	int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !p->pfrke_not)
			xmatch++;
		if (COPYOUT(&ad, addr, sizeof (ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}

int
pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_addrs: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size,
	int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	u_int64_t		 tzero = pf_calendar_time_second();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	if (w.pfrw_free) {
		printf("pfr_get_astats: corruption detected (%d).\n",
		    w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}

int
pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	user_addr_t		 addr = _addr;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr, sizeof (ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, 0, 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(_addr, size, flags);
	return (rv);
}

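/*
 * Sanity-check a pfr_addr from userland: known address family, prefix
 * length within bounds, no stray bits set past the prefix or in the
 * unused part of the address union, and clean flag fields.
 */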
static int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
#if INET
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
			return (-1);
	for (i = (ad->pfra_net+7)/8; i < (int)sizeof (ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback)
		return (-1);
	return (0);
}

static void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
	int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof (w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6 != NULL)
		if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w))
			printf("pfr_enqueue_addrs: IPv6 walktree failed.\n");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}

static void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_MARK;
	if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv4 walktree failed.\n");
	if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		printf("pfr_mark_addrs: IPv6 walktree failed.\n");
}

static struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&sa, sizeof (sa));
	if (ad->pfra_af == AF_INET) {
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
	} else if (ad->pfra_af == AF_INET6) {
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
	} else
		return (NULL);
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}

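/*
 * Allocate and fill a radix entry for the given address; 'intr' selects
 * the secondary pool (used for entries created outside a user ioctl),
 * and pfrke_intrpool records the choice so pfr_destroy_kentry() returns
 * the entry to the pool it came from.
 */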
static struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad, int intr)
{
	struct pfr_kentry	*ke;

	if (intr)
		ke = pool_get(&pfr_kentry_pl2, PR_WAITOK);
	else
		ke = pool_get(&pfr_kentry_pl, PR_WAITOK);
	if (ke == NULL)
		return (NULL);
	bzero(ke, sizeof (*ke));

	if (ad->pfra_af == AF_INET)
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
	else if (ad->pfra_af == AF_INET6)
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	ke->pfrke_not = ad->pfra_not;
	ke->pfrke_intrpool = intr;
	return (ke);
}

static void
pfr_destroy_kentries(struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p, *q;

	for (p = SLIST_FIRST(workq); p != NULL; p = q) {
		q = SLIST_NEXT(p, pfrke_workq);
		pfr_destroy_kentry(p);
	}
}

static void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_intrpool)
		pool_put(&pfr_kentry_pl2, ke);
	else
		pool_put(&pfr_kentry_pl, ke);
}

static void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, u_int64_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			printf("pfr_insert_kentries: cannot route entry "
			    "(code=%d).\n", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		n++;
	}
	kt->pfrkt_cnt += n;
}

int
pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv;

	p = pfr_lookup_addr(kt, ad, 1);
	if (p != NULL)
		return (0);
	p = pfr_create_kentry(ad, 1);
	if (p == NULL)
		return (EINVAL);

	rv = pfr_route_kentry(kt, p);
	if (rv)
		return (rv);

	p->pfrke_tzero = tzero;
	kt->pfrkt_cnt++;

	return (0);
}

static void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		n++;
	}
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);
}

static void
pfr_clean_node_mask(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;

	SLIST_FOREACH(p, workq, pfrke_workq)
		pfr_unroute_kentry(kt, p);
}

static void
pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero,
    int negchange)
{
	struct pfr_kentry	*p;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	SLIST_FOREACH(p, workq, pfrke_workq) {
		if (negchange)
			p->pfrke_not = !p->pfrke_not;
		bzero(p->pfrke_packets, sizeof (p->pfrke_packets));
		bzero(p->pfrke_bytes, sizeof (p->pfrke_bytes));
		p->pfrke_tzero = tzero;
	}
}

static void
pfr_reset_feedback(user_addr_t addr, int size, int flags)
{
	struct pfr_addr	ad;
	int		i;

	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			break;
		ad.pfra_fback = PFR_FB_NONE;
		if (COPYOUT(&ad, addr, sizeof (ad), flags))
			break;
	}
}

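/*
 * Build a sockaddr netmask for the given family and prefix length,
 * e.g. af=AF_INET, net=20 yields 255.255.240.0 (htonl(-1 << 12)); for
 * AF_INET6 the mask is filled in 32 bits at a time.
 */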
static void
pfr_prepare_network(union sockaddr_union *sa, int af, int net)
{
	int	i;

	bzero(sa, sizeof (*sa));
	if (af == AF_INET) {
		sa->sin.sin_len = sizeof (sa->sin);
		sa->sin.sin_family = AF_INET;
		sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
	} else if (af == AF_INET6) {
		sa->sin6.sin6_len = sizeof (sa->sin6);
		sa->sin6.sin6_family = AF_INET6;
		for (i = 0; i < 4; i++) {
			if (net <= 32) {
				sa->sin6.sin6_addr.s6_addr32[i] =
				    net ? htonl(-1 << (32-net)) : 0;
				break;
			}
			sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
			net -= 32;
		}
	}
}

static int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(ke->pfrke_node, sizeof (ke->pfrke_node));
	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;
	else
		return (-1);

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);

	return (rn == NULL ? -1 : 0);
}

static int
pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (ke->pfrke_af == AF_INET)
		head = kt->pfrkt_ip4;
	else if (ke->pfrke_af == AF_INET6)
		head = kt->pfrkt_ip6;
	else
		return (-1);

	if (KENTRY_NETWORK(ke)) {
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_delete(&ke->pfrke_sa, &mask, head);
	} else
		rn = rn_delete(&ke->pfrke_sa, NULL, head);

	if (rn == NULL) {
		printf("pfr_unroute_kentry: delete failed.\n");
		return (-1);
	}
	return (0);
}

static void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof (*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_not = ke->pfrke_not;
	if (ad->pfra_af == AF_INET)
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
	else if (ad->pfra_af == AF_INET6)
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
}

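/*
 * rnh_walktree() callback: dispatch on the operation recorded in the
 * pfr_walktree context. Returning nonzero aborts the walk, used both
 * for copyout failures and to stop once PFRW_POOL_GET finds its entry.
 */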
static int
pfr_walktree(struct radix_node *rn, void *arg)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	int			 flags = w->pfrw_flags;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_mark = 0;
		break;
	case PFRW_SWEEP:
		if (ke->pfrke_mark)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof (ad)))
				return (EFAULT);
			w->pfrw_addr += sizeof (ad);
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			bcopy(ke->pfrke_packets, as.pfras_packets,
			    sizeof (as.pfras_packets));
			bcopy(ke->pfrke_bytes, as.pfras_bytes,
			    sizeof (as.pfras_bytes));
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags))
				return (EFAULT);
			w->pfrw_astats += sizeof (as);
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_not)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		if (ke->pfrke_af == AF_INET) {
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &pfr_mask, AF_INET);
		} else if (ke->pfrke_af == AF_INET6) {
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &pfr_mask, AF_INET6);
		}
		break;
	}
	return (0);
}

int
pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	int			 xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	if (pfr_table_count(filter, flags) < 0)
		return (ENOENT);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0)
			continue;
		if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq;
	struct pfr_ktable	*p, *q, *r, key;
	int			 i, rv, xadd = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			senderr(EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p == NULL) {
			p = pfr_create_ktable(&key.pfrkt_t, tzero, 1);
			if (p == NULL)
				senderr(ENOMEM);
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrkt_workq);
			xadd++;
			if (!key.pfrkt_anchor[0])
				goto _skip;

			/* find or create root table */
			bzero(key.pfrkt_anchor, sizeof (key.pfrkt_anchor));
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
			if (r != NULL) {
				p->pfrkt_root = r;
				goto _skip;
			}
			SLIST_FOREACH(q, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(&key, q)) {
					p->pfrkt_root = q;
					goto _skip;
				}
			}
			key.pfrkt_flags = 0;
			r = pfr_create_ktable(&key.pfrkt_t, 0, 1);
			if (r == NULL)
				senderr(ENOMEM);
			SLIST_INSERT_HEAD(&addq, r, pfrkt_workq);
			p->pfrkt_root = r;
		} else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &changeq, pfrkt_workq)
				if (!pfr_ktable_compare(&key, q))
					goto _skip;
			p->pfrkt_nflags = (p->pfrkt_flags &
			    ~PFR_TFLAG_USRMASK) | key.pfrkt_flags;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
			xadd++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	} else
		pfr_destroy_ktables(&addq, 0);
	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables(&addq, 0);
	return (rv);
}

int
pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl, sizeof (p->pfrkt_t), flags))
			return (EFAULT);
		tbl += sizeof (p->pfrkt_t);
	}
	if (n) {
		printf("pfr_get_tables: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size,
	int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	u_int64_t		 tzero = pf_calendar_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_ts, tbl, sizeof (p->pfrkt_ts), flags)) {
			return (EFAULT);
		}
		tbl += sizeof (p->pfrkt_ts);
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	if (n) {
		printf("pfr_get_tstats: corruption detected (%d).\n", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}

int
pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY |
	    PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}

int
pfr_set_tflags(user_addr_t tbl, int size, int setflag, int clrflag,
	int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) {
		if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags))
			return (EFAULT);
		pfr_table_copyin_cleanup(&key.pfrkt_t);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)(void *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1);
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof (key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name,
		    sizeof (key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1);
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0);
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++, addr += sizeof (ad)) {
		if (COPYIN(addr, &ad, sizeof (ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad, 0);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}

int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}

int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	u_int64_t		 tzero = pf_calendar_time_second();

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		for (p = SLIST_FIRST(&workq); p != NULL; p = q) {
			q = SLIST_NEXT(p, pfrkt_workq);
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}

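/*
 * Fold a shadow (inactive) table into its active counterpart at commit
 * time: a shadow defined without an address list just resets stats on a
 * newly activated table; otherwise the shadow's entries are merged one
 * by one into an active table, or the radix heads are swapped wholesale
 * when the table was not active.
 */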
1676static void
1677pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero)
1678{
1679	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
1680	int			 nflags;
1681
1682	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1683
1684	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
1685		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
1686			pfr_clstats_ktable(kt, tzero, 1);
1687	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
1688		/* kt might contain addresses */
1689		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
1690		struct pfr_kentry	*p, *q, *next;
1691		struct pfr_addr		 ad;
1692
1693		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
1694		pfr_mark_addrs(kt);
1695		SLIST_INIT(&addq);
1696		SLIST_INIT(&changeq);
1697		SLIST_INIT(&delq);
1698		SLIST_INIT(&garbageq);
1699		pfr_clean_node_mask(shadow, &addrq);
1700		for (p = SLIST_FIRST(&addrq); p != NULL; p = next) {
1701			next = SLIST_NEXT(p, pfrke_workq);	/* XXX */
1702			pfr_copyout_addr(&ad, p);
1703			q = pfr_lookup_addr(kt, &ad, 1);
1704			if (q != NULL) {
1705				if (q->pfrke_not != p->pfrke_not)
1706					SLIST_INSERT_HEAD(&changeq, q,
1707					    pfrke_workq);
1708				q->pfrke_mark = 1;
1709				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
1710			} else {
1711				p->pfrke_tzero = tzero;
1712				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
1713			}
1714		}
1715		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
1716		pfr_insert_kentries(kt, &addq, tzero);
1717		pfr_remove_kentries(kt, &delq);
1718		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
1719		pfr_destroy_kentries(&garbageq);
1720	} else {
1721		/* kt cannot contain addresses */
1722		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
1723		    shadow->pfrkt_ip4);
1724		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
1725		    shadow->pfrkt_ip6);
1726		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
1727		pfr_clstats_ktable(kt, tzero, 1);
1728	}
1729	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
1730	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE) &
1731	    ~PFR_TFLAG_INACTIVE;
1732	pfr_destroy_ktable(shadow, 0);
1733	kt->pfrkt_shadow = NULL;
1734	pfr_setflags_ktable(kt, nflags);
1735}
1736
1737void
1738pfr_table_copyin_cleanup(struct pfr_table *tbl)
1739{
1740	tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0';
1741	tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0';
1742}
1743
1744static int
1745pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
1746{
1747	int i;
1748
1749	if (!tbl->pfrt_name[0])
1750		return (-1);
1751	if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0)
1752		return (-1);
1753	if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
1754		return (-1);
1755	for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
1756		if (tbl->pfrt_name[i])
1757			return (-1);
1758	if (pfr_fix_anchor(tbl->pfrt_anchor))
1759		return (-1);
1760	if (tbl->pfrt_flags & ~allowedflags)
1761		return (-1);
1762	return (0);
1763}
1764
1765/*
1766 * Rewrite anchors referenced by tables to remove slashes
1767 * and check for validity.
1768 */
1769static int
1770pfr_fix_anchor(char *anchor)
1771{
1772	size_t siz = MAXPATHLEN;
1773	int i;
1774
1775	if (anchor[0] == '/') {
1776		char *path;
1777		int off;
1778
1779		path = anchor;
1780		off = 1;
1781		while (*++path == '/')
1782			off++;
1783		bcopy(path, anchor, siz - off);
1784		memset(anchor + siz - off, 0, off);
1785	}
1786	if (anchor[siz - 1])
1787		return (-1);
1788	for (i = strlen(anchor); i < (int)siz; i++)
1789		if (anchor[i])
1790			return (-1);
1791	return (0);
1792}
1793
1794static int
1795pfr_table_count(struct pfr_table *filter, int flags)
1796{
1797	struct pf_ruleset *rs;
1798
1799	if (flags & PFR_FLAG_ALLRSETS)
1800		return (pfr_ktable_cnt);
1801	if (filter->pfrt_anchor[0]) {
1802		rs = pf_find_ruleset(filter->pfrt_anchor);
1803		return ((rs != NULL) ? rs->tables : -1);
1804	}
1805	return (pf_main_ruleset.tables);
1806}
1807
1808static int
1809pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
1810{
1811	if (flags & PFR_FLAG_ALLRSETS)
1812		return (0);
1813	if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
1814		return (1);
1815	return (0);
1816}
1817
1818static void
1819pfr_insert_ktables(struct pfr_ktableworkq *workq)
1820{
1821	struct pfr_ktable	*p;
1822
1823	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1824
1825	SLIST_FOREACH(p, workq, pfrkt_workq)
1826		pfr_insert_ktable(p);
1827}
1828
1829static void
1830pfr_insert_ktable(struct pfr_ktable *kt)
1831{
1832	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1833
1834	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
1835	pfr_ktable_cnt++;
1836	if (kt->pfrkt_root != NULL)
1837		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
1838			pfr_setflags_ktable(kt->pfrkt_root,
1839			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
1840}
1841
1842static void
1843pfr_setflags_ktables(struct pfr_ktableworkq *workq)
1844{
1845	struct pfr_ktable	*p, *q;
1846
1847	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1848
1849	for (p = SLIST_FIRST(workq); p; p = q) {
1850		q = SLIST_NEXT(p, pfrkt_workq);
1851		pfr_setflags_ktable(p, p->pfrkt_nflags);
1852	}
1853}
1854
1855static void
1856pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
1857{
1858	struct pfr_kentryworkq	addrq;
1859
1860	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1861
1862	if (!(newf & PFR_TFLAG_REFERENCED) &&
1863	    !(newf & PFR_TFLAG_PERSIST))
1864		newf &= ~PFR_TFLAG_ACTIVE;
1865	if (!(newf & PFR_TFLAG_ACTIVE))
1866		newf &= ~PFR_TFLAG_USRMASK;
1867	if (!(newf & PFR_TFLAG_SETMASK)) {
1868		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
1869		if (kt->pfrkt_root != NULL)
1870			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
1871				pfr_setflags_ktable(kt->pfrkt_root,
1872				    kt->pfrkt_root->pfrkt_flags &
1873				    ~PFR_TFLAG_REFDANCHOR);
1874		pfr_destroy_ktable(kt, 1);
1875		pfr_ktable_cnt--;
1876		return;
1877	}
1878	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
1879		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1880		pfr_remove_kentries(kt, &addrq);
1881	}
1882	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
1883		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
1884		kt->pfrkt_shadow = NULL;
1885	}
1886	kt->pfrkt_flags = newf;
1887}
1888
1889static void
1890pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse)
1891{
1892	struct pfr_ktable	*p;
1893
1894	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1895
1896	SLIST_FOREACH(p, workq, pfrkt_workq)
1897		pfr_clstats_ktable(p, tzero, recurse);
1898}
1899
1900static void
1901pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse)
1902{
1903	struct pfr_kentryworkq	 addrq;
1904
1905	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1906
1907	if (recurse) {
1908		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
1909		pfr_clstats_kentries(&addrq, tzero, 0);
1910	}
1911	bzero(kt->pfrkt_packets, sizeof (kt->pfrkt_packets));
1912	bzero(kt->pfrkt_bytes, sizeof (kt->pfrkt_bytes));
1913	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
1914	kt->pfrkt_tzero = tzero;
1915}
1916
static struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	kt = pool_get(&pfr_ktable_pl, PR_WAITOK);
	if (kt == NULL)
		return (NULL);
	bzero(kt, sizeof (*kt));
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr) * 8) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr) * 8)) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;

	return (kt);
}

static void
pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
{
	struct pfr_ktable	*p, *q;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (p = SLIST_FIRST(workq); p; p = q) {
		q = SLIST_NEXT(p, pfrkt_workq);
		pfr_destroy_ktable(p, flushaddr);
	}
}

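/*
 * Tear down a ktable: optionally flush its addresses (detaching their
 * radix node masks first), free both radix heads, recursively destroy
 * any shadow table, drop the table from its ruleset's count (freeing
 * the ruleset if that leaves it empty) and return the ktable to the
 * pool.
 */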
static void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq	 addrq;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		_FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE);
	if (kt->pfrkt_ip6 != NULL)
		_FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE);
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}

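/* RB-tree comparator: order tables by name, then by anchor path */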
static int
pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
{
	int d;

	if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
		return (d);
	return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
}

static struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* struct pfr_ktable starts like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)(void *)tbl));
}

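/*
 * Data-path lookup, used by the rule-matching code for table addresses:
 * does address "a" match table "kt"?  An inactive table defers to its
 * root table.  Returns 1 on a match against a non-negated entry, 0
 * otherwise, and updates the table's match/nomatch counters.  The
 * static pfr_sin/pfr_sin6 scratch sockaddrs are usable here because
 * pf_lock is held.
 */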
int
pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
{
	struct pfr_kentry	*ke = NULL;
	int			 match;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (0);

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	}
	match = (ke && !ke->pfrke_not);
	if (match)
		kt->pfrkt_match++;
	else
		kt->pfrkt_nomatch++;
	return (match);
}

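/*
 * Account a packet of "len" bytes against table "kt".  The lookup
 * mirrors pfr_match_addr(); the packet/byte counters are indexed by
 * direction and operation.  If the lookup result contradicts what the
 * matching rule expected ("notrule"), the packet is counted under
 * PFR_OP_XPASS instead and no per-entry counters are touched.
 */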
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    u_int64_t len, int dir_out, int op_pass, int notrule)
{
	struct pfr_kentry	*ke = NULL;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return;

	switch (af) {
#if INET
	case AF_INET:
		pfr_sin.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
		if (ke && KENTRY_RNF_ROOT(ke))
			ke = NULL;
		break;
#endif /* INET6 */
	default:
		;
	}
	if ((ke == NULL || ke->pfrke_not) != notrule) {
		if (op_pass != PFR_OP_PASS)
			printf("pfr_update_stats: assertion failed.\n");
		op_pass = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_out][op_pass]++;
	kt->pfrkt_bytes[dir_out][op_pass] += len;
	if (ke != NULL && op_pass != PFR_OP_XPASS) {
		ke->pfrke_packets[dir_out][op_pass]++;
		ke->pfrke_bytes[dir_out][op_pass] += len;
	}
}

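/*
 * Look up (or create) the table named "name" for a rule in ruleset
 * "rs" and take a rule reference on it.  For a table inside an anchor,
 * a root table of the same name is also ensured in the main ruleset
 * and linked through pfrkt_root, so lookups can fall back to it while
 * the anchor's table is inactive.
 */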
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name)
{
	struct pfr_ktable	*kt, *rt;
	struct pfr_table	 tbl;
	struct pf_anchor	*ac = rs->anchor;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&tbl, sizeof (tbl));
	strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			bzero(tbl.pfrt_anchor, sizeof (tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}

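/*
 * Drop a rule reference on "kt"; when the last one goes away, clear
 * PFR_TFLAG_REFERENCED, which may destroy the table unless it is
 * persistent or otherwise still in use.
 */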
void
pfr_detach_table(struct pfr_ktable *kt)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
		printf("pfr_detach_table: refcount = %d.\n",
		    kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
	else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
		pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
}

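/*
 * Round-robin address selection for address pools backed by a table.
 * "*pidx" selects the current block (table entry) and "counter" the
 * current address within that block; both advance as addresses are
 * handed out, and nested (more specific) blocks are skipped so each
 * address is used only once per cycle.  Returns 0 with *raddr/*rmask
 * set on success, 1 once the table is exhausted (the caller can then
 * restart from index 0), and -1 on error.  A pool-using caller drives
 * it roughly like this (sketch, not verbatim caller code):
 *
 *	if (pfr_pool_get(kt, &rpool_tblidx, &rpool_counter,
 *	    &raddr, &rmask, af))
 *		rpool_tblidx = -1;	// exhausted; wrap on next call
 */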
int
pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter,
    struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af)
{
	struct pfr_kentry	*ke, *ke2;
	struct pf_addr		*addr;
	union sockaddr_union	 mask;
	int			 idx = -1, use_counter = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (af == AF_INET)
		addr = (struct pf_addr *)&pfr_sin.sin_addr;
	else if (af == AF_INET6)
		addr = (struct pf_addr *)&pfr_sin6.sin6_addr;
	else
		return (-1);

	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (-1);

	if (pidx != NULL)
		idx = *pidx;
	if (counter != NULL && idx >= 0)
		use_counter = 1;
	if (idx < 0)
		idx = 0;

_next_block:
	ke = pfr_kentry_byidx(kt, idx, af);
	if (ke == NULL) {
		kt->pfrkt_nomatch++;
		return (1);
	}
	pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
	*raddr = SUNION2PF(&ke->pfrke_sa, af);
	*rmask = SUNION2PF(&pfr_mask, af);

	if (use_counter) {
		/* is supplied address within block? */
		if (!PF_MATCHA(0, *raddr, *rmask, counter, af)) {
			/* no, go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
		PF_ACPY(addr, counter, af);
	} else {
		/* use first address of block */
		PF_ACPY(addr, *raddr, af);
	}

	if (!KENTRY_NETWORK(ke)) {
		/* this is a single IP address - no possible nested block */
		PF_ACPY(counter, addr, af);
		*pidx = idx;
		kt->pfrkt_match++;
		return (0);
	}
	for (;;) {
		/* we don't want to use a nested block */
		if (af == AF_INET)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin,
			    kt->pfrkt_ip4);
		else if (af == AF_INET6)
			ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6,
			    kt->pfrkt_ip6);
		else
			return (-1); /* never happens */
		/* no need to check KENTRY_RNF_ROOT() here */
		if (ke2 == ke) {
			/* lookup returned the same block - perfect */
			PF_ACPY(counter, addr, af);
			*pidx = idx;
			kt->pfrkt_match++;
			return (0);
		}

		/* we need to increase the counter past the nested block */
		pfr_prepare_network(&mask, af, ke2->pfrke_net);
		PF_POOLMASK(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
		PF_AINC(addr, af);
		if (!PF_MATCHA(0, *raddr, *rmask, addr, af)) {
			/* ok, we reached the end of our main block */
			/* go to next block in table */
			idx++;
			use_counter = 0;
			goto _next_block;
		}
	}
}

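/*
 * Return the idx-th entry of the given address family, found via a
 * PFRW_POOL_GET walk of the corresponding radix tree, or NULL if the
 * index is out of range.
 */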
static struct pfr_kentry *
pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
{
	struct pfr_walktree	w;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_POOL_GET;
	w.pfrw_cnt = idx;

	switch (af) {
#if INET
	case AF_INET:
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET */
#if INET6
	case AF_INET6:
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
		return (w.pfrw_kentry);
#endif /* INET6 */
	default:
		return (NULL);
	}
}

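/*
 * Refresh the dynamic address state "dyn" from the contents of table
 * "kt": reset its per-family address counts, then walk the IPv4 and/or
 * IPv6 trees (as selected by dyn->pfid_af) with PFRW_DYNADDR_UPDATE to
 * reinstall the tracked addresses.
 */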
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree	w;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&w, sizeof (w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET)
		(void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4,
		    pfr_walktree, &w);
	if (!dyn->pfid_af || dyn->pfid_af == AF_INET6)
		(void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6,
		    pfr_walktree, &w);
}
