
/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>

#include <netinet/in.h>
#include <net/if.h>

#include "netinet/ip_compat.h"
#include "bpf-ipf.h"


#if (defined(__hpux) || SOLARIS) && (defined(_KERNEL) || defined(KERNEL))
# include <sys/sysmacros.h>
# include <sys/stream.h>
#endif

#include "pcap-ipf.h"

#if !defined(KERNEL) && !defined(_KERNEL)
#include <stdlib.h>
#endif

#define int32 bpf_int32
#define u_int32 bpf_u_int32

static int m_xword(mb_t *, int, int *);
static int m_xhalf(mb_t *, int, int *);

#ifndef LBL_ALIGN
/*
 * XXX - IA-64?  If not, this probably won't work on Win64 IA-64
 * systems, unless LBL_ALIGN is defined elsewhere for them.
 * XXX - SuperH?  If not, this probably won't work on WinCE SuperH
 * systems, unless LBL_ALIGN is defined elsewhere for them.
 */
#if defined(sparc) || defined(__sparc__) || defined(mips) || \
    defined(ibm032) || defined(__alpha) || defined(__hpux) || \
    defined(__arm__)
#define LBL_ALIGN
#endif
#endif

#ifndef LBL_ALIGN

#define EXTRACT_SHORT(p)	((u_short)ntohs(*(u_short *)p))
#define EXTRACT_LONG(p)		(ntohl(*(u_int32 *)p))
#else
#define EXTRACT_SHORT(p)\
	((u_short)\
		((u_short)*((u_char *)p+0)<<8|\
		 (u_short)*((u_char *)p+1)<<0))
#define EXTRACT_LONG(p)\
		((u_int32)*((u_char *)p+0)<<24|\
		 (u_int32)*((u_char *)p+1)<<16|\
		 (u_int32)*((u_char *)p+2)<<8|\
		 (u_int32)*((u_char *)p+3)<<0)
#endif

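/*
 * Illustrative note (not part of the original source): both variants read
 * the bytes p[0], p[1], ... as a big-endian (network-order) quantity, so
 * for the bytes { 0x12, 0x34 } EXTRACT_SHORT(p) yields 0x1234 on any host.
 * The byte-at-a-time form is used when LBL_ALIGN is defined, since on such
 * machines p may not be suitably aligned for a direct u_short/u_int32 load.
 */
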
/*
 * Advance through an mbuf chain: skip whole mbufs until offset _k falls
 * within the current one, leaving _m pointing at that mbuf, _k as the
 * offset into it and len as its length.  Makes the enclosing function
 * return 0 if the chain is exhausted first.
 */
#define MINDEX(len, _m, _k) \
{ \
	len = M_LEN(_m); \
	while ((_k) >= len) { \
		(_k) -= len; \
		(_m) = (_m)->m_next; \
		if ((_m) == 0) \
			return (0); \
		len = M_LEN(_m); \
	} \
}

/*
 * Fetch the 32-bit word at offset k, gathering bytes from the next mbuf
 * if the word straddles an mbuf boundary.  *err is set non-zero if the
 * data isn't there.
 */
static int
m_xword(mb_t *m, int k, int *err)
{
	register int len;
	register u_char *cp, *np;
	register mb_t *m0;

	MINDEX(len, m, k);
	cp = MTOD(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return (EXTRACT_LONG(cp));
	}
	m0 = m->m_next;
	if (m0 == NULL || M_LEN(m0) + len - k < 4)
		goto bad;
	*err = 0;
	np = MTOD(m0, u_char *);
	switch (len - k) {

	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];

	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];

	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
    bad:
	*err = 1;
	return (0);
}

/*
 * Fetch the 16-bit halfword at offset k, gathering the second byte from
 * the next mbuf if the halfword straddles an mbuf boundary.  *err is set
 * non-zero if the data isn't there.
 */
static int
m_xhalf(mb_t *m, int k, int *err)
{
	register int len;
	register u_char *cp;
	register mb_t *m0;

	MINDEX(len, m, k);
	cp = MTOD(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return (EXTRACT_SHORT(cp));
	}
	m0 = m->m_next;
	if (m0 == NULL)
		goto bad;
	*err = 0;
	return (cp[0] << 8) | MTOD(m0, u_char *)[0];
 bad:
	*err = 1;
	return (0);
}

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet and buflen is the
 * amount of data present.
 * For the kernel, p is assumed to be a pointer to an mbuf if buflen is 0;
 * in all other cases, p is a pointer to a buffer and buflen is its size.
 */
u_int
bpf_filter(struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	register u_int32 A, X;
	register int k;
	int32 mem[BPF_MEMWORDS];
	mb_t *m, *n;
	int merr = 0;	/* XXX: GCC */
	int len;

	if (buflen == 0) {
		m = (mb_t *)p;
		p = MTOD(m, u_char *);
		buflen = M_LEN(m);
	} else
		m = NULL;

	if (pc == NULL)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	--pc;
	while (1) {
		++pc;
		switch (pc->code) {

		default:
			return (0);
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k + sizeof(int32) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xword(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k + sizeof(short) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xhalf(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
				if (m == NULL)
					return (0);
				n = m;
				MINDEX(len, n, k);
				A = MTOD(n, u_char *)[k];
				continue;
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (k + sizeof(int32) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xword(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (k + sizeof(short) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xhalf(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (k >= buflen) {
				if (m == NULL)
					return (0);
				n = m;
				MINDEX(len, n, k);
				A = MTOD(n, u_char *)[k];
				continue;
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
				if (m == NULL)
					return (0);
				n = m;
				MINDEX(len, n, k);
				X = (MTOD(n, char *)[k] & 0xf) << 2;
				continue;
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return (0);
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}
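
/*
 * Illustrative sketch (not part of the original source): how a caller
 * holding a contiguous capture buffer might run a program through
 * bpf_filter().  The function and variable names here are hypothetical;
 * the one-instruction "accept everything" program is only an example.
 */
#if 0
static u_int
example_run_filter(u_char *pkt, u_int wirelen, u_int caplen)
{
	/* BPF_RET|BPF_K with k = (u_int)-1: keep the whole packet. */
	struct bpf_insn accept_all = { BPF_RET|BPF_K, 0, 0, (u_int)-1 };

	/*
	 * With a non-zero buflen, p is treated as a flat buffer; with a
	 * zero buflen it would instead be taken as an mbuf pointer.  The
	 * return value is the number of bytes to keep (0 means reject).
	 */
	return bpf_filter(&accept_all, pkt, wirelen, caplen);
}
#endif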


/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
int
bpf_validate(struct bpf_insn *f, int len)
{
	u_int i, from;
	const struct bpf_insn *p;

	if (len == 0)
		return (1);

	if (len < 1 || len > BPF_MAXINSNS)
		return (0);

	for (i = 0; i < len; ++i) {
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * The stricter check against the actual
				 * packet length is done at run time.
				 */
#if 0
				if (p->k >= bpf_maxbufsize)
					return (0);
#endif
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return (0);
				break;
			case BPF_LEN:
				break;
			default:
				return (0);
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return (0);
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return (0);
				break;
			default:
				return (0);
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k < from || from + p->k >= len)
					return (0);
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					return (0);
				break;
			default:
				return (0);
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return (0);
		}
	}
	return (BPF_CLASS(f[len - 1].code) == BPF_RET);
}
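
/*
 * Illustrative sketch (not part of the original source): a caller would
 * typically validate a user-supplied program before ever handing it to
 * bpf_filter(), as the comment above bpf_validate() describes.  The names
 * below are hypothetical.
 */
#if 0
static int
example_install_filter(struct bpf_insn *prog, int ninsns)
{
	if (!bpf_validate(prog, ninsns))
		return (-1);	/* reject bogus programs */
	/* safe to run prog with bpf_filter() later */
	return (0);
}
#endif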