optimize.c revision 172678
/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *  Optimization module for tcpdump intermediate representation.
 */
#ifndef lint
static const char rcsid[] _U_ =
    "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.85.2.3 2007/09/12 21:29:45 guy Exp $ (LBL)";
#endif

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <string.h>

#include <errno.h>

#include "pcap-int.h"

#include "gencode.h"

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#ifdef BDEBUG
extern int dflag;
#endif

#if defined(MSDOS) && !defined(__DJGPP__)
extern int _w32_ffs (int mask);
#define ffs _w32_ffs
#endif

/*
 * Represents a deleted instruction.
 */
#define NOP -1

/*
 * Register numbers for use-def values.
 * 0 through BPF_MEMWORDS-1 represent the corresponding scratch memory
 * location.  A_ATOM is the accumulator and X_ATOM is the index
 * register.
 */
#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS

/*
 * A flag to indicate that further optimization is needed.
 * Iterative passes are continued until a given pass yields no
 * branch movement.
 */
static int done;

/*
 * A block is marked if and only if its mark equals the current mark.
 * Rather than traverse the code array, marking each item, 'cur_mark' is
 * incremented.  This automatically makes each element unmarked.
 */
static int cur_mark;
#define isMarked(p) ((p)->mark == cur_mark)
#define unMarkAll() cur_mark += 1
#define Mark(p) ((p)->mark = cur_mark)
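
/*
 * Worked example (editor's illustration, not in the original source):
 * after unMarkAll() bumps cur_mark to, say, 4, Mark(b) stores 4 in
 * b->mark, so isMarked(b) is true.  The next unMarkAll() makes
 * cur_mark 5, and isMarked(b) is false again without touching
 * b->mark; one increment unmarks every block in O(1).
 */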

static void opt_init(struct block *);
static void opt_cleanup(void);

static void make_marks(struct block *);
static void mark_code(struct block *);

static void intern_blocks(struct block *);

static int eq_slist(struct slist *, struct slist *);

static void find_levels_r(struct block *);

static void find_levels(struct block *);
static void find_dom(struct block *);
static void propedom(struct edge *);
static void find_edom(struct block *);
static void find_closure(struct block *);
static int atomuse(struct stmt *);
static int atomdef(struct stmt *);
static void compute_local_ud(struct block *);
static void find_ud(struct block *);
static void init_val(void);
static int F(int, int, int);
static inline void vstore(struct stmt *, int *, int, int);
static void opt_blk(struct block *, int);
static int use_conflict(struct block *, struct block *);
static void opt_j(struct edge *);
static void or_pullup(struct block *);
static void and_pullup(struct block *);
static void opt_blks(struct block *, int);
static inline void link_inedge(struct edge *, struct block *);
static void find_inedges(struct block *);
static void opt_root(struct block **);
static void opt_loop(struct block *, int);
static void fold_op(struct stmt *, int, int);
static inline struct slist *this_op(struct slist *);
static void opt_not(struct block *);
static void opt_peep(struct block *);
static void opt_stmt(struct stmt *, int[], int);
static void deadstmt(struct stmt *, struct stmt *[]);
static void opt_deadstores(struct block *);
static struct block *fold_edge(struct block *, struct edge *);
static inline int eq_blk(struct block *, struct block *);
static int slength(struct slist *);
static int count_blocks(struct block *);
static void number_blks_r(struct block *);
static int count_stmts(struct block *);
static int convert_code_r(struct block *);
#ifdef BDEBUG
static void opt_dump(struct block *);
#endif

static int n_blocks;
struct block **blocks;
static int n_edges;
struct edge **edges;

/*
 * A bit vector set representation of the dominators.
 * We round up the set size to the next power of two.
 */
static int nodewords;
static int edgewords;
struct block **levels;
bpf_u_int32 *space;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
/*
 * True if 'a' is in uset {p}.
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &= *_y++;\
}

/*
 * a := a - b
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &=~ *_y++;\
}

/*
 * a := a union b
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ |= *_y++;\
}
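
/*
 * For example (editor's illustration): with 32-bit words, block id 37
 * lives in word 37 / 32 == 1 at bit 37 % 32 == 5, so SET_INSERT(p, 37)
 * sets bit 5 of p[1] and SET_MEMBER(p, 37) tests that same bit.  The
 * SET_INTERSECT, SET_SUBTRACT and SET_UNION macros then combine whole
 * sets one word at a time.
 */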

static uset all_dom_sets;
static uset all_closure_sets;
static uset all_edge_sets;

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

static void
find_levels_r(b)
	struct block *b;
{
	int level;

	if (isMarked(b))
		return;

	Mark(b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(JT(b));
		find_levels_r(JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = levels[level];
	levels[level] = b;
}

/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(root)
	struct block *root;
{
	memset((char *)levels, 0, n_blocks * sizeof(*levels));
	unMarkAll();
	find_levels_r(root);
}
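
/*
 * Small example (editor's illustration): for the graph
 *
 *	A -> B, C;  B -> ret0, ret1;  C -> ret0, ret1
 *
 * the return leaves get level 0, B and C get level 1, and A gets
 * level 2.  With JT(A) == B, B is linked into levels[1] first, so
 * levels[1] points at C and C's 'link' points at B.
 */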

/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(root)
	struct block *root;
{
	int i;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = all_dom_sets;
	i = n_blocks * nodewords;
	while (--i >= 0)
		*x++ = ~0;
	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)
		root->dom[i] = 0;

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
		}
	}
}
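
/*
 * Worked example (editor's illustration): in the diamond A -> {B, C} -> D
 * all sets start full, A's set is cleared and then becomes {A}, B and C
 * intersect down to {A, B} and {A, C}, and D ends up with {A, D} - the
 * intersection of its two predecessors' sets plus its own id.
 */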

static void
propedom(ep)
	struct edge *ep;
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
	}
}

/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(root)
	struct block *root;
{
	int i;
	uset x;
	struct block *b;

	x = all_edge_sets;
	for (i = n_edges * edgewords; --i >= 0; )
		x[i] = ~0;

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			propedom(&b->et);
			propedom(&b->ef);
		}
	}
}

/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(root)
	struct block *root;
{
	int i;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	      n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
		}
	}
}

/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(s)
	struct stmt *s;
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}

/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(s)
	struct stmt *s;
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}

/*
 * Compute the sets of registers used, defined, and killed by 'b'.
 *
 * "Used" means that a statement in 'b' uses the register before any
 * statement in 'b' defines it, i.e. it uses the value left in
 * that register by a predecessor block of this block.
 * "Defined" means that a statement in 'b' defines it.
 * "Killed" means that a statement in 'b' defines it before any
 * statement in 'b' uses it, i.e. it kills the value left in that
 * register by a predecessor block of this block.
 */
static void
compute_local_ud(b)
	struct block *b;
{
	struct slist *s;
	atomset def = 0, use = 0, kill = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				kill |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (BPF_CLASS(b->s.code) == BPF_JMP) {
		/*
		 * XXX - what about RET?
		 */
		atom = atomuse(&b->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
	}

	b->def = def;
	b->kill = kill;
	b->in_use = use;
}
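
/*
 * Example (editor's illustration): for a block whose statements are
 *
 *	ld  [14]	(defines A)
 *	st  M[0]	(uses A, defines M[0])
 *	ldx M[1]	(uses M[1], defines X)
 *
 * 'use' is {M[1]}, since M[1] is the only register read before any
 * local definition; 'def' is {A, M[0], X}; and 'kill' is also
 * {A, M[0], X}, since each is written before the block reads it.
 */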

/*
 * Assume graph is already leveled.
 */
static void
find_ud(root)
	struct block *root;
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}

/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	int v0, v1;
	int val;
	struct valnode *next;
};

#define MODULUS 213
static struct valnode *hashtbl[MODULUS];
static int curval;
static int maxval;

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)

struct vmapinfo {
	int is_const;
	bpf_int32 const_val;
};

struct vmapinfo *vmap;
struct valnode *vnode_base;
struct valnode *next_vnode;

static void
init_val()
{
	curval = 0;
	next_vnode = vnode_base;
	memset((char *)vmap, 0, maxval * sizeof(*vmap));
	memset((char *)hashtbl, 0, sizeof hashtbl);
}

/* Because we really don't have an IR, this stuff is a little messy. */
static int
F(code, v0, v1)
	int code;
	int v0, v1;
{
	u_int hash;
	int val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	val = ++curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;
	}
	p = next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = hashtbl[hash];
	hashtbl[hash] = p;

	return val;
}
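
/*
 * For example (editor's illustration): two occurrences of "ldh [14]"
 * both call F() with the same (code, k, 0) triple, hash to the same
 * chain, and therefore receive the same value number, which is what
 * lets later passes treat the second load as redundant.  A "ld #k"
 * additionally records its constant in vmap[] via is_const.
 */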

static inline void
vstore(s, valp, newval, alter)
	struct stmt *s;
	int *valp;
	int newval;
	int alter;
{
	if (alter && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}

static void
fold_op(s, v0, v1)
	struct stmt *s;
	int v0, v1;
{
	bpf_u_int32 a, b;

	a = vmap[v0].const_val;
	b = vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			bpf_error("division by zero");
		a /= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_LSH:
		a <<= b;
		break;

	case BPF_RSH:
		a >>= b;
		break;

	case BPF_NEG:
		a = -a;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	done = 0;
}
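
/*
 * Example (editor's illustration): if value numbers v0 and v1 stand
 * for the known constants 6 and 2, an "and" statement folds to
 * "ld #2" (6 & 2 == 2): the stmt becomes BPF_LD|BPF_IMM with k == 2,
 * and 'done' is cleared so the optimizer makes another pass.
 */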

static inline struct slist *
this_op(s)
	struct slist *s;
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}

static void
opt_not(b)
	struct block *b;
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}

static void
opt_peep(b)
	struct block *b;
{
	struct slist *s;
	struct slist *next, *last;
	int val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	for (/*empty*/; /*empty*/; s = next) {
		/*
		 * Skip over nops.
		 */
		s = this_op(s);
		if (s == 0)
			break;	/* nothing left in the block */

		/*
		 * Find the next real instruction after that one
		 * (skipping nops).
		 */
		next = this_op(s->next);
		if (next == 0)
			break;	/* no next instruction */
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx  #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				continue;

			/*
			 * Check that the instruction following the ldi
			 * is an addx, or it's an ldxms with an addx
			 * following it (with 0 or more nops between the
			 * ldxms and addx).
			 */
			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				continue;

			/*
			 * Check that a tax follows that (with 0 or more
			 * nops between them).
			 */
			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				continue;

			/*
			 * Check that an ild follows that (with 0 or more
			 * nops between them).
			 */
			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				continue;
			/*
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 * XXX We need to check that X is not
			 * subsequently used, because we want to change
			 * what'll be in it after this sequence.
			 *
			 * We know we can eliminate the accumulator
			 * modifications earlier in the sequence since
			 * it is defined by the last stmt of this sequence
			 * (i.e., the last statement of the sequence loads
			 * a value into the accumulator, so we can eliminate
			 * earlier operations on the accumulator).
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
	}
	/*
	 * If the comparison at the end of a block is an equality
	 * comparison against a constant, and nobody uses the value
	 * we leave in the A register at the end of a block, and
	 * the operation preceding the comparison is an arithmetic
	 * operation, we can sometimes optimize it away.
	 */
	if (b->s.code == (BPF_JMP|BPF_JEQ|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		/*
		 * We can optimize away certain subtractions of the
		 * X register.
		 */
		if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X)) {
			val = b->val[X_ATOM];
			if (vmap[val].is_const) {
				/*
				 * If we have a subtract to do a comparison,
				 * and the X register is a known constant,
				 * we can merge this value into the
				 * comparison:
				 *
				 * sub x  ->	nop
				 * jeq #y	jeq #(x+y)
				 */
				b->s.k += vmap[val].const_val;
				last->s.code = NOP;
				done = 0;
			} else if (b->s.k == 0) {
				/*
				 * If the X register isn't a constant,
				 * and the comparison in the test is
				 * against 0, we can compare with the
				 * X register, instead:
				 *
				 * sub x  ->	nop
				 * jeq #0	jeq x
				 */
				last->s.code = NOP;
				b->s.code = BPF_JMP|BPF_JEQ|BPF_X;
				done = 0;
			}
		}
		/*
		 * Likewise, a constant subtract can be simplified:
		 *
		 * sub #x ->	nop
		 * jeq #y ->	jeq #(x+y)
		 */
		else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K)) {
			last->s.code = NOP;
			b->s.k += last->s.k;
			done = 0;
		}
		/*
		 * And, similarly, a constant AND can be simplified
		 * if we're testing against 0, i.e.:
		 *
		 * and #k	nop
		 * jeq #0  ->	jset #k
		 */
		else if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
		    b->s.k == 0) {
			b->s.k = last->s.k;
			b->s.code = BPF_JMP|BPF_K|BPF_JSET;
			last->s.code = NOP;
			done = 0;
			opt_not(b);
		}
	}
	/*
	 * jset #0        ->   never
	 * jset #ffffffff ->   always
	 */
	if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
		if (b->s.k == 0)
			JT(b) = JF(b);
		if (b->s.k == 0xffffffff)
			JF(b) = JT(b);
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}

/*
 * Compute the symbolic value of the expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(s, val, alter)
	struct stmt *s;
	int val[];
	int alter;
{
	int op;
	int v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
			done = 0;
		}
		else
			v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				/* don't optimize away "sub #0"
				 * as it may be needed later to
				 * fixup the generated math code */
				if (op == BPF_ADD ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
			}
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = vmap[val[X_ATOM]].const_val;
				done = 0;
				val[A_ATOM] =
					F(s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
				 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}

static void
deadstmt(s, last)
	register struct stmt *s;
	register struct stmt *last[];
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}

static void
opt_deadstores(b)
	register struct block *b;
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			done = 0;
		}
}
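
/*
 * Example (editor's illustration): in the statement list
 *
 *	st  M[3]
 *	ld  [20]
 *	st  M[3]
 *
 * the first store is dead: deadstmt() sees M[3] redefined with no
 * intervening use and turns the first "st" into a NOP.  The final
 * loop likewise NOPs any definition that survives to the end of the
 * block but is not in b->out_use.
 */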

static void
opt_blk(b, do_stmts)
	struct block *b;
	int do_stmts;
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_int32 aval, xval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 */
	p = b->in_edges;
	if (p == 0) {
		/*
		 * We have no predecessors, so everything is undefined
		 * upon entry to this block.
		 */
		memset((char *)b->val, 0, sizeof(b->val));
	} else {
		/*
		 * Inherit values from our predecessors.
		 *
		 * First, get the values from the predecessor along the
		 * first edge leading to this node.
		 */
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		/*
		 * Now look at all the other nodes leading to this node.
		 * If, for the predecessor along that edge, a register
		 * has a different value from the one we have (i.e.,
		 * control paths are merging, and the merging paths
		 * assign different values to that register), give the
		 * register the undefined value of 0.
		 */
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	xval = b->val[X_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator or index register with a
	 * value that is already there, or if this block is a return,
	 * eliminate all the statements.
	 *
	 * XXX - what if it does a store?
	 *
	 * XXX - why does it matter whether we use anything from this
	 * block?  If the accumulator or index register doesn't change
	 * its value, isn't that OK even if we use that value?
	 *
	 * XXX - if we load the accumulator with a different value,
	 * and the block ends with a conditional branch, we obviously
	 * can't eliminate it, as the branch depends on that value.
	 * For the index register, the conditional branch only depends
	 * on the index register value if the test is against the index
	 * register value rather than a constant; if nothing uses the
	 * value we put into the index register, and we're not testing
	 * against the index register's value, and there aren't any
	 * other problems that would keep us from eliminating this
	 * block, can we eliminate it?
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval &&
	      xval != 0 && b->val[X_ATOM] == xval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	} else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}

/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(b, succ)
	struct block *b, *succ;
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}

static struct block *
fold_edge(child, ep)
	struct block *child;
	struct edge *ep;
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands of the branch instructions are
		 * identical, so the result is true if a true
		 * branch was taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.
		 *
		 * I.e., if we came down the true branch, and the branch
		 * was an equality comparison with a constant, we know the
		 * accumulator contains that constant.  If we came down
		 * the false branch, or the comparison wasn't with a
		 * constant, we don't know what was in the accumulator.
		 *
		 * We rely on the fact that distinct constants have distinct
		 * value numbers.
		 */
		return JF(child);

	return 0;
}
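
/*
 * Example (editor's illustration): suppose a predecessor ends with
 * "jeq #0x86dd" and its true edge leads, through code that leaves the
 * accumulator unchanged, to a child performing the same "jeq #0x86dd".
 * Since the aval and oval pairs match and the edge was a true branch,
 * the child's test must also succeed, so fold_edge() returns
 * JT(child) and opt_j() can route the edge past the redundant test.
 */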

static void
opt_j(ep)
	struct edge *ep;
{
	register int i, k;
	register struct block *target;

	if (JT(ep->succ) == 0)
		return;

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < edgewords; ++i) {
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			k = ffs(x) - 1;
			x &=~ (1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}


static void
or_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JT(*diffp) != JT(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JF(*diffp);
		at_top = 0;
	}
	samep = &JF(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JT(*samep) != JT(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
and_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
opt_blks(root, do_stmts)
	struct block *root;
	int do_stmts;
{
	int i, maxlevel;
	struct block *p;

	init_val();
	maxlevel = root->level;

	find_inedges(root);
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link)
			opt_blk(p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 */
		return;

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			opt_j(&p->et);
			opt_j(&p->ef);
		}
	}

	find_inedges(root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			or_pullup(p);
			and_pullup(p);
		}
	}
}

static inline void
link_inedge(parent, child)
	struct edge *parent;
	struct block *child;
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}

static void
find_inedges(root)
	struct block *root;
{
	int i;
	struct block *b;

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (i = root->level; i > 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}

static void
opt_root(b)
	struct block **b;
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}

static void
opt_loop(root, do_stmts)
	struct block *root;
	int do_stmts;
{

#ifdef BDEBUG
	if (dflag > 1) {
		printf("opt_loop(root, %d) begin\n", do_stmts);
		opt_dump(root);
	}
#endif
	do {
		done = 1;
		find_levels(root);
		find_dom(root);
		find_closure(root);
		find_ud(root);
		find_edom(root);
		opt_blks(root, do_stmts);
#ifdef BDEBUG
		if (dflag > 1) {
			printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
			opt_dump(root);
		}
#endif
	} while (!done);
}

/*
 * Optimize the filter code in its dag representation.
 */
void
bpf_optimize(rootp)
	struct block **rootp;
{
	struct block *root;

	root = *rootp;

	opt_init(root);
	opt_loop(root, 0);
	opt_loop(root, 1);
	intern_blocks(root);
#ifdef BDEBUG
	if (dflag > 1) {
		printf("after intern_blocks()\n");
		opt_dump(root);
	}
#endif
	opt_root(rootp);
#ifdef BDEBUG
	if (dflag > 1) {
		printf("after opt_root()\n");
		opt_dump(root);
	}
#endif
	opt_cleanup();
}

static void
make_marks(p)
	struct block *p;
{
	if (!isMarked(p)) {
		Mark(p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(JT(p));
			make_marks(JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(i) is true
 * only for nodes that are alive.
 */
static void
mark_code(p)
	struct block *p;
{
	cur_mark += 1;
	make_marks(p);
}

/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(x, y)
	struct slist *x, *y;
{
	while (1) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}

static inline int
eq_blk(b0, b1)
	struct block *b0, *b1;
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}

static void
intern_blocks(root)
	struct block *root;
{
	struct block *p;
	int i, j;
	int done1; /* don't shadow global */
 top:
	done1 = 1;
	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	mark_code(root);

	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
			continue;
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
				continue;
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < n_blocks; ++i) {
		p = blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done1 = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done1 = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done1)
		goto top;
}
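
/*
 * Example (editor's illustration): a filter such as "tcp or udp"
 * typically generates several structurally identical accept and
 * reject leaves (e.g. "ret #0" blocks).  eq_blk() recognizes the
 * duplicates, one block's 'link' is pointed at its equivalent twin,
 * and every edge into the duplicate is redirected, so only a single
 * copy survives into the output array.
 */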

static void
opt_cleanup()
{
	free((void *)vnode_base);
	free((void *)vmap);
	free((void *)edges);
	free((void *)space);
	free((void *)levels);
	free((void *)blocks);
}

/*
 * Return the number of stmts in 's'.
 */
static int
slength(s)
	struct slist *s;
{
	int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}

/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(p)
	struct block *p;
{
	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
}

/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return;

	Mark(p);
	n = n_blocks++;
	p->id = n;
	blocks[n] = p;

	number_blks_r(JT(p));
	number_blks_r(JF(p));
}

/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
static int
count_stmts(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	n = count_stmts(JT(p)) + count_stmts(JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
}

/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(root)
	struct block *root;
{
	bpf_u_int32 *p;
	int i, n, max_stmts;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll();
	n = count_blocks(root);
	blocks = (struct block **)calloc(n, sizeof(*blocks));
	if (blocks == NULL)
		bpf_error("malloc");
	unMarkAll();
	n_blocks = 0;
	number_blks_r(root);

	n_edges = 2 * n_blocks;
	edges = (struct edge **)calloc(n_edges, sizeof(*edges));
	if (edges == NULL)
		bpf_error("malloc");

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	levels = (struct block **)calloc(n_blocks, sizeof(*levels));
	if (levels == NULL)
		bpf_error("malloc");

	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;

	/* XXX */
	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
				 + n_edges * edgewords * sizeof(*space));
	if (space == NULL)
		bpf_error("malloc");
	p = space;
	all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->dom = p;
		p += nodewords;
	}
	all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->closure = p;
		p += nodewords;
	}
	all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = blocks[i];

		b->et.edom = p;
		p += edgewords;
		b->ef.edom = p;
		p += edgewords;
		b->et.id = i;
		edges[i] = &b->et;
		b->ef.id = n_blocks + i;
		edges[n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	maxval = 3 * max_stmts;
	vmap = (struct vmapinfo *)calloc(maxval, sizeof(*vmap));
	vnode_base = (struct valnode *)calloc(maxval, sizeof(*vnode_base));
	if (vmap == NULL || vnode_base == NULL)
		bpf_error("malloc");
}

/*
 * Some pointers used to convert the basic block form of the code,
 * into the array form that BPF requires.  'fstart' will point to
 * the malloc'd array while 'ftail' is used during the recursive traversal.
 */
static struct bpf_insn *fstart;
static struct bpf_insn *ftail;

#ifdef BDEBUG
int bids[1000];
#endif

/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(p)
	struct block *p;
{
	struct bpf_insn *dst;
	struct slist *src;
	int slen;
	u_int off;
	int extrajmps;		/* number of extra jumps inserted */
	struct slist **offset = NULL;

	if (p == 0 || isMarked(p))
		return (1);
	Mark(p);

	if (convert_code_r(JF(p)) == 0)
		return (0);
	if (convert_code_r(JT(p)) == 0)
		return (0);

	slen = slength(p->stmts);
	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = dst - fstart;

	/* generate offset[] for convenience  */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset) {
			bpf_error("not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}

	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				bpf_error("illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;

	    {
		int i;
		int jt, jf;
		const char *ljerr = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			bpf_error(ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}

		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				dst->jt = i - off - 1;
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				dst->jf = i - off - 1;
				jf++;
			}
		}
		if (!jt || !jf) {
			bpf_error(ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		++off;
	}
	if (offset)
		free(offset);

#ifdef BDEBUG
	bids[dst - fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
		    /* offset too large for branch, must add a jump */
		    if (p->longjt == 0) {
			/* mark this instruction and retry */
			p->longjt++;
			return(0);
		    }
		    /* branch if T to following jump */
		    dst->jt = extrajmps;
		    extrajmps++;
		    dst[extrajmps].code = BPF_JMP|BPF_JA;
		    dst[extrajmps].k = off - extrajmps;
		}
		else
		    dst->jt = off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
		    /* offset too large for branch, must add a jump */
		    if (p->longjf == 0) {
			/* mark this instruction and retry */
			p->longjf++;
			return(0);
		    }
		    /* branch if F to following jump */
		    /* if two jumps are inserted, F goes to second one */
		    dst->jf = extrajmps;
		    extrajmps++;
		    dst[extrajmps].code = BPF_JMP|BPF_JA;
		    dst[extrajmps].k = off - extrajmps;
		}
		else
		    dst->jf = off;
	}
	return (1);
}
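
/*
 * Example (editor's illustration): BPF conditional jumps carry 8-bit
 * offsets.  If JT(p) lies, say, 300 instructions away, the first
 * conversion attempt fails, p->longjt is bumped, and the caller
 * re-runs the conversion; on the retry this block is sized one
 * instruction larger and the branch targets an adjacent BPF_JMP|BPF_JA
 * instruction that carries the full offset in its 32-bit k field.
 */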


/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 *
 * This routine does *NOT* leak the memory pointed to by fp.  It *must
 * not* do free(fp) before returning fp; doing so would make no sense,
 * as the BPF array pointed to by the return value of icode_to_fcode()
 * must be valid - it's being returned for use in a bpf_program structure.
 *
 * If it appears that icode_to_fcode() is leaking, the problem is that
 * the program using pcap_compile() is failing to free the memory in
 * the BPF program when it's done - the leak is in the program, not in
 * the routine that happens to be allocating the memory.  (By analogy, if
 * a program calls fopen() without ever calling fclose() on the FILE *,
 * it will leak the FILE structure; the leak is not in fopen(), it's in
 * the program.)  Change the program to use pcap_freecode() when it's
 * done with the filter program.  See the pcap man page.
 */
struct bpf_insn *
icode_to_fcode(root, lenp)
	struct block *root;
	int *lenp;
{
	int n;
	struct bpf_insn *fp;

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	while (1) {
	    unMarkAll();
	    n = *lenp = count_stmts(root);

	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
	    if (fp == NULL)
		    bpf_error("malloc");
	    memset((char *)fp, 0, sizeof(*fp) * n);
	    fstart = fp;
	    ftail = fp + n;

	    unMarkAll();
	    if (convert_code_r(root))
		break;
	    free(fp);
	}

	return fp;
}

/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
int
install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
	size_t prog_size;

	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);

	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		snprintf(p->errbuf, sizeof(p->errbuf),
			 "malloc: %s", pcap_strerror(errno));
		return (-1);
	}
	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
	return (0);
}

#ifdef BDEBUG
static void
opt_dump(root)
	struct block *root;
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(root, &f.bf_len);
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
}
#endif