2 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that: (1) source code distributions
7 * retain the above copyright notice and this paragraph in its entirety, (2)
8 * distributions including binary code include the above copyright notice and
9 * this paragraph in its entirety in the documentation or other materials
10 * provided with the distribution, and (3) all advertising materials mentioning
11 * features or use of this software display the following acknowledgement:
12 * ``This product includes software developed by the University of California,
13 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14 * the University nor the names of its contributors may be used to endorse
15 * or promote products derived from this software without specific prior
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
21 * Optimization module for tcpdump intermediate representation.
24 static const char rcsid[] _U_ =
25 "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.76.2.3 2003/12/22 00:26:36 guy Exp $ (LBL)";
42 #ifdef HAVE_OS_PROTO_H
50 #define A_ATOM BPF_MEMWORDS
51 #define X_ATOM (BPF_MEMWORDS+1)
56 * This define is used to represent *both* the accumulator and
57 * x register in use-def computations.
58 * Currently, the use-def code assumes only one definition per instruction.
60 #define AX_ATOM N_ATOMS
63 * A flag to indicate that further optimization is needed.
64 * Iterative passes are continued until a given pass yields no branch movement.
70 * A block is marked if and only if its mark equals the current mark.
71 * Rather than traverse the code array, marking each item, 'cur_mark' is
72 * incremented. This automatically makes each element unmarked.
75 #define isMarked(p) ((p)->mark == cur_mark)
76 #define unMarkAll() cur_mark += 1
77 #define Mark(p) ((p)->mark = cur_mark)
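/*
 * Illustrative sketch (not original code): because a node counts as marked
 * only while its 'mark' field equals 'cur_mark', bumping 'cur_mark' in
 * unMarkAll() unmarks every node in constant time.  The 'example_*' names
 * below are assumptions used purely for illustration.
 */
#if 0
struct example_node { int mark; };
static int example_cur_mark;

static void
example_mark_scheme(struct example_node *n)
{
	n->mark = example_cur_mark;	/* Mark(n): n is now marked */
	/* isMarked(n) is true here: n->mark == example_cur_mark */
	example_cur_mark += 1;		/* unMarkAll(): every node unmarked */
	/* isMarked(n) is now false, without touching n->mark */
}
#endif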
79 static void opt_init(struct block *);
80 static void opt_cleanup(void);
82 static void make_marks(struct block *);
83 static void mark_code(struct block *);
85 static void intern_blocks(struct block *);
87 static int eq_slist(struct slist *, struct slist *);
89 static void find_levels_r(struct block *);
91 static void find_levels(struct block *);
92 static void find_dom(struct block *);
93 static void propedom(struct edge *);
94 static void find_edom(struct block *);
95 static void find_closure(struct block *);
96 static int atomuse(struct stmt *);
97 static int atomdef(struct stmt *);
98 static void compute_local_ud(struct block *);
99 static void find_ud(struct block *);
100 static void init_val(void);
101 static int F(int, int, int);
102 static inline void vstore(struct stmt *, int *, int, int);
103 static void opt_blk(struct block *, int);
104 static int use_conflict(struct block *, struct block *);
105 static void opt_j(struct edge *);
106 static void or_pullup(struct block *);
107 static void and_pullup(struct block *);
108 static void opt_blks(struct block *, int);
109 static inline void link_inedge(struct edge *, struct block *);
110 static void find_inedges(struct block *);
111 static void opt_root(struct block **);
112 static void opt_loop(struct block *, int);
113 static void fold_op(struct stmt *, int, int);
114 static inline struct slist *this_op(struct slist *);
115 static void opt_not(struct block *);
116 static void opt_peep(struct block *);
117 static void opt_stmt(struct stmt *, int[], int);
118 static void deadstmt(struct stmt *, struct stmt *[]);
119 static void opt_deadstores(struct block *);
120 static struct block *fold_edge(struct block *, struct edge *);
121 static inline int eq_blk(struct block *, struct block *);
122 static int slength(struct slist *);
123 static int count_blocks(struct block *);
124 static void number_blks_r(struct block *);
125 static int count_stmts(struct block *);
126 static int convert_code_r(struct block *);
128 static void opt_dump(struct block *);
132 struct block **blocks;
137 * A bit vector set representation of the dominators.
138 * We round up the set size to the next power of two.
140 static int nodewords;
141 static int edgewords;
142 struct block **levels;
144 #define BITS_PER_WORD (8*sizeof(bpf_u_int32))
146 * True if a is in uset {p}
148 #define SET_MEMBER(p, a) \
149 ((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))
154 #define SET_INSERT(p, a) \
155 (p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))
158 * Delete 'a' from uset p.
160 #define SET_DELETE(p, a) \
161 (p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))
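/*
 * Illustrative sketch (assumption, not original code): a uset is an array of
 * bpf_u_int32 words addressed bit-by-bit, so member 'a' lives in word
 * a / BITS_PER_WORD at bit a % BITS_PER_WORD.
 */
#if 0
static void
uset_bit_example(void)
{
	bpf_u_int32 set[2] = { 0, 0 };	/* room for 64 members */

	SET_INSERT(set, 3);		/* word 0, bit 3 */
	SET_INSERT(set, 40);		/* word 1, bit 8 */
	if (SET_MEMBER(set, 3))
		SET_DELETE(set, 3);	/* only member 40 remains */
}
#endif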
166 #define SET_INTERSECT(a, b, n)\
168 register bpf_u_int32 *_x = a, *_y = b;\
169 register int _n = n;\
170 while (--_n >= 0) *_x++ &= *_y++;\
176 #define SET_SUBTRACT(a, b, n)\
178 register bpf_u_int32 *_x = a, *_y = b;\
179 register int _n = n;\
180 while (--_n >= 0) *_x++ &=~ *_y++;\
186 #define SET_UNION(a, b, n)\
188 register bpf_u_int32 *_x = a, *_y = b;\
189 register int _n = n;\
190 while (--_n >= 0) *_x++ |= *_y++;\
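/*
 * Illustrative sketch (assumption, not original code): the three macros above
 * combine whole sets word-at-a-time, with 'n' counting words (nodewords or
 * edgewords below), not bits.
 */
#if 0
static void
uset_ops_example(void)
{
	bpf_u_int32 a[1] = { 0x0f };	/* {0,1,2,3} */
	bpf_u_int32 b[1] = { 0x05 };	/* {0,2} */

	SET_INTERSECT(a, b, 1);		/* a is now {0,2} */
	SET_SUBTRACT(a, b, 1);		/* a is now empty */
	SET_UNION(a, b, 1);		/* a is now {0,2} again */
}
#endif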
193 static uset all_dom_sets;
194 static uset all_closure_sets;
195 static uset all_edge_sets;
198 #define MAX(a,b) ((a)>(b)?(a):(b))
214 find_levels_r(JT(b));
215 find_levels_r(JF(b));
216 level = MAX(JT(b)->level, JF(b)->level) + 1;
220 b->link = levels[level];
225 * Level graph. The levels go from 0 at the leaves to
226 * N_LEVELS at the root. The levels[] array points to the
227 * first node of the level list, whose elements are linked
228 * with the 'link' field of the struct block.
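/*
 * Minimal model (assumption, not original code) of the leveling rule used by
 * find_levels_r(): leaves sit at level 0 and every interior block sits one
 * level above its deeper successor, so walking levels[] from root->level down
 * to 0 visits every block before its successors.
 */
#if 0
static int
example_level(struct block *b)
{
	if (JT(b) == 0)		/* leaf (return block) */
		return 0;
	return MAX(example_level(JT(b)), example_level(JF(b))) + 1;
}
#endif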
234 memset((char *)levels, 0, n_blocks * sizeof(*levels));
240 * Find dominator relationships.
241 * Assumes graph has been leveled.
252 * Initialize sets to contain all nodes.
255 i = n_blocks * nodewords;
258 /* Root starts off empty. */
259 for (i = nodewords; --i >= 0;)
262 /* root->level is the highest level number found. */
263 for (i = root->level; i >= 0; --i) {
264 for (b = levels[i]; b; b = b->link) {
265 SET_INSERT(b->dom, b->id);
268 SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
269 SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
278 SET_INSERT(ep->edom, ep->id);
280 SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
281 SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
286 * Compute edge dominators.
287 * Assumes graph has been leveled and predecessors established.
298 for (i = n_edges * edgewords; --i >= 0; )
301 /* root->level is the highest level number found. */
302 memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
303 memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
304 for (i = root->level; i >= 0; --i) {
305 for (b = levels[i]; b != 0; b = b->link) {
313 * Find the backwards transitive closure of the flow graph. These sets
314 * are backwards in the sense that we find the set of nodes that reach
315 * a given node, not the set of nodes that can be reached by a node.
317 * Assumes graph has been leveled.
327 * Initialize sets to contain no nodes.
329 memset((char *)all_closure_sets, 0,
330 n_blocks * nodewords * sizeof(*all_closure_sets));
332 /* root->level is the highest level number found. */
333 for (i = root->level; i >= 0; --i) {
334 for (b = levels[i]; b; b = b->link) {
335 SET_INSERT(b->closure, b->id);
338 SET_UNION(JT(b)->closure, b->closure, nodewords);
339 SET_UNION(JF(b)->closure, b->closure, nodewords);
345 * Return the register number that is used by s. If A and X are both
346 * used, return AX_ATOM. If no register is used, return -1.
348 * The implementation should probably change to an array access.
354 register int c = s->code;
359 switch (BPF_CLASS(c)) {
362 return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
363 (BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;
367 return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
368 (BPF_MODE(c) == BPF_MEM) ? s->k : -1;
378 if (BPF_SRC(c) == BPF_X)
383 return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
390 * Return the register number that is defined by 's'. We assume that
391 * a single stmt cannot define more than one register. If no register
392 * is defined, return -1.
394 * The implementation should probably change to an array access.
403 switch (BPF_CLASS(s->code)) {
417 return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
427 atomset def = 0, use = 0, kill = 0;
430 for (s = b->stmts; s; s = s->next) {
431 if (s->s.code == NOP)
433 atom = atomuse(&s->s);
435 if (atom == AX_ATOM) {
436 if (!ATOMELEM(def, X_ATOM))
437 use |= ATOMMASK(X_ATOM);
438 if (!ATOMELEM(def, A_ATOM))
439 use |= ATOMMASK(A_ATOM);
441 else if (atom < N_ATOMS) {
442 if (!ATOMELEM(def, atom))
443 use |= ATOMMASK(atom);
448 atom = atomdef(&s->s);
450 if (!ATOMELEM(use, atom))
451 kill |= ATOMMASK(atom);
452 def |= ATOMMASK(atom);
455 if (!ATOMELEM(def, A_ATOM) && BPF_CLASS(b->s.code) == BPF_JMP)
456 use |= ATOMMASK(A_ATOM);
464 * Assume graph is already leveled.
474 * root->level is the highest level number found;
475 * count down from there.
477 maxlevel = root->level;
478 for (i = maxlevel; i >= 0; --i)
479 for (p = levels[i]; p; p = p->link) {
484 for (i = 1; i <= maxlevel; ++i) {
485 for (p = levels[i]; p; p = p->link) {
486 p->out_use |= JT(p)->in_use | JF(p)->in_use;
487 p->in_use |= p->out_use &~ p->kill;
493 * These data structures are used in a Cocke and Schwartz style
494 * value numbering scheme. Since the flowgraph is acyclic,
495 * exit values can be propagated from a node's predecessors
496 * provided it is uniquely defined.
502 struct valnode *next;
506 static struct valnode *hashtbl[MODULUS];
510 /* Integer constants mapped with the load immediate opcode. */
511 #define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)
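/*
 * Illustrative sketch (assumption, not original code): F() interns
 * (opcode, val, val) triples, so two computations described by the same
 * triple receive the same value number, and K(i) names the value number of
 * the constant i.  Requires init_val() to have been called.
 */
#if 0
static void
value_numbering_example(void)
{
	int v1 = K(4);
	int v2 = F(BPF_LD|BPF_IMM|BPF_W, 4, 0L);

	/* v1 == v2: same triple, same value number; vmap[v1].is_const is set */
}
#endif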
518 struct vmapinfo *vmap;
519 struct valnode *vnode_base;
520 struct valnode *next_vnode;
526 next_vnode = vnode_base;
527 memset((char *)vmap, 0, maxval * sizeof(*vmap));
528 memset((char *)hashtbl, 0, sizeof hashtbl);
531 /* Because we really don't have an IR, this stuff is a little messy. */
541 hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
544 for (p = hashtbl[hash]; p; p = p->next)
545 if (p->code == code && p->v0 == v0 && p->v1 == v1)
549 if (BPF_MODE(code) == BPF_IMM &&
550 (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
551 vmap[val].const_val = v0;
552 vmap[val].is_const = 1;
559 p->next = hashtbl[hash];
566 vstore(s, valp, newval, alter)
572 if (alter && *valp == newval)
585 a = vmap[v0].const_val;
586 b = vmap[v1].const_val;
588 switch (BPF_OP(s->code)) {
603 bpf_error("division by zero");
631 s->code = BPF_LD|BPF_IMM;
635 static inline struct slist *
639 while (s != 0 && s->s.code == NOP)
648 struct block *tmp = JT(b);
659 struct slist *next, *last;
671 next = this_op(s->next);
677 * st M[k] --> st M[k]
680 if (s->s.code == BPF_ST &&
681 next->s.code == (BPF_LDX|BPF_MEM) &&
682 s->s.k == next->s.k) {
684 next->s.code = BPF_MISC|BPF_TAX;
690 if (s->s.code == (BPF_LD|BPF_IMM) &&
691 next->s.code == (BPF_MISC|BPF_TAX)) {
692 s->s.code = BPF_LDX|BPF_IMM;
693 next->s.code = BPF_MISC|BPF_TXA;
697 * This is an ugly special case, but it happens
698 * when you say tcp[k] or udp[k] where k is a constant.
700 if (s->s.code == (BPF_LD|BPF_IMM)) {
701 struct slist *add, *tax, *ild;
704 * Check that X isn't used on exit from this
705 * block (which the optimizer might cause).
706 * We know the code generator won't generate
707 * any local dependencies.
709 if (ATOMELEM(b->out_use, X_ATOM))
712 if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
715 add = this_op(next->next);
716 if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
719 tax = this_op(add->next);
720 if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
723 ild = this_op(tax->next);
724 if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
725 BPF_MODE(ild->s.code) != BPF_IND)
728 * XXX We need to check that X is not
729 * subsequently used. We know we can eliminate the
730 * accumulator modifications since it is defined
731 * by the last stmt of this sequence.
733 * We want to turn this sequence:
736 * (005) ldxms [14] {next} -- optional
739 * (008) ild [x+0] {ild}
741 * into this sequence:
759 * If we have a subtract to do a comparison, and the X register
760 * is a known constant, we can merge this value into the comparison:
763 if (BPF_OP(b->s.code) == BPF_JEQ) {
764 if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
765 !ATOMELEM(b->out_use, A_ATOM)) {
766 val = b->val[X_ATOM];
767 if (vmap[val].is_const) {
772 b->s.k += vmap[val].const_val;
775 } else if (b->s.k == 0) {
781 b->s.code = BPF_CLASS(b->s.code) |
782 BPF_OP(b->s.code) | BPF_X;
787 * Likewise, a constant subtract can be simplified.
789 else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
790 !ATOMELEM(b->out_use, A_ATOM)) {
801 if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
802 !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
804 b->s.code = BPF_JMP|BPF_K|BPF_JSET;
811 * jset #ffffffff -> always
813 if (b->s.code == (BPF_JMP|BPF_K|BPF_JSET)) {
816 if (b->s.k == 0xffffffff)
820 * If the accumulator is a known constant, we can compute the comparison result.
823 val = b->val[A_ATOM];
824 if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
825 bpf_int32 v = vmap[val].const_val;
826 switch (BPF_OP(b->s.code)) {
833 v = (unsigned)v > b->s.k;
837 v = (unsigned)v >= b->s.k;
857 * Compute the symbolic value of the expression 's', and update
858 * anything it defines in the value table 'val'. If 'alter' is true,
859 * do various optimizations. This code would be cleaner if symbolic
860 * evaluation and code transformations weren't folded together.
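/*
 * Illustrative sketch (assumption, not original code): one rewrite opt_stmt()
 * applies when 'alter' is set turns an indirect load whose index register
 * holds a known constant into an absolute load, e.g.
 *
 *	ldx #4
 *	ld  [x + 10]	becomes		ld  [14]
 *
 * A stand-alone model of just that rewrite:
 */
#if 0
static void
fold_ind_load_example(struct stmt *s, int x_const)
{
	/* assumes BPF_MODE(s->code) == BPF_IND and X holds x_const */
	s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
	s->k += x_const;
}
#endif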
863 opt_stmt(s, val, alter)
873 case BPF_LD|BPF_ABS|BPF_W:
874 case BPF_LD|BPF_ABS|BPF_H:
875 case BPF_LD|BPF_ABS|BPF_B:
876 v = F(s->code, s->k, 0L);
877 vstore(s, &val[A_ATOM], v, alter);
880 case BPF_LD|BPF_IND|BPF_W:
881 case BPF_LD|BPF_IND|BPF_H:
882 case BPF_LD|BPF_IND|BPF_B:
884 if (alter && vmap[v].is_const) {
885 s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
886 s->k += vmap[v].const_val;
887 v = F(s->code, s->k, 0L);
891 v = F(s->code, s->k, v);
892 vstore(s, &val[A_ATOM], v, alter);
896 v = F(s->code, 0L, 0L);
897 vstore(s, &val[A_ATOM], v, alter);
902 vstore(s, &val[A_ATOM], v, alter);
905 case BPF_LDX|BPF_IMM:
907 vstore(s, &val[X_ATOM], v, alter);
910 case BPF_LDX|BPF_MSH|BPF_B:
911 v = F(s->code, s->k, 0L);
912 vstore(s, &val[X_ATOM], v, alter);
915 case BPF_ALU|BPF_NEG:
916 if (alter && vmap[val[A_ATOM]].is_const) {
917 s->code = BPF_LD|BPF_IMM;
918 s->k = -vmap[val[A_ATOM]].const_val;
919 val[A_ATOM] = K(s->k);
922 val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
925 case BPF_ALU|BPF_ADD|BPF_K:
926 case BPF_ALU|BPF_SUB|BPF_K:
927 case BPF_ALU|BPF_MUL|BPF_K:
928 case BPF_ALU|BPF_DIV|BPF_K:
929 case BPF_ALU|BPF_AND|BPF_K:
930 case BPF_ALU|BPF_OR|BPF_K:
931 case BPF_ALU|BPF_LSH|BPF_K:
932 case BPF_ALU|BPF_RSH|BPF_K:
933 op = BPF_OP(s->code);
936 /* don't optimize away "sub #0"
937 * as it may be needed later to
938 * fixup the generated math code */
940 op == BPF_LSH || op == BPF_RSH ||
945 if (op == BPF_MUL || op == BPF_AND) {
946 s->code = BPF_LD|BPF_IMM;
947 val[A_ATOM] = K(s->k);
951 if (vmap[val[A_ATOM]].is_const) {
952 fold_op(s, val[A_ATOM], K(s->k));
953 val[A_ATOM] = K(s->k);
957 val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
960 case BPF_ALU|BPF_ADD|BPF_X:
961 case BPF_ALU|BPF_SUB|BPF_X:
962 case BPF_ALU|BPF_MUL|BPF_X:
963 case BPF_ALU|BPF_DIV|BPF_X:
964 case BPF_ALU|BPF_AND|BPF_X:
965 case BPF_ALU|BPF_OR|BPF_X:
966 case BPF_ALU|BPF_LSH|BPF_X:
967 case BPF_ALU|BPF_RSH|BPF_X:
968 op = BPF_OP(s->code);
969 if (alter && vmap[val[X_ATOM]].is_const) {
970 if (vmap[val[A_ATOM]].is_const) {
971 fold_op(s, val[A_ATOM], val[X_ATOM]);
972 val[A_ATOM] = K(s->k);
975 s->code = BPF_ALU|BPF_K|op;
976 s->k = vmap[val[X_ATOM]].const_val;
979 F(s->code, val[A_ATOM], K(s->k));
984 * Check if we're doing something to an accumulator
985 * that is 0, and simplify. This may not seem like
986 * much of a simplification but it could open up further optimizations.
988 * XXX We could also check for mul by 1, etc.
990 if (alter && vmap[val[A_ATOM]].is_const
991 && vmap[val[A_ATOM]].const_val == 0) {
992 if (op == BPF_ADD || op == BPF_OR) {
993 s->code = BPF_MISC|BPF_TXA;
994 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
997 else if (op == BPF_MUL || op == BPF_DIV ||
998 op == BPF_AND || op == BPF_LSH || op == BPF_RSH) {
999 s->code = BPF_LD|BPF_IMM;
1001 vstore(s, &val[A_ATOM], K(s->k), alter);
1004 else if (op == BPF_NEG) {
1009 val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
1012 case BPF_MISC|BPF_TXA:
1013 vstore(s, &val[A_ATOM], val[X_ATOM], alter);
1016 case BPF_LD|BPF_MEM:
1018 if (alter && vmap[v].is_const) {
1019 s->code = BPF_LD|BPF_IMM;
1020 s->k = vmap[v].const_val;
1023 vstore(s, &val[A_ATOM], v, alter);
1026 case BPF_MISC|BPF_TAX:
1027 vstore(s, &val[X_ATOM], val[A_ATOM], alter);
1030 case BPF_LDX|BPF_MEM:
1032 if (alter && vmap[v].is_const) {
1033 s->code = BPF_LDX|BPF_IMM;
1034 s->k = vmap[v].const_val;
1037 vstore(s, &val[X_ATOM], v, alter);
1041 vstore(s, &val[s->k], val[A_ATOM], alter);
1045 vstore(s, &val[s->k], val[X_ATOM], alter);
1052 register struct stmt *s;
1053 register struct stmt *last[];
1059 if (atom == AX_ATOM) {
1070 last[atom]->code = NOP;
1078 register struct block *b;
1080 register struct slist *s;
1082 struct stmt *last[N_ATOMS];
1084 memset((char *)last, 0, sizeof last);
1086 for (s = b->stmts; s != 0; s = s->next)
1087 deadstmt(&s->s, last);
1088 deadstmt(&b->s, last);
1090 for (atom = 0; atom < N_ATOMS; ++atom)
1091 if (last[atom] && !ATOMELEM(b->out_use, atom)) {
1092 last[atom]->code = NOP;
1098 opt_blk(b, do_stmts)
1108 for (s = b->stmts; s && s->next; s = s->next)
1109 if (BPF_CLASS(s->s.code) == BPF_JMP) {
1116 * Initialize the atom values.
1117 * If we have no predecessors, everything is undefined.
1118 * Otherwise, we inherit our values from our predecessors.
1119 * If any register has an ambiguous value (i.e. control paths are
1120 * merging) give it the undefined value of 0.
1124 memset((char *)b->val, 0, sizeof(b->val));
1126 memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
1127 while ((p = p->next) != NULL) {
1128 for (i = 0; i < N_ATOMS; ++i)
1129 if (b->val[i] != p->pred->val[i])
1133 aval = b->val[A_ATOM];
1134 for (s = b->stmts; s; s = s->next)
1135 opt_stmt(&s->s, b->val, do_stmts);
1138 * This is a special case: if we don't use anything from this
1139 * block, and we load the accumulator with a value that is
1140 * already there, or if this block is a return,
1141 * eliminate all the statements.
1144 ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval) ||
1145 BPF_CLASS(b->s.code) == BPF_RET)) {
1146 if (b->stmts != 0) {
1155 * Set up values for branch optimizer.
1157 if (BPF_SRC(b->s.code) == BPF_K)
1158 b->oval = K(b->s.k);
1160 b->oval = b->val[X_ATOM];
1161 b->et.code = b->s.code;
1162 b->ef.code = -b->s.code;
1166 * Return true if any register that is used on exit from 'succ' has
1167 * an exit value that is different from the corresponding exit value from 'b'.
1171 use_conflict(b, succ)
1172 struct block *b, *succ;
1175 atomset use = succ->out_use;
1180 for (atom = 0; atom < N_ATOMS; ++atom)
1181 if (ATOMELEM(use, atom))
1182 if (b->val[atom] != succ->val[atom])
1187 static struct block *
1188 fold_edge(child, ep)
1189 struct block *child;
1193 int aval0, aval1, oval0, oval1;
1194 int code = ep->code;
1202 if (child->s.code != code)
1205 aval0 = child->val[A_ATOM];
1206 oval0 = child->oval;
1207 aval1 = ep->pred->val[A_ATOM];
1208 oval1 = ep->pred->oval;
1215 * The operands are identical, so the
1216 * result is true if a true branch was
1217 * taken to get here, otherwise false.
1219 return sense ? JT(child) : JF(child);
1221 if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
1223 * At this point, we only know the comparison if we
1224 * came down the true branch, and it was an equality
1225 * comparison with a constant. We rely on the fact that
1226 * distinct constants have distinct value numbers.
1238 register struct block *target;
1240 if (JT(ep->succ) == 0)
1243 if (JT(ep->succ) == JF(ep->succ)) {
1245 * Common branch targets can be eliminated, provided
1246 * there is no data dependency.
1248 if (!use_conflict(ep->pred, ep->succ->et.succ)) {
1250 ep->succ = JT(ep->succ);
1254 * For each edge dominator that matches the successor of this
1255 * edge, promote the edge successor to its grandchild.
1257 * XXX We violate the set abstraction here in favor of a reasonably
1261 for (i = 0; i < edgewords; ++i) {
1262 register bpf_u_int32 x = ep->edom[i];
1267 k += i * BITS_PER_WORD;
1269 target = fold_edge(ep->succ, edges[k]);
1271 * Check that there is no data dependency between
1272 * nodes that will be violated if we move the edge.
1274 if (target != 0 && !use_conflict(ep->pred, target)) {
1277 if (JT(target) != 0)
1279 * Start over unless we hit a leaf.
1295 struct block **diffp, **samep;
1303 * Make sure each predecessor loads the same value.
1306 val = ep->pred->val[A_ATOM];
1307 for (ep = ep->next; ep != 0; ep = ep->next)
1308 if (val != ep->pred->val[A_ATOM])
1311 if (JT(b->in_edges->pred) == b)
1312 diffp = &JT(b->in_edges->pred);
1314 diffp = &JF(b->in_edges->pred);
1321 if (JT(*diffp) != JT(b))
1324 if (!SET_MEMBER((*diffp)->dom, b->id))
1327 if ((*diffp)->val[A_ATOM] != val)
1330 diffp = &JF(*diffp);
1333 samep = &JF(*diffp);
1338 if (JT(*samep) != JT(b))
1341 if (!SET_MEMBER((*samep)->dom, b->id))
1344 if ((*samep)->val[A_ATOM] == val)
1347 /* XXX Need to check that there are no data dependencies
1348 between dp0 and dp1. Currently, the code generator
1349 will not produce such dependencies. */
1350 samep = &JF(*samep);
1353 /* XXX This doesn't cover everything. */
1354 for (i = 0; i < N_ATOMS; ++i)
1355 if ((*samep)->val[i] != pred->val[i])
1358 /* Pull up the node. */
1364 * At the top of the chain, each predecessor needs to point at the
1365 * pulled up node. Inside the chain, there is only one predecessor to worry about.
1369 for (ep = b->in_edges; ep != 0; ep = ep->next) {
1370 if (JT(ep->pred) == b)
1371 JT(ep->pred) = pull;
1373 JF(ep->pred) = pull;
1388 struct block **diffp, **samep;
1396 * Make sure each predecessor loads the same value.
1398 val = ep->pred->val[A_ATOM];
1399 for (ep = ep->next; ep != 0; ep = ep->next)
1400 if (val != ep->pred->val[A_ATOM])
1403 if (JT(b->in_edges->pred) == b)
1404 diffp = &JT(b->in_edges->pred);
1406 diffp = &JF(b->in_edges->pred);
1413 if (JF(*diffp) != JF(b))
1416 if (!SET_MEMBER((*diffp)->dom, b->id))
1419 if ((*diffp)->val[A_ATOM] != val)
1422 diffp = &JT(*diffp);
1425 samep = &JT(*diffp);
1430 if (JF(*samep) != JF(b))
1433 if (!SET_MEMBER((*samep)->dom, b->id))
1436 if ((*samep)->val[A_ATOM] == val)
1439 /* XXX Need to check that there are no data dependencies
1440 between diffp and samep. Currently, the code generator
1441 will not produce such dependencies. */
1442 samep = &JT(*samep);
1445 /* XXX This doesn't cover everything. */
1446 for (i = 0; i < N_ATOMS; ++i)
1447 if ((*samep)->val[i] != pred->val[i])
1450 /* Pull up the node. */
1456 * At the top of the chain, each predecessor needs to point at the
1457 * pulled up node. Inside the chain, there is only one predecessor to worry about.
1461 for (ep = b->in_edges; ep != 0; ep = ep->next) {
1462 if (JT(ep->pred) == b)
1463 JT(ep->pred) = pull;
1465 JF(ep->pred) = pull;
1475 opt_blks(root, do_stmts)
1483 maxlevel = root->level;
1486 for (i = maxlevel; i >= 0; --i)
1487 for (p = levels[i]; p; p = p->link)
1488 opt_blk(p, do_stmts);
1492 * No point trying to move branches; it can't possibly
1493 * make a difference at this point.
1497 for (i = 1; i <= maxlevel; ++i) {
1498 for (p = levels[i]; p; p = p->link) {
1505 for (i = 1; i <= maxlevel; ++i) {
1506 for (p = levels[i]; p; p = p->link) {
1514 link_inedge(parent, child)
1515 struct edge *parent;
1516 struct block *child;
1518 parent->next = child->in_edges;
1519 child->in_edges = parent;
1529 for (i = 0; i < n_blocks; ++i)
1530 blocks[i]->in_edges = 0;
1533 * Traverse the graph, adding each edge to the predecessor
1534 * list of its successors. Skip the leaves (i.e. level 0).
1536 for (i = root->level; i > 0; --i) {
1537 for (b = levels[i]; b != 0; b = b->link) {
1538 link_inedge(&b->et, JT(b));
1539 link_inedge(&b->ef, JF(b));
1548 struct slist *tmp, *s;
1552 while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
1561 * If the root node is a return, then there is no
1562 * point executing any statements (since the bpf machine
1563 * has no side effects).
1565 if (BPF_CLASS((*b)->s.code) == BPF_RET)
1570 opt_loop(root, do_stmts)
1577 printf("opt_loop(root, %d) begin\n", do_stmts);
1588 opt_blks(root, do_stmts);
1591 printf("opt_loop(root, %d) bottom, done=%d\n", do_stmts, done);
1599 * Optimize the filter code in its dag representation.
1603 struct block **rootp;
1612 intern_blocks(root);
1615 printf("after intern_blocks()\n");
1622 printf("after opt_root()\n");
1635 if (BPF_CLASS(p->s.code) != BPF_RET) {
1643 * Mark code array such that isMarked(i) is true
1644 * only for nodes that are alive.
1655 * True iff the two stmt lists load the same value from the packet into
1660 struct slist *x, *y;
1663 while (x && x->s.code == NOP)
1665 while (y && y->s.code == NOP)
1671 if (x->s.code != y->s.code || x->s.k != y->s.k)
1680 struct block *b0, *b1;
1682 if (b0->s.code == b1->s.code &&
1683 b0->s.k == b1->s.k &&
1684 b0->et.succ == b1->et.succ &&
1685 b0->ef.succ == b1->ef.succ)
1686 return eq_slist(b0->stmts, b1->stmts);
1699 for (i = 0; i < n_blocks; ++i)
1700 blocks[i]->link = 0;
1704 for (i = n_blocks - 1; --i >= 0; ) {
1705 if (!isMarked(blocks[i]))
1707 for (j = i + 1; j < n_blocks; ++j) {
1708 if (!isMarked(blocks[j]))
1710 if (eq_blk(blocks[i], blocks[j])) {
1711 blocks[i]->link = blocks[j]->link ?
1712 blocks[j]->link : blocks[j];
1717 for (i = 0; i < n_blocks; ++i) {
1723 JT(p) = JT(p)->link;
1727 JF(p) = JF(p)->link;
1737 free((void *)vnode_base);
1739 free((void *)edges);
1740 free((void *)space);
1741 free((void *)levels);
1742 free((void *)blocks);
1746 * Return the number of stmts in 's'.
1754 for (; s; s = s->next)
1755 if (s->s.code != NOP)
1761 * Return the number of nodes reachable by 'p'.
1762 * All nodes should be initially unmarked.
1768 if (p == 0 || isMarked(p))
1771 return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
1775 * Do a depth first search on the flow graph, numbering the
1776 * basic blocks, and entering them into the 'blocks' array.
1784 if (p == 0 || isMarked(p))
1792 number_blks_r(JT(p));
1793 number_blks_r(JF(p));
1797 * Return the number of stmts in the flowgraph reachable by 'p'.
1798 * The nodes should be unmarked before calling.
1800 * Note that "stmts" means "instructions", and that this includes
1802 * side-effect statements in 'p' (slength(p->stmts));
1804 * statements in the true branch from 'p' (count_stmts(JT(p)));
1806 * statements in the false branch from 'p' (count_stmts(JF(p)));
1808 * the conditional jump itself (1);
1810 * an extra long jump if the true branch requires it (p->longjt);
1812 * an extra long jump if the false branch requires it (p->longjf).
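/*
 * Worked example (values are assumptions, not original code): a block with 3
 * real statements, successors costing 2 and 1 instructions, and no long jumps
 * contributes 3 + (2 + 1) + 1 + 0 + 0 = 7 instructions to the count.
 */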
1820 if (p == 0 || isMarked(p))
1823 n = count_stmts(JT(p)) + count_stmts(JF(p));
1824 return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
1828 * Allocate memory. All allocation is done before optimization
1829 * is begun. A linear bound on the size of all data structures is computed
1830 * from the total number of blocks and/or statements.
1837 int i, n, max_stmts;
1840 * First, count the blocks, so we can malloc an array to map
1841 * block number to block. Then, put the blocks into the array.
1844 n = count_blocks(root);
1845 blocks = (struct block **)malloc(n * sizeof(*blocks));
1847 bpf_error("malloc");
1850 number_blks_r(root);
1852 n_edges = 2 * n_blocks;
1853 edges = (struct edge **)malloc(n_edges * sizeof(*edges));
1855 bpf_error("malloc");
1858 * The number of levels is bounded by the number of nodes.
1860 levels = (struct block **)malloc(n_blocks * sizeof(*levels));
1862 bpf_error("malloc");
1864 edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
1865 nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;
1868 space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
1869 + n_edges * edgewords * sizeof(*space));
1871 bpf_error("malloc");
1874 for (i = 0; i < n; ++i) {
1878 all_closure_sets = p;
1879 for (i = 0; i < n; ++i) {
1880 blocks[i]->closure = p;
1884 for (i = 0; i < n; ++i) {
1885 register struct block *b = blocks[i];
1893 b->ef.id = n_blocks + i;
1894 edges[n_blocks + i] = &b->ef;
1899 for (i = 0; i < n; ++i)
1900 max_stmts += slength(blocks[i]->stmts) + 1;
1902 * We allocate at most 3 value numbers per statement,
1903 * so this is an upper bound on the number of valnodes we'll need.
1906 maxval = 3 * max_stmts;
1907 vmap = (struct vmapinfo *)malloc(maxval * sizeof(*vmap));
1908 vnode_base = (struct valnode *)malloc(maxval * sizeof(*vnode_base));
1909 if (vmap == NULL || vnode_base == NULL)
1910 bpf_error("malloc");
1914 * Some pointers used to convert the basic block form of the code
1915 * into the array form that BPF requires. 'fstart' will point to
1916 * the malloc'd array while 'ftail' is used during the recursive traversal.
1918 static struct bpf_insn *fstart;
1919 static struct bpf_insn *ftail;
1926 * Returns true if successful. Returns false if a branch has
1927 * an offset that is too large. If so, we have marked that
1928 * branch so that on a subsequent iteration, it will be treated properly.
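/*
 * Illustrative sketch (assumption, not original code): the jt/jf fields of a
 * struct bpf_insn are 8-bit, so a conditional branch can only skip up to 255
 * instructions; longer hops get routed through a following BPF_JA, whose 'k'
 * field carries the full 32-bit offset.
 */
#if 0
static int
example_branch_fits(int off)
{
	return off <= 255;	/* otherwise convert_code_r() sets longjt/longjf */
}
#endif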
1935 struct bpf_insn *dst;
1939 int extrajmps; /* number of extra jumps inserted */
1940 struct slist **offset = NULL;
1942 if (p == 0 || isMarked(p))
1946 if (convert_code_r(JF(p)) == 0)
1948 if (convert_code_r(JT(p)) == 0)
1951 slen = slength(p->stmts);
1952 dst = ftail -= (slen + 1 + p->longjt + p->longjf);
1953 /* inflate length by any extra jumps */
1955 p->offset = dst - fstart;
1957 /* generate offset[] for convenience */
1959 offset = (struct slist **)calloc(slen, sizeof(struct slist *));
1961 bpf_error("not enough core");
1966 for (off = 0; off < slen && src; off++) {
1968 printf("off=%d src=%x\n", off, src);
1975 for (src = p->stmts; src; src = src->next) {
1976 if (src->s.code == NOP)
1978 dst->code = (u_short)src->s.code;
1981 /* fill block-local relative jump */
1982 if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
1984 if (src->s.jt || src->s.jf) {
1985 bpf_error("illegal jmp destination");
1991 if (off == slen - 2) /*???*/
1997 char *ljerr = "%s for block-local relative jump: off=%d";
2000 printf("code=%x off=%d %x %x\n", src->s.code,
2001 off, src->s.jt, src->s.jf);
2004 if (!src->s.jt || !src->s.jf) {
2005 bpf_error(ljerr, "no jmp destination", off);
2010 for (i = 0; i < slen; i++) {
2011 if (offset[i] == src->s.jt) {
2013 bpf_error(ljerr, "multiple matches", off);
2017 dst->jt = i - off - 1;
2020 if (offset[i] == src->s.jf) {
2022 bpf_error(ljerr, "multiple matches", off);
2025 dst->jf = i - off - 1;
2030 bpf_error(ljerr, "no destination found", off);
2042 bids[dst - fstart] = p->id + 1;
2044 dst->code = (u_short)p->s.code;
2048 off = JT(p)->offset - (p->offset + slen) - 1;
2050 /* offset too large for branch, must add a jump */
2051 if (p->longjt == 0) {
2052 /* mark this instruction and retry */
2056 /* branch if T to following jump */
2057 dst->jt = extrajmps;
2059 dst[extrajmps].code = BPF_JMP|BPF_JA;
2060 dst[extrajmps].k = off - extrajmps;
2064 off = JF(p)->offset - (p->offset + slen) - 1;
2066 /* offset too large for branch, must add a jump */
2067 if (p->longjf == 0) {
2068 /* mark this instruction and retry */
2072 /* branch if F to following jump */
2073 /* if two jumps are inserted, F goes to second one */
2074 dst->jf = extrajmps;
2076 dst[extrajmps].code = BPF_JMP|BPF_JA;
2077 dst[extrajmps].k = off - extrajmps;
2087 * Convert flowgraph intermediate representation to the
2088 * BPF array representation. Set *lenp to the number of instructions.
2091 icode_to_fcode(root, lenp)
2096 struct bpf_insn *fp;
2099 * Loop doing convert_code_r() until no branches remain
2100 * with too-large offsets.
2104 n = *lenp = count_stmts(root);
2106 fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
2108 bpf_error("malloc");
2109 memset((char *)fp, 0, sizeof(*fp) * n);
2114 if (convert_code_r(root))
2123 * Make a copy of a BPF program and put it in the "fcode" member of
2126 * If we fail to allocate memory for the copy, fill in the "errbuf"
2127 * member of the "pcap_t" with an error message, and return -1;
2128 * otherwise, return 0.
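/*
 * Illustrative usage sketch (assumption, not part of this file): a typical
 * caller compiles a filter, hands it to install_bpf_program(), and frees its
 * own copy, since install_bpf_program() duplicates the instructions.
 */
#if 0
static int
example_set_filter(pcap_t *p, char *expr)
{
	struct bpf_program prog;

	if (pcap_compile(p, &prog, expr, 1, 0) < 0)
		return (-1);
	if (install_bpf_program(p, &prog) < 0) {
		pcap_freecode(&prog);
		return (-1);
	}
	pcap_freecode(&prog);	/* the copy in p->fcode remains valid */
	return (0);
}
#endif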
2131 install_bpf_program(pcap_t *p, struct bpf_program *fp)
2136 * Free up any already installed program.
2138 pcap_freecode(&p->fcode);
2140 prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
2141 p->fcode.bf_len = fp->bf_len;
2142 p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
2143 if (p->fcode.bf_insns == NULL) {
2144 snprintf(p->errbuf, sizeof(p->errbuf),
2145 "malloc: %s", pcap_strerror(errno));
2148 memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
2157 struct bpf_program f;
2159 memset(bids, 0, sizeof bids);
2160 f.bf_insns = icode_to_fcode(root, &f.bf_len);
2163 free((char *)f.bf_insns);