- Don't pass a complete interface struct to icmp_error(), just pass the MTU.
1/* $FreeBSD: src/sys/contrib/pf/net/pf.c,v 1.19 2004/09/11 11:18:25 mlaier Exp $ */
2/* $OpenBSD: pf.c,v 1.433.2.2 2004/07/17 03:22:34 brad Exp $ */
3/* add $OpenBSD: pf.c,v 1.448 2004/05/11 07:34:11 dhartmei Exp $ */
4/* $DragonFly: src/sys/net/pf/pf.c,v 1.5 2005/06/15 16:32:58 joerg Exp $ */
5
6/*
7 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
8 *
9 * Copyright (c) 2001 Daniel Hartmeier
10 * Copyright (c) 2002,2003 Henning Brauer
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * - Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Effort sponsored in part by the Defense Advanced Research Projects
38 * Agency (DARPA) and Air Force Research Laboratory, Air Force
39 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
40 *
41 */
42
43#include "opt_inet.h"
44#include "opt_inet6.h"
45#include "use_pfsync.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/malloc.h>
50#include <sys/mbuf.h>
51#include <sys/filio.h>
52#include <sys/socket.h>
53#include <sys/socketvar.h>
54#include <sys/kernel.h>
55#include <sys/time.h>
56#include <sys/sysctl.h>
57#include <sys/endian.h>
58#include <vm/vm_zone.h>
59
60#include <machine/inttypes.h>
61
62#include <net/if.h>
63#include <net/if_types.h>
64#include <net/bpf.h>
65#include <net/route.h>
66
67#include <netinet/in.h>
68#include <netinet/in_var.h>
69#include <netinet/in_systm.h>
70#include <netinet/ip.h>
71#include <netinet/ip_var.h>
72#include <netinet/tcp.h>
73#include <netinet/tcp_seq.h>
74#include <netinet/udp.h>
75#include <netinet/ip_icmp.h>
76#include <netinet/in_pcb.h>
77#include <netinet/tcp_timer.h>
78#include <netinet/tcp_var.h>
79#include <netinet/udp_var.h>
80#include <netinet/icmp_var.h>
81
82#include <net/pf/pfvar.h>
83#include <net/pf/if_pflog.h>
84
85#if NPFSYNC > 0
86#include <net/pf/if_pfsync.h>
87#endif /* NPFSYNC > 0 */
88
89#ifdef INET6
90#include <netinet/ip6.h>
91#include <netinet/in_pcb.h>
92#include <netinet/icmp6.h>
93#include <netinet6/nd6.h>
94#include <netinet6/ip6_var.h>
95#include <netinet6/in6_pcb.h>
96#endif /* INET6 */
97
98#include <sys/in_cksum.h>
99#include <machine/limits.h>
100#include <sys/msgport2.h>
101#include <sys/ucred.h>
102
103extern int ip_optcopy(struct ip *, struct ip *);
104
105#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
106
107/*
108 * Global variables
109 */
110
111struct pf_anchorqueue pf_anchors;
112struct pf_ruleset pf_main_ruleset;
113struct pf_altqqueue pf_altqs[2];
114struct pf_palist pf_pabuf;
115struct pf_altqqueue *pf_altqs_active;
116struct pf_altqqueue *pf_altqs_inactive;
117struct pf_status pf_status;
118
119u_int32_t ticket_altqs_active;
120u_int32_t ticket_altqs_inactive;
121int altqs_inactive_open;
122u_int32_t ticket_pabuf;
123
124struct callout pf_expire_to; /* expire timeout */
125
126vm_zone_t pf_src_tree_pl, pf_rule_pl;
127vm_zone_t pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
128
129void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
130void pf_print_state(struct pf_state *);
131void pf_print_flags(u_int8_t);
132
133u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t,
134 u_int8_t);
135void pf_change_ap(struct pf_addr *, u_int16_t *,
136 u_int16_t *, u_int16_t *, struct pf_addr *,
137 u_int16_t, u_int8_t, sa_family_t);
138#ifdef INET6
139void pf_change_a6(struct pf_addr *, u_int16_t *,
140 struct pf_addr *, u_int8_t);
141#endif /* INET6 */
142void pf_change_icmp(struct pf_addr *, u_int16_t *,
143 struct pf_addr *, struct pf_addr *, u_int16_t,
144 u_int16_t *, u_int16_t *, u_int16_t *,
145 u_int16_t *, u_int8_t, sa_family_t);
146void pf_send_tcp(const struct pf_rule *, sa_family_t,
147 const struct pf_addr *, const struct pf_addr *,
148 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
149 u_int8_t, u_int16_t, u_int16_t, u_int8_t);
150void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
151 sa_family_t, struct pf_rule *);
152struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
153 int, int, struct pfi_kif *,
154 struct pf_addr *, u_int16_t, struct pf_addr *,
155 u_int16_t, int);
156struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
157 int, int, struct pfi_kif *, struct pf_src_node **,
158 struct pf_addr *, u_int16_t,
159 struct pf_addr *, u_int16_t,
160 struct pf_addr *, u_int16_t *);
161int pf_test_tcp(struct pf_rule **, struct pf_state **,
162 int, struct pfi_kif *, struct mbuf *, int,
163 void *, struct pf_pdesc *, struct pf_rule **,
164 struct pf_ruleset **);
165int pf_test_udp(struct pf_rule **, struct pf_state **,
166 int, struct pfi_kif *, struct mbuf *, int,
167 void *, struct pf_pdesc *, struct pf_rule **,
168 struct pf_ruleset **);
169int pf_test_icmp(struct pf_rule **, struct pf_state **,
170 int, struct pfi_kif *, struct mbuf *, int,
171 void *, struct pf_pdesc *, struct pf_rule **,
172 struct pf_ruleset **);
173int pf_test_other(struct pf_rule **, struct pf_state **,
174 int, struct pfi_kif *, struct mbuf *, int, void *,
175 struct pf_pdesc *, struct pf_rule **,
176 struct pf_ruleset **);
177int pf_test_fragment(struct pf_rule **, int,
178 struct pfi_kif *, struct mbuf *, void *,
179 struct pf_pdesc *, struct pf_rule **,
180 struct pf_ruleset **);
181int pf_test_state_tcp(struct pf_state **, int,
182 struct pfi_kif *, struct mbuf *, int,
183 void *, struct pf_pdesc *, u_short *);
184int pf_test_state_udp(struct pf_state **, int,
185 struct pfi_kif *, struct mbuf *, int,
186 void *, struct pf_pdesc *);
187int pf_test_state_icmp(struct pf_state **, int,
188 struct pfi_kif *, struct mbuf *, int,
189 void *, struct pf_pdesc *);
190int pf_test_state_other(struct pf_state **, int,
191 struct pfi_kif *, struct pf_pdesc *);
192static int pf_match_tag(struct mbuf *, struct pf_rule *,
193 struct pf_rule *, int *);
194void pf_hash(struct pf_addr *, struct pf_addr *,
195 struct pf_poolhashkey *, sa_family_t);
196int pf_map_addr(u_int8_t, struct pf_rule *,
197 struct pf_addr *, struct pf_addr *,
198 struct pf_addr *, struct pf_src_node **);
199int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
200 struct pf_addr *, struct pf_addr *, u_int16_t,
201 struct pf_addr *, u_int16_t*, u_int16_t, u_int16_t,
202 struct pf_src_node **);
203void pf_route(struct mbuf **, struct pf_rule *, int,
204 struct ifnet *, struct pf_state *);
205void pf_route6(struct mbuf **, struct pf_rule *, int,
206 struct ifnet *, struct pf_state *);
207int pf_socket_lookup(uid_t *, gid_t *,
208 int, struct pf_pdesc *);
209u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
210 sa_family_t);
211u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
212 sa_family_t);
213u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
214 u_int16_t);
215void pf_set_rt_ifp(struct pf_state *,
216 struct pf_addr *);
217int pf_check_proto_cksum(struct mbuf *, int, int,
218 u_int8_t, sa_family_t);
219int pf_addr_wrap_neq(struct pf_addr_wrap *,
220 struct pf_addr_wrap *);
221struct pf_state *pf_find_state_recurse(struct pfi_kif *,
222 struct pf_state *, u_int8_t);
223
224struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX];
225
226#define STATE_LOOKUP() \
227 do { \
228 if (direction == PF_IN) \
229 *state = pf_find_state_recurse( \
230 kif, &key, PF_EXT_GWY); \
231 else \
232 *state = pf_find_state_recurse( \
233 kif, &key, PF_LAN_EXT); \
234 if (*state == NULL) \
235 return (PF_DROP); \
236 if (direction == PF_OUT && \
237 (((*state)->rule.ptr->rt == PF_ROUTETO && \
238 (*state)->rule.ptr->direction == PF_OUT) || \
239 ((*state)->rule.ptr->rt == PF_REPLYTO && \
240 (*state)->rule.ptr->direction == PF_IN)) && \
241 (*state)->rt_kif != NULL && \
242 (*state)->rt_kif != kif) \
243 return (PF_PASS); \
244 } while (0)
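/*
 * Illustrative note (not part of the original file): STATE_LOOKUP expects
 * local "key", "kif" and "direction" variables.  Inbound packets are looked
 * up in the ext->gwy tree (addresses as they appear on the wire, before
 * un-NATing), outbound packets in the lan->ext tree.  The extra check lets
 * packets that a route-to/reply-to rule will send out another interface
 * pass here, so they are filtered on that interface instead.
 */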
245
246#define STATE_TRANSLATE(s) \
247 (s)->lan.addr.addr32[0] != (s)->gwy.addr.addr32[0] || \
248 ((s)->af == AF_INET6 && \
249 ((s)->lan.addr.addr32[1] != (s)->gwy.addr.addr32[1] || \
250 (s)->lan.addr.addr32[2] != (s)->gwy.addr.addr32[2] || \
251 (s)->lan.addr.addr32[3] != (s)->gwy.addr.addr32[3])) || \
252 (s)->lan.port != (s)->gwy.port
253
254#define BOUND_IFACE(r, k) (((r)->rule_flag & PFRULE_IFBOUND) ? (k) : \
255 ((r)->rule_flag & PFRULE_GRBOUND) ? (k)->pfik_parent : \
256 (k)->pfik_parent->pfik_parent)
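/*
 * Illustrative note (not part of the original file): BOUND_IFACE selects
 * the pfi_kif a new state is attached to.  If-bound rules keep the state on
 * the interface itself, group-bound rules use the interface's parent kif
 * (its driver group, e.g. "fxp"), and floating states use the grandparent,
 * the root kif covering all interfaces.
 */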
257
258static int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
259static int pf_state_compare_lan_ext(struct pf_state *,
260 struct pf_state *);
261static int pf_state_compare_ext_gwy(struct pf_state *,
262 struct pf_state *);
263static int pf_state_compare_id(struct pf_state *,
264 struct pf_state *);
265
266struct pf_src_tree tree_src_tracking;
267
268struct pf_state_tree_id tree_id;
269struct pf_state_queue state_updates;
270
271RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
272RB_GENERATE(pf_state_tree_lan_ext, pf_state,
273 u.s.entry_lan_ext, pf_state_compare_lan_ext);
274RB_GENERATE(pf_state_tree_ext_gwy, pf_state,
275 u.s.entry_ext_gwy, pf_state_compare_ext_gwy);
276RB_GENERATE(pf_state_tree_id, pf_state,
277 u.s.entry_id, pf_state_compare_id);
278
279static int
280pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
281{
282 int diff;
283
284 if (a->rule.ptr > b->rule.ptr)
285 return (1);
286 if (a->rule.ptr < b->rule.ptr)
287 return (-1);
288 if ((diff = a->af - b->af) != 0)
289 return (diff);
290 switch (a->af) {
291#ifdef INET
292 case AF_INET:
293 if (a->addr.addr32[0] > b->addr.addr32[0])
294 return (1);
295 if (a->addr.addr32[0] < b->addr.addr32[0])
296 return (-1);
297 break;
298#endif /* INET */
299#ifdef INET6
300 case AF_INET6:
301 if (a->addr.addr32[3] > b->addr.addr32[3])
302 return (1);
303 if (a->addr.addr32[3] < b->addr.addr32[3])
304 return (-1);
305 if (a->addr.addr32[2] > b->addr.addr32[2])
306 return (1);
307 if (a->addr.addr32[2] < b->addr.addr32[2])
308 return (-1);
309 if (a->addr.addr32[1] > b->addr.addr32[1])
310 return (1);
311 if (a->addr.addr32[1] < b->addr.addr32[1])
312 return (-1);
313 if (a->addr.addr32[0] > b->addr.addr32[0])
314 return (1);
315 if (a->addr.addr32[0] < b->addr.addr32[0])
316 return (-1);
317 break;
318#endif /* INET6 */
319 }
320 return (0);
321}
322
323static int
324pf_state_compare_lan_ext(struct pf_state *a, struct pf_state *b)
325{
326 int diff;
327
328 if ((diff = a->proto - b->proto) != 0)
329 return (diff);
330 if ((diff = a->af - b->af) != 0)
331 return (diff);
332 switch (a->af) {
333#ifdef INET
334 case AF_INET:
335 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
336 return (1);
337 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
338 return (-1);
339 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
340 return (1);
341 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
342 return (-1);
343 break;
344#endif /* INET */
345#ifdef INET6
346 case AF_INET6:
347 if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
348 return (1);
349 if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
350 return (-1);
351 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
352 return (1);
353 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
354 return (-1);
355 if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
356 return (1);
357 if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
358 return (-1);
359 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
360 return (1);
361 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
362 return (-1);
363 if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
364 return (1);
365 if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
366 return (-1);
367 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
368 return (1);
369 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
370 return (-1);
371 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
372 return (1);
373 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
374 return (-1);
375 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
376 return (1);
377 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
378 return (-1);
379 break;
380#endif /* INET6 */
381 }
382
383 if ((diff = a->lan.port - b->lan.port) != 0)
384 return (diff);
385 if ((diff = a->ext.port - b->ext.port) != 0)
386 return (diff);
387
388 return (0);
389}
390
391static int
392pf_state_compare_ext_gwy(struct pf_state *a, struct pf_state *b)
393{
394 int diff;
395
396 if ((diff = a->proto - b->proto) != 0)
397 return (diff);
398 if ((diff = a->af - b->af) != 0)
399 return (diff);
400 switch (a->af) {
401#ifdef INET
402 case AF_INET:
403 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
404 return (1);
405 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
406 return (-1);
407 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
408 return (1);
409 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
410 return (-1);
411 break;
412#endif /* INET */
413#ifdef INET6
414 case AF_INET6:
415 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
416 return (1);
417 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
418 return (-1);
419 if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
420 return (1);
421 if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
422 return (-1);
423 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
424 return (1);
425 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
426 return (-1);
427 if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
428 return (1);
429 if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
430 return (-1);
431 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
432 return (1);
433 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
434 return (-1);
435 if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
436 return (1);
437 if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
438 return (-1);
439 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
440 return (1);
441 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
442 return (-1);
443 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
444 return (1);
445 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
446 return (-1);
447 break;
448#endif /* INET6 */
449 }
450
451 if ((diff = a->ext.port - b->ext.port) != 0)
452 return (diff);
453 if ((diff = a->gwy.port - b->gwy.port) != 0)
454 return (diff);
455
456 return (0);
457}
458
459static int
460pf_state_compare_id(struct pf_state *a, struct pf_state *b)
461{
462 if (a->id > b->id)
463 return (1);
464 if (a->id < b->id)
465 return (-1);
466 if (a->creatorid > b->creatorid)
467 return (1);
468 if (a->creatorid < b->creatorid)
469 return (-1);
470
471 return (0);
472}
473
474#ifdef INET6
475void
476pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
477{
478 switch (af) {
479#ifdef INET
480 case AF_INET:
481 dst->addr32[0] = src->addr32[0];
482 break;
483#endif /* INET */
484 case AF_INET6:
485 dst->addr32[0] = src->addr32[0];
486 dst->addr32[1] = src->addr32[1];
487 dst->addr32[2] = src->addr32[2];
488 dst->addr32[3] = src->addr32[3];
489 break;
490 }
491}
492#endif
493
494struct pf_state *
495pf_find_state_byid(struct pf_state *key)
496{
497 pf_status.fcounters[FCNT_STATE_SEARCH]++;
498 return (RB_FIND(pf_state_tree_id, &tree_id, key));
499}
500
501struct pf_state *
502pf_find_state_recurse(struct pfi_kif *kif, struct pf_state *key, u_int8_t tree)
503{
504 struct pf_state *s;
505
506 pf_status.fcounters[FCNT_STATE_SEARCH]++;
507
508 switch (tree) {
509 case PF_LAN_EXT:
510 for (; kif != NULL; kif = kif->pfik_parent) {
511 s = RB_FIND(pf_state_tree_lan_ext,
512 &kif->pfik_lan_ext, key);
513 if (s != NULL)
514 return (s);
515 }
516 return (NULL);
517 case PF_EXT_GWY:
518 for (; kif != NULL; kif = kif->pfik_parent) {
519 s = RB_FIND(pf_state_tree_ext_gwy,
520 &kif->pfik_ext_gwy, key);
521 if (s != NULL)
522 return (s);
523 }
524 return (NULL);
525 default:
526 panic("pf_find_state_recurse");
527 }
528}
529
530struct pf_state *
531pf_find_state_all(struct pf_state *key, u_int8_t tree, int *more)
532{
533 struct pf_state *s, *ss = NULL;
534 struct pfi_kif *kif;
535
536 pf_status.fcounters[FCNT_STATE_SEARCH]++;
537
538 switch (tree) {
539 case PF_LAN_EXT:
540 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
541 s = RB_FIND(pf_state_tree_lan_ext,
542 &kif->pfik_lan_ext, key);
543 if (s == NULL)
544 continue;
545 if (more == NULL)
546 return (s);
547 ss = s;
548 (*more)++;
549 }
550 return (ss);
551 case PF_EXT_GWY:
552 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
553 s = RB_FIND(pf_state_tree_ext_gwy,
554 &kif->pfik_ext_gwy, key);
555 if (s == NULL)
556 continue;
557 if (more == NULL)
558 return (s);
559 ss = s;
560 (*more)++;
561 }
562 return (ss);
563 default:
564 panic("pf_find_state_all");
565 }
566}
567
568int
569pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
570 struct pf_addr *src, sa_family_t af)
571{
572 struct pf_src_node k;
573
574 if (*sn == NULL) {
575 k.af = af;
576 PF_ACPY(&k.addr, src, af);
577 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
578 rule->rpool.opts & PF_POOL_STICKYADDR)
579 k.rule.ptr = rule;
580 else
581 k.rule.ptr = NULL;
582 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
583 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
584 }
585 if (*sn == NULL) {
586 if (!rule->max_src_nodes ||
587 rule->src_nodes < rule->max_src_nodes)
588 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT);
589 if ((*sn) == NULL)
590 return (-1);
591 bzero(*sn, sizeof(struct pf_src_node));
592 (*sn)->af = af;
593 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
594 rule->rpool.opts & PF_POOL_STICKYADDR)
595 (*sn)->rule.ptr = rule;
596 else
597 (*sn)->rule.ptr = NULL;
598 PF_ACPY(&(*sn)->addr, src, af);
599 if (RB_INSERT(pf_src_tree,
600 &tree_src_tracking, *sn) != NULL) {
601 if (pf_status.debug >= PF_DEBUG_MISC) {
602 printf("pf: src_tree insert failed: ");
603 pf_print_host(&(*sn)->addr, 0, af);
604 printf("\n");
605 }
606 pool_put(&pf_src_tree_pl, *sn);
607 return (-1);
608 }
609 (*sn)->creation = time_second;
610 (*sn)->ruletype = rule->action;
611 if ((*sn)->rule.ptr != NULL)
612 (*sn)->rule.ptr->src_nodes++;
613 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
614 pf_status.src_nodes++;
615 } else {
616 if (rule->max_src_states &&
617 (*sn)->states >= rule->max_src_states)
618 return (-1);
619 }
620 return (0);
621}
622
623int
624pf_insert_state(struct pfi_kif *kif, struct pf_state *state)
625{
626 /* Thou MUST NOT insert multiple duplicate keys */
627 state->u.s.kif = kif;
628 if (RB_INSERT(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state)) {
629 if (pf_status.debug >= PF_DEBUG_MISC) {
630 printf("pf: state insert failed: tree_lan_ext");
631 printf(" lan: ");
632 pf_print_host(&state->lan.addr, state->lan.port,
633 state->af);
634 printf(" gwy: ");
635 pf_print_host(&state->gwy.addr, state->gwy.port,
636 state->af);
637 printf(" ext: ");
638 pf_print_host(&state->ext.addr, state->ext.port,
639 state->af);
640 if (state->sync_flags & PFSTATE_FROMSYNC)
641 printf(" (from sync)");
642 printf("\n");
643 }
644 return (-1);
645 }
646
647 if (RB_INSERT(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state)) {
648 if (pf_status.debug >= PF_DEBUG_MISC) {
649 printf("pf: state insert failed: tree_ext_gwy");
650 printf(" lan: ");
651 pf_print_host(&state->lan.addr, state->lan.port,
652 state->af);
653 printf(" gwy: ");
654 pf_print_host(&state->gwy.addr, state->gwy.port,
655 state->af);
656 printf(" ext: ");
657 pf_print_host(&state->ext.addr, state->ext.port,
658 state->af);
659 if (state->sync_flags & PFSTATE_FROMSYNC)
660 printf(" (from sync)");
661 printf("\n");
662 }
663 RB_REMOVE(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state);
664 return (-1);
665 }
666
667 if (state->id == 0 && state->creatorid == 0) {
668 state->id = htobe64(pf_status.stateid++);
669 state->creatorid = pf_status.hostid;
670 }
671 if (RB_INSERT(pf_state_tree_id, &tree_id, state) != NULL) {
672 if (pf_status.debug >= PF_DEBUG_MISC) {
673 printf("pf: state insert failed: "
674 "id: %016" PRIx64 " creatorid: %08" PRIx32,
675 be64toh(state->id), ntohl(state->creatorid));
676 if (state->sync_flags & PFSTATE_FROMSYNC)
677 printf(" (from sync)");
678 printf("\n");
679 }
680 RB_REMOVE(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state);
681 RB_REMOVE(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state);
682 return (-1);
683 }
684 TAILQ_INSERT_HEAD(&state_updates, state, u.s.entry_updates);
685
686 pf_status.fcounters[FCNT_STATE_INSERT]++;
687 pf_status.states++;
688 pfi_attach_state(kif);
689#if NPFSYNC
690 pfsync_insert_state(state);
691#endif
692 return (0);
693}
694
695void
696pf_purge_timeout(void *arg)
697{
698 struct callout *to = arg;
699
700 crit_enter();
701 pf_purge_expired_states();
702 pf_purge_expired_fragments();
703 pf_purge_expired_src_nodes();
704 crit_exit();
705
706 callout_reset(to, pf_default_rule.timeout[PFTM_INTERVAL] * hz,
707 pf_purge_timeout, to);
708}
709
710u_int32_t
711pf_state_expires(const struct pf_state *state)
712{
713 u_int32_t timeout;
714 u_int32_t start;
715 u_int32_t end;
716 u_int32_t states;
717
718 /* handle all PFTM_* > PFTM_MAX here */
719 if (state->timeout == PFTM_PURGE)
720 return (time_second);
721 if (state->timeout == PFTM_UNTIL_PACKET)
722 return (0);
723 KASSERT((state->timeout < PFTM_MAX),
724 ("pf_state_expires: timeout > PFTM_MAX"));
725 timeout = state->rule.ptr->timeout[state->timeout];
726 if (!timeout)
727 timeout = pf_default_rule.timeout[state->timeout];
728 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
729 if (start) {
730 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
731 states = state->rule.ptr->states;
732 } else {
733 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
734 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
735 states = pf_status.states;
736 }
737 if (end && states > start && start < end) {
738 if (states < end)
739 return (state->expire + timeout * (end - states) /
740 (end - start));
741 else
742 return (time_second);
743 }
744 return (state->expire + timeout);
745}
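/*
 * Illustrative example (not from the original file): with adaptive.start
 * 6000, adaptive.end 12000 and tcp.established 86400, a state table holding
 * 9000 entries scales the timeout by (12000 - 9000) / (12000 - 6000) = 0.5,
 * so such states expire after 43200 seconds; at or above 12000 entries they
 * expire immediately.
 */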
746
747void
748pf_purge_expired_src_nodes(void)
749{
750 struct pf_src_node *cur, *next;
751
752 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
753 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
754
755 if (cur->states <= 0 && cur->expire <= time_second) {
756 if (cur->rule.ptr != NULL) {
757 cur->rule.ptr->src_nodes--;
758 if (cur->rule.ptr->states <= 0 &&
759 cur->rule.ptr->max_src_nodes <= 0)
760 pf_rm_rule(NULL, cur->rule.ptr);
761 }
762 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
763 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
764 pf_status.src_nodes--;
765 pool_put(&pf_src_tree_pl, cur);
766 }
767 }
768}
769
770void
771pf_src_tree_remove_state(struct pf_state *s)
772{
773 u_int32_t timeout;
774
775 if (s->src_node != NULL) {
776 if (--s->src_node->states <= 0) {
777 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
778 if (!timeout)
779 timeout =
780 pf_default_rule.timeout[PFTM_SRC_NODE];
781 s->src_node->expire = time_second + timeout;
782 }
783 }
784 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
785 if (--s->nat_src_node->states <= 0) {
786 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
787 if (!timeout)
788 timeout =
789 pf_default_rule.timeout[PFTM_SRC_NODE];
790 s->nat_src_node->expire = time_second + timeout;
791 }
792 }
793 s->src_node = s->nat_src_node = NULL;
794}
795
796void
797pf_purge_expired_states(void)
798{
799 struct pf_state *cur, *next;
800
801 for (cur = RB_MIN(pf_state_tree_id, &tree_id);
802 cur; cur = next) {
803 next = RB_NEXT(pf_state_tree_id, &tree_id, cur);
804
805 if (pf_state_expires(cur) <= time_second) {
806 if (cur->src.state == PF_TCPS_PROXY_DST)
807 pf_send_tcp(cur->rule.ptr, cur->af,
808 &cur->ext.addr, &cur->lan.addr,
809 cur->ext.port, cur->lan.port,
810 cur->src.seqhi, cur->src.seqlo + 1, 0,
811 TH_RST|TH_ACK, 0, 0);
812 RB_REMOVE(pf_state_tree_ext_gwy,
813 &cur->u.s.kif->pfik_ext_gwy, cur);
814 RB_REMOVE(pf_state_tree_lan_ext,
815 &cur->u.s.kif->pfik_lan_ext, cur);
816 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
817#if NPFSYNC
818 pfsync_delete_state(cur);
819#endif
820 pf_src_tree_remove_state(cur);
821 if (--cur->rule.ptr->states <= 0 &&
822 cur->rule.ptr->src_nodes <= 0)
823 pf_rm_rule(NULL, cur->rule.ptr);
824 if (cur->nat_rule.ptr != NULL)
825 if (--cur->nat_rule.ptr->states <= 0 &&
826 cur->nat_rule.ptr->src_nodes <= 0)
827 pf_rm_rule(NULL, cur->nat_rule.ptr);
828 if (cur->anchor.ptr != NULL)
829 if (--cur->anchor.ptr->states <= 0)
830 pf_rm_rule(NULL, cur->anchor.ptr);
831 pf_normalize_tcp_cleanup(cur);
832 pfi_detach_state(cur->u.s.kif);
833 TAILQ_REMOVE(&state_updates, cur, u.s.entry_updates);
834 pool_put(&pf_state_pl, cur);
835 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
836 pf_status.states--;
837 }
838 }
839}
840
841int
842pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
843{
844 if (aw->type != PF_ADDR_TABLE)
845 return (0);
846 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
847 return (1);
848 return (0);
849}
850
851void
852pf_tbladdr_remove(struct pf_addr_wrap *aw)
853{
854 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
855 return;
856 pfr_detach_table(aw->p.tbl);
857 aw->p.tbl = NULL;
858}
859
860void
861pf_tbladdr_copyout(struct pf_addr_wrap *aw)
862{
863 struct pfr_ktable *kt = aw->p.tbl;
864
865 if (aw->type != PF_ADDR_TABLE || kt == NULL)
866 return;
867 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
868 kt = kt->pfrkt_root;
869 aw->p.tbl = NULL;
870 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
871 kt->pfrkt_cnt : -1;
872}
873
874void
875pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
876{
877 switch (af) {
878#ifdef INET
879 case AF_INET: {
880 u_int32_t a = ntohl(addr->addr32[0]);
881 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
882 (a>>8)&255, a&255);
883 if (p) {
884 p = ntohs(p);
885 printf(":%u", p);
886 }
887 break;
888 }
889#endif /* INET */
890#ifdef INET6
891 case AF_INET6: {
892 u_int16_t b;
893 u_int8_t i, curstart = 255, curend = 0,
894 maxstart = 0, maxend = 0;
895 for (i = 0; i < 8; i++) {
896 if (!addr->addr16[i]) {
897 if (curstart == 255)
898 curstart = i;
899 else
900 curend = i;
901 } else {
902 if (curstart) {
903 if ((curend - curstart) >
904 (maxend - maxstart)) {
905 maxstart = curstart;
906 maxend = curend;
907 curstart = 255;
908 }
909 }
910 }
911 }
912 for (i = 0; i < 8; i++) {
913 if (i >= maxstart && i <= maxend) {
914 if (maxend != 7) {
915 if (i == maxstart)
916 printf(":");
917 } else {
918 if (i == maxend)
919 printf(":");
920 }
921 } else {
922 b = ntohs(addr->addr16[i]);
923 printf("%x", b);
924 if (i < 7)
925 printf(":");
926 }
927 }
928 if (p) {
929 p = ntohs(p);
930 printf("[%u]", p);
931 }
932 break;
933 }
934#endif /* INET6 */
935 }
936}
937
938void
939pf_print_state(struct pf_state *s)
940{
941 switch (s->proto) {
942 case IPPROTO_TCP:
943 printf("TCP ");
944 break;
945 case IPPROTO_UDP:
946 printf("UDP ");
947 break;
948 case IPPROTO_ICMP:
949 printf("ICMP ");
950 break;
951 case IPPROTO_ICMPV6:
952 printf("ICMPV6 ");
953 break;
954 default:
955 printf("%u ", s->proto);
956 break;
957 }
958 pf_print_host(&s->lan.addr, s->lan.port, s->af);
959 printf(" ");
960 pf_print_host(&s->gwy.addr, s->gwy.port, s->af);
961 printf(" ");
962 pf_print_host(&s->ext.addr, s->ext.port, s->af);
963 printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
964 s->src.seqhi, s->src.max_win, s->src.seqdiff);
965 if (s->src.wscale && s->dst.wscale)
966 printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
967 printf("]");
968 printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
969 s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
970 if (s->src.wscale && s->dst.wscale)
971 printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
972 printf("]");
973 printf(" %u:%u", s->src.state, s->dst.state);
974}
975
976void
977pf_print_flags(u_int8_t f)
978{
979 if (f)
980 printf(" ");
981 if (f & TH_FIN)
982 printf("F");
983 if (f & TH_SYN)
984 printf("S");
985 if (f & TH_RST)
986 printf("R");
987 if (f & TH_PUSH)
988 printf("P");
989 if (f & TH_ACK)
990 printf("A");
991 if (f & TH_URG)
992 printf("U");
993 if (f & TH_ECE)
994 printf("E");
995 if (f & TH_CWR)
996 printf("W");
997}
998
999#define PF_SET_SKIP_STEPS(i) \
1000 do { \
1001 while (head[i] != cur) { \
1002 head[i]->skip[i].ptr = cur; \
1003 head[i] = TAILQ_NEXT(head[i], entries); \
1004 } \
1005 } while (0)
1006
1007void
1008pf_calc_skip_steps(struct pf_rulequeue *rules)
1009{
1010 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1011 int i;
1012
1013 cur = TAILQ_FIRST(rules);
1014 prev = cur;
1015 for (i = 0; i < PF_SKIP_COUNT; ++i)
1016 head[i] = cur;
1017 while (cur != NULL) {
1018
1019 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1020 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1021 if (cur->direction != prev->direction)
1022 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1023 if (cur->af != prev->af)
1024 PF_SET_SKIP_STEPS(PF_SKIP_AF);
1025 if (cur->proto != prev->proto)
1026 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1027 if (cur->src.not != prev->src.not ||
1028 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1029 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1030 if (cur->src.port[0] != prev->src.port[0] ||
1031 cur->src.port[1] != prev->src.port[1] ||
1032 cur->src.port_op != prev->src.port_op)
1033 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1034 if (cur->dst.not != prev->dst.not ||
1035 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1036 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1037 if (cur->dst.port[0] != prev->dst.port[0] ||
1038 cur->dst.port[1] != prev->dst.port[1] ||
1039 cur->dst.port_op != prev->dst.port_op)
1040 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1041
1042 prev = cur;
1043 cur = TAILQ_NEXT(cur, entries);
1044 }
1045 for (i = 0; i < PF_SKIP_COUNT; ++i)
1046 PF_SET_SKIP_STEPS(i);
1047}
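/*
 * Illustrative example (not from the original file): if rules 1-50 all
 * specify "on fxp0" and a packet arrives on a different interface, rule 1's
 * skip[PF_SKIP_IFP] pointer already points past the whole run, so rules
 * 2-50 are skipped without being evaluated individually.
 */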
1048
1049int
1050pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1051{
1052 if (aw1->type != aw2->type)
1053 return (1);
1054 switch (aw1->type) {
1055 case PF_ADDR_ADDRMASK:
1056 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1057 return (1);
1058 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1059 return (1);
1060 return (0);
1061 case PF_ADDR_DYNIFTL:
1062 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1063 case PF_ADDR_NOROUTE:
1064 return (0);
1065 case PF_ADDR_TABLE:
1066 return (aw1->p.tbl != aw2->p.tbl);
1067 default:
1068 printf("invalid address type: %d\n", aw1->type);
1069 return (1);
1070 }
1071}
1072
1073void
1074pf_update_anchor_rules()
1075{
1076 struct pf_rule *rule;
1077 int i;
1078
1079 for (i = 0; i < PF_RULESET_MAX; ++i)
1080 TAILQ_FOREACH(rule, pf_main_ruleset.rules[i].active.ptr,
1081 entries)
1082 if (rule->anchorname[0])
1083 rule->anchor = pf_find_anchor(rule->anchorname);
1084 else
1085 rule->anchor = NULL;
1086}
1087
1088u_int16_t
1089pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
1090{
1091 u_int32_t l;
1092
1093 if (udp && !cksum)
1094 return (0x0000);
1095 l = cksum + old - new;
1096 l = (l >> 16) + (l & 65535);
1097 l = l & 65535;
1098 if (udp && !l)
1099 return (0xFFFF);
1100 return (l);
1101}
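/*
 * Illustrative usage (not from the original file): this is the classic
 * incremental Internet-checksum update, in the spirit of RFC 1624.  To
 * rewrite a TCP source port in place one would do something like
 *
 *	th->th_sum = pf_cksum_fixup(th->th_sum, th->th_sport, new_port, 0);
 *	th->th_sport = new_port;
 *
 * where new_port is the hypothetical replacement port in network byte
 * order.  The "udp" flag preserves the special UDP encoding in which a
 * checksum of zero means "no checksum": an existing zero is left alone and
 * a computed zero is transmitted as 0xffff.
 */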
1102
1103void
1104pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
1105 struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
1106{
1107 struct pf_addr ao;
1108 u_int16_t po = *p;
1109
1110 PF_ACPY(&ao, a, af);
1111 PF_ACPY(a, an, af);
1112
1113 *p = pn;
1114
1115 switch (af) {
1116#ifdef INET
1117 case AF_INET:
1118 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1119 ao.addr16[0], an->addr16[0], 0),
1120 ao.addr16[1], an->addr16[1], 0);
1121 *p = pn;
1122 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1123 ao.addr16[0], an->addr16[0], u),
1124 ao.addr16[1], an->addr16[1], u),
1125 po, pn, u);
1126 break;
1127#endif /* INET */
1128#ifdef INET6
1129 case AF_INET6:
1130 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1131 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1132 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1133 ao.addr16[0], an->addr16[0], u),
1134 ao.addr16[1], an->addr16[1], u),
1135 ao.addr16[2], an->addr16[2], u),
1136 ao.addr16[3], an->addr16[3], u),
1137 ao.addr16[4], an->addr16[4], u),
1138 ao.addr16[5], an->addr16[5], u),
1139 ao.addr16[6], an->addr16[6], u),
1140 ao.addr16[7], an->addr16[7], u),
1141 po, pn, u);
1142 break;
1143#endif /* INET6 */
1144 }
1145}
1146
1147
1148/* Changes a u_int32_t. Uses a void * so there are no alignment restrictions. */
1149void
1150pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
1151{
1152 u_int32_t ao;
1153
1154 memcpy(&ao, a, sizeof(ao));
1155 memcpy(a, &an, sizeof(u_int32_t));
1156 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
1157 ao % 65536, an % 65536, u);
1158}
1159
1160#ifdef INET6
1161void
1162pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
1163{
1164 struct pf_addr ao;
1165
1166 PF_ACPY(&ao, a, AF_INET6);
1167 PF_ACPY(a, an, AF_INET6);
1168
1169 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1170 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1171 pf_cksum_fixup(pf_cksum_fixup(*c,
1172 ao.addr16[0], an->addr16[0], u),
1173 ao.addr16[1], an->addr16[1], u),
1174 ao.addr16[2], an->addr16[2], u),
1175 ao.addr16[3], an->addr16[3], u),
1176 ao.addr16[4], an->addr16[4], u),
1177 ao.addr16[5], an->addr16[5], u),
1178 ao.addr16[6], an->addr16[6], u),
1179 ao.addr16[7], an->addr16[7], u);
1180}
1181#endif /* INET6 */
1182
1183void
1184pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
1185 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
1186 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
1187{
1188 struct pf_addr oia, ooa;
1189
1190 PF_ACPY(&oia, ia, af);
1191 PF_ACPY(&ooa, oa, af);
1192
1193 /* Change inner protocol port, fix inner protocol checksum. */
1194 if (ip != NULL) {
1195 u_int16_t oip = *ip;
1196 u_int32_t opc = 0;
1197
1198 if (pc != NULL)
1199 opc = *pc;
1200 *ip = np;
1201 if (pc != NULL)
1202 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
1203 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
1204 if (pc != NULL)
1205 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
1206 }
1207 /* Change inner ip address, fix inner ip and icmp checksums. */
1208 PF_ACPY(ia, na, af);
1209 switch (af) {
1210#ifdef INET
1211 case AF_INET: {
1212 u_int32_t oh2c = *h2c;
1213
1214 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
1215 oia.addr16[0], ia->addr16[0], 0),
1216 oia.addr16[1], ia->addr16[1], 0);
1217 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1218 oia.addr16[0], ia->addr16[0], 0),
1219 oia.addr16[1], ia->addr16[1], 0);
1220 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
1221 break;
1222 }
1223#endif /* INET */
1224#ifdef INET6
1225 case AF_INET6:
1226 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1227 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1228 pf_cksum_fixup(pf_cksum_fixup(*ic,
1229 oia.addr16[0], ia->addr16[0], u),
1230 oia.addr16[1], ia->addr16[1], u),
1231 oia.addr16[2], ia->addr16[2], u),
1232 oia.addr16[3], ia->addr16[3], u),
1233 oia.addr16[4], ia->addr16[4], u),
1234 oia.addr16[5], ia->addr16[5], u),
1235 oia.addr16[6], ia->addr16[6], u),
1236 oia.addr16[7], ia->addr16[7], u);
1237 break;
1238#endif /* INET6 */
1239 }
1240 /* Change outer ip address, fix outer ip or icmpv6 checksum. */
1241 PF_ACPY(oa, na, af);
1242 switch (af) {
1243#ifdef INET
1244 case AF_INET:
1245 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
1246 ooa.addr16[0], oa->addr16[0], 0),
1247 ooa.addr16[1], oa->addr16[1], 0);
1248 break;
1249#endif /* INET */
1250#ifdef INET6
1251 case AF_INET6:
1252 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1253 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1254 pf_cksum_fixup(pf_cksum_fixup(*ic,
1255 ooa.addr16[0], oa->addr16[0], u),
1256 ooa.addr16[1], oa->addr16[1], u),
1257 ooa.addr16[2], oa->addr16[2], u),
1258 ooa.addr16[3], oa->addr16[3], u),
1259 ooa.addr16[4], oa->addr16[4], u),
1260 ooa.addr16[5], oa->addr16[5], u),
1261 ooa.addr16[6], oa->addr16[6], u),
1262 ooa.addr16[7], oa->addr16[7], u);
1263 break;
1264#endif /* INET6 */
1265 }
1266}
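/*
 * Illustrative note (not part of the original file): ICMP error messages
 * quote the IP header and leading bytes of the packet that triggered them.
 * When NAT rewrites such an error, the quoted (inner) address and port must
 * be rewritten as well, and the inner protocol checksum, the inner IP
 * checksum, the ICMP/ICMPv6 checksum and the outer IP checksum all have to
 * be fixed up incrementally, which is what the chains of pf_cksum_fixup()
 * calls above take care of.
 */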
1267
1268void
1269pf_send_tcp(const struct pf_rule *r, sa_family_t af,
1270 const struct pf_addr *saddr, const struct pf_addr *daddr,
1271 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
1272 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl)
1273{
1274 struct mbuf *m;
1275 int len = 0, tlen;
1276#ifdef INET
1277 struct ip *h = NULL;
1278#endif /* INET */
1279#ifdef INET6
1280 struct ip6_hdr *h6 = NULL;
1281#endif /* INET6 */
1282 struct tcphdr *th = NULL;
1283 char *opt;
1284
1285 /* maximum segment size tcp option */
1286 tlen = sizeof(struct tcphdr);
1287 if (mss)
1288 tlen += 4;
1289
1290 switch (af) {
1291#ifdef INET
1292 case AF_INET:
1293 len = sizeof(struct ip) + tlen;
1294 break;
1295#endif /* INET */
1296#ifdef INET6
1297 case AF_INET6:
1298 len = sizeof(struct ip6_hdr) + tlen;
1299 break;
1300#endif /* INET6 */
1301 }
1302
1303 /* create outgoing mbuf */
1304 m = m_gethdr(MB_DONTWAIT, MT_HEADER);
1305 if (m == NULL)
1306 return;
1307 m->m_pkthdr.fw_flags |= PF_MBUF_GENERATED;
1308#ifdef ALTQ
1309 if (r != NULL && r->qid) {
1310 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
1311 m->m_pkthdr.altq_qid = r->qid;
1312 m->m_pkthdr.ecn_af = af;
1313 m->m_pkthdr.header = mtod(m, struct ip *);
1314 }
1315#endif
1316 m->m_data += max_linkhdr;
1317 m->m_pkthdr.len = m->m_len = len;
1318 m->m_pkthdr.rcvif = NULL;
1319 bzero(m->m_data, len);
1320 switch (af) {
1321#ifdef INET
1322 case AF_INET:
1323 h = mtod(m, struct ip *);
1324
1325 /* IP header fields included in the TCP checksum */
1326 h->ip_p = IPPROTO_TCP;
1327 h->ip_len = tlen;
1328 h->ip_src.s_addr = saddr->v4.s_addr;
1329 h->ip_dst.s_addr = daddr->v4.s_addr;
1330
1331 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
1332 break;
1333#endif /* INET */
1334#ifdef INET6
1335 case AF_INET6:
1336 h6 = mtod(m, struct ip6_hdr *);
1337
1338 /* IP header fields included in the TCP checksum */
1339 h6->ip6_nxt = IPPROTO_TCP;
1340 h6->ip6_plen = htons(tlen);
1341 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
1342 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
1343
1344 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
1345 break;
1346#endif /* INET6 */
1347 }
1348
1349 /* TCP header */
1350 th->th_sport = sport;
1351 th->th_dport = dport;
1352 th->th_seq = htonl(seq);
1353 th->th_ack = htonl(ack);
1354 th->th_off = tlen >> 2;
1355 th->th_flags = flags;
1356 th->th_win = htons(win);
1357
1358 if (mss) {
1359 opt = (char *)(th + 1);
1360 opt[0] = TCPOPT_MAXSEG;
1361 opt[1] = 4;
1362 mss = htons(mss);
1363 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
1364 }
1365
1366 switch (af) {
1367#ifdef INET
1368 case AF_INET:
1369 /* TCP checksum */
1370 th->th_sum = in_cksum(m, len);
1371
1372 /* Finish the IP header */
1373 h->ip_v = 4;
1374 h->ip_hl = sizeof(*h) >> 2;
1375 h->ip_tos = IPTOS_LOWDELAY;
1376 h->ip_len = len;
1377 h->ip_off = path_mtu_discovery ? IP_DF : 0;
1378 h->ip_ttl = ttl ? ttl : ip_defttl;
1379 h->ip_sum = 0;
1380 ip_output(m, (void *)NULL, (void *)NULL, 0, (void *)NULL,
1381 (void *)NULL);
1382 break;
1383#endif /* INET */
1384#ifdef INET6
1385 case AF_INET6:
1386 /* TCP checksum */
1387 th->th_sum = in6_cksum(m, IPPROTO_TCP,
1388 sizeof(struct ip6_hdr), tlen);
1389
1390 h6->ip6_vfc |= IPV6_VERSION;
1391 h6->ip6_hlim = IPV6_DEFHLIM;
1392
1393 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1394 break;
1395#endif /* INET6 */
1396 }
1397}
1398
1399void
1400pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
1401 struct pf_rule *r)
1402{
1403 struct mbuf *m0;
1404
1405 m0 = m_copypacket(m, MB_DONTWAIT);
1406 if (m0 == NULL)
1407 return;
1408 m0->m_pkthdr.fw_flags |= PF_MBUF_GENERATED;
1409
1410#ifdef ALTQ
1411 if (r->qid) {
1412 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
1413 m->m_pkthdr.altq_qid = r->qid;
1414 m->m_pkthdr.ecn_af = af;
1415 m->m_pkthdr.header = mtod(m0, struct ip *);
1416 }
1417#endif
1418
1419 switch (af) {
1420#ifdef INET
1421 case AF_INET:
1422 icmp_error(m0, type, code, 0, (void *)NULL);
1423 break;
1424#endif /* INET */
1425#ifdef INET6
1426 case AF_INET6:
1427 icmp6_error(m0, type, code, 0);
1428 break;
1429#endif /* INET6 */
1430 }
1431}
1432
1433/*
1434 * Compare addresses a and b under mask m.  With n == 0, return 1 if the
1435 * masked addresses are equal; with n != 0 (address negation), return 1 if
1436 * they differ.  Return 0 otherwise.
1437 */
1438int
1439pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
1440 struct pf_addr *b, sa_family_t af)
1441{
1442 int match = 0;
1443
1444 switch (af) {
1445#ifdef INET
1446 case AF_INET:
1447 if ((a->addr32[0] & m->addr32[0]) ==
1448 (b->addr32[0] & m->addr32[0]))
1449 match++;
1450 break;
1451#endif /* INET */
1452#ifdef INET6
1453 case AF_INET6:
1454 if (((a->addr32[0] & m->addr32[0]) ==
1455 (b->addr32[0] & m->addr32[0])) &&
1456 ((a->addr32[1] & m->addr32[1]) ==
1457 (b->addr32[1] & m->addr32[1])) &&
1458 ((a->addr32[2] & m->addr32[2]) ==
1459 (b->addr32[2] & m->addr32[2])) &&
1460 ((a->addr32[3] & m->addr32[3]) ==
1461 (b->addr32[3] & m->addr32[3])))
1462 match++;
1463 break;
1464#endif /* INET6 */
1465 }
1466 if (match) {
1467 if (n)
1468 return (0);
1469 else
1470 return (1);
1471 } else {
1472 if (n)
1473 return (1);
1474 else
1475 return (0);
1476 }
1477}
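/*
 * Illustrative example (not from the original file): with af AF_INET,
 * a = 10.1.2.0, m = 255.255.255.0 and b = 10.1.2.77, the masked compare
 * succeeds, so pf_match_addr(0, ...) returns 1 while pf_match_addr(1, ...)
 * (the negated "! address" form) returns 0.
 */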
1478
1479int
1480pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
1481{
1482 switch (op) {
1483 case PF_OP_IRG:
1484 return ((p > a1) && (p < a2));
1485 case PF_OP_XRG:
1486 return ((p < a1) || (p > a2));
1487 case PF_OP_RRG:
1488 return ((p >= a1) && (p <= a2));
1489 case PF_OP_EQ:
1490 return (p == a1);
1491 case PF_OP_NE:
1492 return (p != a1);
1493 case PF_OP_LT:
1494 return (p < a1);
1495 case PF_OP_LE:
1496 return (p <= a1);
1497 case PF_OP_GT:
1498 return (p > a1);
1499 case PF_OP_GE:
1500 return (p >= a1);
1501 }
1502 return (0); /* never reached */
1503}
1504
1505int
1506pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
1507{
1508 a1 = ntohs(a1);
1509 a2 = ntohs(a2);
1510 p = ntohs(p);
1511 return (pf_match(op, a1, a2, p));
1512}
1513
1514int
1515pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
1516{
1517 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
1518 return (0);
1519 return (pf_match(op, a1, a2, u));
1520}
1521
1522int
1523pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
1524{
1525 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
1526 return (0);
1527 return (pf_match(op, a1, a2, g));
1528}
1529
1530static int
1531pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_rule *nat_rule,
1532 int *tag)
1533{
1534 if (*tag == -1) { /* find mbuf tag */
1535 if (nat_rule != NULL && nat_rule->tag)
1536 *tag = nat_rule->tag;
1537 else if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
1538 *tag = m->m_pkthdr.pf_tag;
1539 else
1540 *tag = 0;
1541 }
1542
1543 return ((!r->match_tag_not && r->match_tag == *tag) ||
1544 (r->match_tag_not && r->match_tag != *tag));
1545}
1546
1547void
1548pf_tag_packet(struct mbuf *m, int tag)
1549{
1550 if (tag <= 0)
1551 return;
1552
1553 m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
1554 m->m_pkthdr.pf_tag = tag;
1555}
1556
1557#define PF_STEP_INTO_ANCHOR(r, a, s, n) \
1558 do { \
1559 if ((r) == NULL || (r)->anchor == NULL || \
1560 (s) != NULL || (a) != NULL) \
1561 panic("PF_STEP_INTO_ANCHOR"); \
1562 (a) = (r); \
1563 (s) = TAILQ_FIRST(&(r)->anchor->rulesets); \
1564 (r) = NULL; \
1565 while ((s) != NULL && ((r) = \
1566 TAILQ_FIRST((s)->rules[n].active.ptr)) == NULL) \
1567 (s) = TAILQ_NEXT((s), entries); \
1568 if ((r) == NULL) { \
1569 (r) = TAILQ_NEXT((a), entries); \
1570 (a) = NULL; \
1571 } \
1572 } while (0)
1573
1574#define PF_STEP_OUT_OF_ANCHOR(r, a, s, n) \
1575 do { \
1576 if ((r) != NULL || (a) == NULL || (s) == NULL) \
1577 panic("PF_STEP_OUT_OF_ANCHOR"); \
1578 (s) = TAILQ_NEXT((s), entries); \
1579 while ((s) != NULL && ((r) = \
1580 TAILQ_FIRST((s)->rules[n].active.ptr)) == NULL) \
1581 (s) = TAILQ_NEXT((s), entries); \
1582 if ((r) == NULL) { \
1583 (r) = TAILQ_NEXT((a), entries); \
1584 (a) = NULL; \
1585 } \
1586 } while (0)
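/*
 * Illustrative note (not part of the original file): these two macros walk
 * rules inside an anchor.  PF_STEP_INTO_ANCHOR descends into the first
 * non-empty ruleset attached to the anchor rule; PF_STEP_OUT_OF_ANCHOR
 * resumes at the anchor's next ruleset or, once those are exhausted, at the
 * rule following the anchor in the parent ruleset.
 */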
1587
1588#ifdef INET6
1589void
1590pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
1591 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
1592{
1593 switch (af) {
1594#ifdef INET
1595 case AF_INET:
1596 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
1597 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
1598 break;
1599#endif /* INET */
1600 case AF_INET6:
1601 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
1602 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
1603 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
1604 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
1605 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
1606 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
1607 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
1608 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
1609 break;
1610 }
1611}
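/*
 * Illustrative example (not from the original file): pf_poolmask() keeps
 * the network bits of the pool address and the host bits of the source,
 * e.g. raddr 172.16.0.0, rmask 255.255.0.0 and saddr 10.1.2.3 yield
 * naddr 172.16.2.3.  This is what the PF_POOL_BITMASK translation type
 * relies on.
 */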
1612
1613void
1614pf_addr_inc(struct pf_addr *addr, sa_family_t af)
1615{
1616 switch (af) {
1617#ifdef INET
1618 case AF_INET:
1619 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
1620 break;
1621#endif /* INET */
1622 case AF_INET6:
1623 if (addr->addr32[3] == 0xffffffff) {
1624 addr->addr32[3] = 0;
1625 if (addr->addr32[2] == 0xffffffff) {
1626 addr->addr32[2] = 0;
1627 if (addr->addr32[1] == 0xffffffff) {
1628 addr->addr32[1] = 0;
1629 addr->addr32[0] =
1630 htonl(ntohl(addr->addr32[0]) + 1);
1631 } else
1632 addr->addr32[1] =
1633 htonl(ntohl(addr->addr32[1]) + 1);
1634 } else
1635 addr->addr32[2] =
1636 htonl(ntohl(addr->addr32[2]) + 1);
1637 } else
1638 addr->addr32[3] =
1639 htonl(ntohl(addr->addr32[3]) + 1);
1640 break;
1641 }
1642}
1643#endif /* INET6 */
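/*
 * Illustrative example (not from the original file): pf_addr_inc()
 * increments an address in network byte order with carry, so the IPv6
 * address 2001:db8::ffff:ffff becomes 2001:db8::1:0:0, and the IPv4
 * address 192.0.2.255 becomes 192.0.3.0.
 */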
1644
1645#define mix(a,b,c) \
1646 do { \
1647 a -= b; a -= c; a ^= (c >> 13); \
1648 b -= c; b -= a; b ^= (a << 8); \
1649 c -= a; c -= b; c ^= (b >> 13); \
1650 a -= b; a -= c; a ^= (c >> 12); \
1651 b -= c; b -= a; b ^= (a << 16); \
1652 c -= a; c -= b; c ^= (b >> 5); \
1653 a -= b; a -= c; a ^= (c >> 3); \
1654 b -= c; b -= a; b ^= (a << 10); \
1655 c -= a; c -= b; c ^= (b >> 15); \
1656 } while (0)
1657
1658/*
1659 * hash function based on bridge_hash in if_bridge.c
1660 */
1661void
1662pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
1663 struct pf_poolhashkey *key, sa_family_t af)
1664{
1665 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
1666
1667 switch (af) {
1668#ifdef INET
1669 case AF_INET:
1670 a += inaddr->addr32[0];
1671 b += key->key32[1];
1672 mix(a, b, c);
1673 hash->addr32[0] = c + key->key32[2];
1674 break;
1675#endif /* INET */
1676#ifdef INET6
1677 case AF_INET6:
1678 a += inaddr->addr32[0];
1679 b += inaddr->addr32[2];
1680 mix(a, b, c);
1681 hash->addr32[0] = c;
1682 a += inaddr->addr32[1];
1683 b += inaddr->addr32[3];
1684 c += key->key32[1];
1685 mix(a, b, c);
1686 hash->addr32[1] = c;
1687 a += inaddr->addr32[2];
1688 b += inaddr->addr32[1];
1689 c += key->key32[2];
1690 mix(a, b, c);
1691 hash->addr32[2] = c;
1692 a += inaddr->addr32[3];
1693 b += inaddr->addr32[0];
1694 c += key->key32[3];
1695 mix(a, b, c);
1696 hash->addr32[3] = c;
1697 break;
1698#endif /* INET6 */
1699 }
1700}
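/*
 * Illustrative note (not part of the original file): this is a trimmed-down
 * Jenkins-style mix keyed with the pool's hash key.  The source-hash pool
 * type uses it so that a given source address always maps to the same pool
 * address as long as the pool and key stay unchanged.
 */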
1701
1702int
1703pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
1704 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
1705{
1706 unsigned char hash[16];
1707 struct pf_pool *rpool = &r->rpool;
1708 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
1709 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
1710 struct pf_pooladdr *acur = rpool->cur;
1711 struct pf_src_node k;
1712
1713 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
1714 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
1715 k.af = af;
1716 PF_ACPY(&k.addr, saddr, af);
1717 if (r->rule_flag & PFRULE_RULESRCTRACK ||
1718 r->rpool.opts & PF_POOL_STICKYADDR)
1719 k.rule.ptr = r;
1720 else
1721 k.rule.ptr = NULL;
1722 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
1723 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
1724 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
1725 PF_ACPY(naddr, &(*sn)->raddr, af);
1726 if (pf_status.debug >= PF_DEBUG_MISC) {
1727 printf("pf_map_addr: src tracking maps ");
1728 pf_print_host(&k.addr, 0, af);
1729 printf(" to ");
1730 pf_print_host(naddr, 0, af);
1731 printf("\n");
1732 }
1733 return (0);
1734 }
1735 }
1736
1737 if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
1738 return (1);
1739 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
1740 if (af == AF_INET) {
1741 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
1742 (rpool->opts & PF_POOL_TYPEMASK) !=
1743 PF_POOL_ROUNDROBIN)
1744 return (1);
1745 raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
1746 rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
1747 } else {
1748 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
1749 (rpool->opts & PF_POOL_TYPEMASK) !=
1750 PF_POOL_ROUNDROBIN)
1751 return (1);
1752 raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
1753 rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
1754 }
1755 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
1756 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
1757 return (1); /* unsupported */
1758 } else {
1759 raddr = &rpool->cur->addr.v.a.addr;
1760 rmask = &rpool->cur->addr.v.a.mask;
1761 }
1762
1763 switch (rpool->opts & PF_POOL_TYPEMASK) {
1764 case PF_POOL_NONE:
1765 PF_ACPY(naddr, raddr, af);
1766 break;
1767 case PF_POOL_BITMASK:
1768 PF_POOLMASK(naddr, raddr, rmask, saddr, af);
1769 break;
1770 case PF_POOL_RANDOM:
1771 if (init_addr != NULL && PF_AZERO(init_addr, af)) {
1772 switch (af) {
1773#ifdef INET
1774 case AF_INET:
1775 rpool->counter.addr32[0] = arc4random();
1776 break;
1777#endif /* INET */
1778#ifdef INET6
1779 case AF_INET6:
1780 if (rmask->addr32[3] != 0xffffffff)
1781 rpool->counter.addr32[3] = arc4random();
1782 else
1783 break;
1784 if (rmask->addr32[2] != 0xffffffff)
1785 rpool->counter.addr32[2] = arc4random();
1786 else
1787 break;
1788 if (rmask->addr32[1] != 0xffffffff)
1789 rpool->counter.addr32[1] = arc4random();
1790 else
1791 break;
1792 if (rmask->addr32[0] != 0xffffffff)
1793 rpool->counter.addr32[0] = arc4random();
1794 break;
1795#endif /* INET6 */
1796 }
1797 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
1798 PF_ACPY(init_addr, naddr, af);
1799
1800 } else {
1801 PF_AINC(&rpool->counter, af);
1802 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
1803 }
1804 break;
1805 case PF_POOL_SRCHASH:
1806 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
1807 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
1808 break;
1809 case PF_POOL_ROUNDROBIN:
1810 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
1811 if (!pfr_pool_get(rpool->cur->addr.p.tbl,
1812 &rpool->tblidx, &rpool->counter,
1813 &raddr, &rmask, af))
1814 goto get_addr;
1815 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
1816 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
1817 &rpool->tblidx, &rpool->counter,
1818 &raddr, &rmask, af))
1819 goto get_addr;
1820 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
1821 goto get_addr;
1822
1823 try_next:
1824 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
1825 rpool->cur = TAILQ_FIRST(&rpool->list);
1826 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
1827 rpool->tblidx = -1;
1828 if (pfr_pool_get(rpool->cur->addr.p.tbl,
1829 &rpool->tblidx, &rpool->counter,
1830 &raddr, &rmask, af)) {
1831 /* table contains no address of type 'af' */
1832 if (rpool->cur != acur)
1833 goto try_next;
1834 return (1);
1835 }
1836 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
1837 rpool->tblidx = -1;
1838 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
1839 &rpool->tblidx, &rpool->counter,
1840 &raddr, &rmask, af)) {
1841 /* table contains no address of type 'af' */
1842 if (rpool->cur != acur)
1843 goto try_next;
1844 return (1);
1845 }
1846 } else {
1847 raddr = &rpool->cur->addr.v.a.addr;
1848 rmask = &rpool->cur->addr.v.a.mask;
1849 PF_ACPY(&rpool->counter, raddr, af);
1850 }
1851
1852 get_addr:
1853 PF_ACPY(naddr, &rpool->counter, af);
1854 PF_AINC(&rpool->counter, af);
1855 break;
1856 }
1857 if (*sn != NULL)
1858 PF_ACPY(&(*sn)->raddr, naddr, af);
1859
1860 if (pf_status.debug >= PF_DEBUG_MISC &&
1861 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
1862 printf("pf_map_addr: selected address ");
1863 pf_print_host(naddr, 0, af);
1864 printf("\n");
1865 }
1866
1867 return (0);
1868}
1869
1870int
1871pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
1872 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
1873 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
1874 struct pf_src_node **sn)
1875{
1876 struct pf_state key;
1877 struct pf_addr init_addr;
1878 u_int16_t cut;
1879
1880 bzero(&init_addr, sizeof(init_addr));
1881 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
1882 return (1);
1883
1884 do {
1885 key.af = af;
1886 key.proto = proto;
1887 PF_ACPY(&key.ext.addr, daddr, key.af);
1888 PF_ACPY(&key.gwy.addr, naddr, key.af);
1889 key.ext.port = dport;
1890
1891 /*
1892		 * Port search: start at a random port in the range and step
1893		 * up, then down, similar to the port selection loop in in_pcbbind.
1894 */
1895 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
1896 key.gwy.port = 0;
1897 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL)
1898 return (0);
1899 } else if (low == 0 && high == 0) {
1900 key.gwy.port = *nport;
1901 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL)
1902 return (0);
1903 } else if (low == high) {
1904 key.gwy.port = htons(low);
1905 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL) {
1906 *nport = htons(low);
1907 return (0);
1908 }
1909 } else {
1910 u_int16_t tmp;
1911
1912 if (low > high) {
1913 tmp = low;
1914 low = high;
1915 high = tmp;
1916 }
1917 /* low < high */
1918 cut = arc4random() % (1 + high - low) + low;
1919 /* low <= cut <= high */
1920 for (tmp = cut; tmp <= high; ++(tmp)) {
1921 key.gwy.port = htons(tmp);
1922 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) ==
1923 NULL) {
1924 *nport = htons(tmp);
1925 return (0);
1926 }
1927 }
1928 for (tmp = cut - 1; tmp >= low; --(tmp)) {
1929 key.gwy.port = htons(tmp);
1930 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) ==
1931 NULL) {
1932 *nport = htons(tmp);
1933 return (0);
1934 }
1935 }
1936 }
1937
1938 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
1939 case PF_POOL_RANDOM:
1940 case PF_POOL_ROUNDROBIN:
1941 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
1942 return (1);
1943 break;
1944 case PF_POOL_NONE:
1945 case PF_POOL_SRCHASH:
1946 case PF_POOL_BITMASK:
1947 default:
1948 return (1);
1949 }
1950 } while (! PF_AEQ(&init_addr, naddr, af) );
1951
1952 return (1); /* none available */
1953}
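/*
 * Illustrative example (not from the original file): for a NAT rule with
 * proxy ports 50001:65535, pf_get_sport() picks a random cut in that range,
 * probes cut, cut+1, ... up to 65535 and then cut-1 down to 50001, taking
 * the first port with no colliding state.  If every port is taken and the
 * pool type is round-robin or random, it asks pf_map_addr() for the next
 * pool address and starts over until the pool wraps around to init_addr.
 */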
1954
1955struct pf_rule *
1956pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
1957 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
1958 struct pf_addr *daddr, u_int16_t dport, int rs_num)
1959{
1960 struct pf_rule *r, *rm = NULL, *anchorrule = NULL;
1961 struct pf_ruleset *ruleset = NULL;
1962
1963 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
1964 while (r && rm == NULL) {
1965 struct pf_rule_addr *src = NULL, *dst = NULL;
1966 struct pf_addr_wrap *xdst = NULL;
1967
1968 if (r->action == PF_BINAT && direction == PF_IN) {
1969 src = &r->dst;
1970 if (r->rpool.cur != NULL)
1971 xdst = &r->rpool.cur->addr;
1972 } else {
1973 src = &r->src;
1974 dst = &r->dst;
1975 }
1976
1977 r->evaluations++;
1978 if (r->kif != NULL &&
1979 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
1980 r = r->skip[PF_SKIP_IFP].ptr;
1981 else if (r->direction && r->direction != direction)
1982 r = r->skip[PF_SKIP_DIR].ptr;
1983 else if (r->af && r->af != pd->af)
1984 r = r->skip[PF_SKIP_AF].ptr;
1985 else if (r->proto && r->proto != pd->proto)
1986 r = r->skip[PF_SKIP_PROTO].ptr;
1987 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af, src->not))
1988 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
1989 PF_SKIP_DST_ADDR].ptr;
1990 else if (src->port_op && !pf_match_port(src->port_op,
1991 src->port[0], src->port[1], sport))
1992 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
1993 PF_SKIP_DST_PORT].ptr;
1994 else if (dst != NULL &&
1995 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->not))
1996 r = r->skip[PF_SKIP_DST_ADDR].ptr;
1997 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, 0))
1998 r = TAILQ_NEXT(r, entries);
1999 else if (dst != NULL && dst->port_op &&
2000 !pf_match_port(dst->port_op, dst->port[0],
2001 dst->port[1], dport))
2002 r = r->skip[PF_SKIP_DST_PORT].ptr;
2003 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
2004 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
2005 off, pd->hdr.tcp), r->os_fingerprint)))
2006 r = TAILQ_NEXT(r, entries);
2007 else if (r->anchorname[0] && r->anchor == NULL)
2008 r = TAILQ_NEXT(r, entries);
2009 else if (r->anchor == NULL)
2010 rm = r;
2011 else
2012 PF_STEP_INTO_ANCHOR(r, anchorrule, ruleset, rs_num);
2013 if (r == NULL && anchorrule != NULL)
2014 PF_STEP_OUT_OF_ANCHOR(r, anchorrule, ruleset,
2015 rs_num);
2016 }
2017 if (rm != NULL && (rm->action == PF_NONAT ||
2018 rm->action == PF_NORDR || rm->action == PF_NOBINAT))
2019 return (NULL);
2020 return (rm);
2021}
2022
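/*
 * Find the translation rule matching this packet: outbound packets are
 * checked against BINAT rules first, then NAT; inbound packets against
 * RDR first, then BINAT.  A matching "no nat"/"no rdr"/"no binat" rule
 * disables translation for the packet.
 */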
2023struct pf_rule *
2024pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
2025 struct pfi_kif *kif, struct pf_src_node **sn,
2026 struct pf_addr *saddr, u_int16_t sport,
2027 struct pf_addr *daddr, u_int16_t dport,
2028 struct pf_addr *naddr, u_int16_t *nport)
2029{
2030 struct pf_rule *r = NULL;
2031
2032 if (direction == PF_OUT) {
2033 r = pf_match_translation(pd, m, off, direction, kif, saddr,
2034 sport, daddr, dport, PF_RULESET_BINAT);
2035 if (r == NULL)
2036 r = pf_match_translation(pd, m, off, direction, kif,
2037 saddr, sport, daddr, dport, PF_RULESET_NAT);
2038 } else {
2039 r = pf_match_translation(pd, m, off, direction, kif, saddr,
2040 sport, daddr, dport, PF_RULESET_RDR);
2041 if (r == NULL)
2042 r = pf_match_translation(pd, m, off, direction, kif,
2043 saddr, sport, daddr, dport, PF_RULESET_BINAT);
2044 }
2045
2046 if (r != NULL) {
2047 switch (r->action) {
2048 case PF_NONAT:
2049 case PF_NOBINAT:
2050 case PF_NORDR:
2051 return (NULL);
2052 case PF_NAT:
2053 if (pf_get_sport(pd->af, pd->proto, r, saddr,
2054 daddr, dport, naddr, nport, r->rpool.proxy_port[0],
2055 r->rpool.proxy_port[1], sn)) {
2056 DPFPRINTF(PF_DEBUG_MISC,
2057 ("pf: NAT proxy port allocation "
2058 "(%u-%u) failed\n",
2059 r->rpool.proxy_port[0],
2060 r->rpool.proxy_port[1]));
2061 return (NULL);
2062 }
2063 break;
2064 case PF_BINAT:
2065 switch (direction) {
2066 case PF_OUT:
2067 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){
2068 if (pd->af == AF_INET) {
2069 if (r->rpool.cur->addr.p.dyn->
2070 pfid_acnt4 < 1)
2071 return (NULL);
2072 PF_POOLMASK(naddr,
2073 &r->rpool.cur->addr.p.dyn->
2074 pfid_addr4,
2075 &r->rpool.cur->addr.p.dyn->
2076 pfid_mask4,
2077 saddr, AF_INET);
2078 } else {
2079 if (r->rpool.cur->addr.p.dyn->
2080 pfid_acnt6 < 1)
2081 return (NULL);
2082 PF_POOLMASK(naddr,
2083 &r->rpool.cur->addr.p.dyn->
2084 pfid_addr6,
2085 &r->rpool.cur->addr.p.dyn->
2086 pfid_mask6,
2087 saddr, AF_INET6);
2088 }
2089 } else
2090 PF_POOLMASK(naddr,
2091 &r->rpool.cur->addr.v.a.addr,
2092 &r->rpool.cur->addr.v.a.mask,
2093 saddr, pd->af);
2094 break;
2095 case PF_IN:
2096 if (r->src.addr.type == PF_ADDR_DYNIFTL){
2097 if (pd->af == AF_INET) {
2098 if (r->src.addr.p.dyn->
2099 pfid_acnt4 < 1)
2100 return (NULL);
2101 PF_POOLMASK(naddr,
2102 &r->src.addr.p.dyn->
2103 pfid_addr4,
2104 &r->src.addr.p.dyn->
2105 pfid_mask4,
2106 daddr, AF_INET);
2107 } else {
2108 if (r->src.addr.p.dyn->
2109 pfid_acnt6 < 1)
2110 return (NULL);
2111 PF_POOLMASK(naddr,
2112 &r->src.addr.p.dyn->
2113 pfid_addr6,
2114 &r->src.addr.p.dyn->
2115 pfid_mask6,
2116 daddr, AF_INET6);
2117 }
2118 } else
2119 PF_POOLMASK(naddr,
2120 &r->src.addr.v.a.addr,
2121 &r->src.addr.v.a.mask, daddr,
2122 pd->af);
2123 break;
2124 }
2125 break;
2126 case PF_RDR: {
2127 if (pf_map_addr(r->af, r, saddr, naddr, NULL, sn))
2128 return (NULL);
2129
2130 if (r->rpool.proxy_port[1]) {
2131 u_int32_t tmp_nport;
2132
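				/*
				 * Map the destination port into the proxy
				 * range modulo its size; e.g. an rdr of dst
				 * ports 80:89 to proxy ports 8000:8009 sends
				 * dst port 85 to 8000 + (85 - 80) = 8005.
				 */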
2133 tmp_nport = ((ntohs(dport) -
2134 ntohs(r->dst.port[0])) %
2135 (r->rpool.proxy_port[1] -
2136 r->rpool.proxy_port[0] + 1)) +
2137 r->rpool.proxy_port[0];
2138
2139 /* wrap around if necessary */
2140 if (tmp_nport > 65535)
2141 tmp_nport -= 65535;
2142 *nport = htons((u_int16_t)tmp_nport);
2143 } else if (r->rpool.proxy_port[0])
2144 *nport = htons(r->rpool.proxy_port[0]);
2145 break;
2146 }
2147 default:
2148 return (NULL);
2149 }
2150 }
2151
2152 return (r);
2153}
2154
2155#ifdef SMP
2156struct netmsg_hashlookup {
2157 struct lwkt_msg nm_lmsg;
2158 struct inpcb **nm_pinp;
2159 struct inpcbinfo *nm_pcbinfo;
2160 struct pf_addr *nm_saddr;
2161 struct pf_addr *nm_daddr;
2162 uint16_t nm_sport;
2163 uint16_t nm_dport;
2164 sa_family_t nm_af;
2165};
2166
2167static int
2168in_pcblookup_hash_handler(struct lwkt_msg *msg0)
2169{
2170 struct netmsg_hashlookup *msg = (struct netmsg_hashlookup *)msg0;
2171
2172 if (msg->nm_af == AF_INET)
2173 *msg->nm_pinp = in_pcblookup_hash(msg->nm_pcbinfo,
2174 msg->nm_saddr->v4, msg->nm_sport, msg->nm_daddr->v4,
2175 msg->nm_dport, INPLOOKUP_WILDCARD, NULL);
2176#ifdef INET6
2177 else
2178 *msg->nm_pinp = in6_pcblookup_hash(msg->nm_pcbinfo,
2179 &msg->nm_saddr->v6, msg->nm_sport, &msg->nm_daddr->v6,
2180 msg->nm_dport, INPLOOKUP_WILDCARD, NULL);
2181#endif /* INET6 */
2182 lwkt_replymsg(&msg->nm_lmsg, 0);
2183 return (EASYNC);
2184}
2185#endif /* SMP */
2186
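/*
 * Look up the local PCB owning this connection so the socket's
 * credentials (uid/gid) can be matched against filter rules.
 */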
2187int
2188pf_socket_lookup(uid_t *uid, gid_t *gid, int direction, struct pf_pdesc *pd)
2189{
2190 struct pf_addr *saddr, *daddr;
2191 u_int16_t sport, dport;
2192 struct inpcbinfo *pi;
2193 struct inpcb *inp;
2194#ifdef SMP
2195 struct netmsg_hashlookup *msg = NULL;
2196#endif
2197 int pi_cpu = 0;
2198
2199 *uid = UID_MAX;
2200 *gid = GID_MAX;
2201 if (direction == PF_IN) {
2202 saddr = pd->src;
2203 daddr = pd->dst;
2204 } else {
2205 saddr = pd->dst;
2206 daddr = pd->src;
2207 }
2208 switch (pd->proto) {
2209 case IPPROTO_TCP:
2210 sport = pd->hdr.tcp->th_sport;
2211 dport = pd->hdr.tcp->th_dport;
2212
2213 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport);
2214 pi = &tcbinfo[pi_cpu];
2215#ifdef SMP
2216 /*
2217 * Our netstack runs lockless on MP systems
2218 * (only for TCP connections at the moment).
2219 *
2220 * As we are not allowed to read another CPU's tcbinfo,
2221 * we have to ask that CPU via remote call to search the
2222 * table for us.
2223 *
2224 * Prepare a msg iff data belongs to another CPU.
2225 */
2226 if (pi_cpu != mycpu->gd_cpuid) {
2227 msg = malloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
2228 lwkt_initmsg(&msg->nm_lmsg, &netisr_afree_rport, 0,
2229 lwkt_cmd_func(in_pcblookup_hash_handler),
2230 lwkt_cmd_op_none);
2231 msg->nm_pinp = &inp;
2232 msg->nm_pcbinfo = pi;
2233 msg->nm_saddr = saddr;
2234 msg->nm_sport = sport;
2235 msg->nm_daddr = daddr;
2236 msg->nm_dport = dport;
2237 msg->nm_af = pd->af;
2238 }
2239#endif /* SMP */
2240 break;
2241 case IPPROTO_UDP:
2242 sport = pd->hdr.udp->uh_sport;
2243 dport = pd->hdr.udp->uh_dport;
2244 pi = &udbinfo;
2245 break;
2246 default:
2247 return (0);
2248 }
2249 if (direction != PF_IN) {
2250 u_int16_t p;
2251
2252 p = sport;
2253 sport = dport;
2254 dport = p;
2255 }
2256 switch (pd->af) {
2257#ifdef INET6
2258 case AF_INET6:
2259#ifdef SMP
2260 /*
2261 * Query other CPU, second part
2262 *
2263 * msg only gets initialized when:
2264 * 1) packet is TCP
2265 * 2) the info belongs to another CPU
2266 *
2267 * Use some switch/case magic to avoid code duplication.
2268 */
2269 if (msg == NULL)
2270#endif /* SMP */
2271 {
2272 inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
2273 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL);
2274
2275 if (inp == NULL)
2276 return (0);
2277 break;
2278 }
2279 /* FALLTHROUGH if SMP and on other CPU */
2280#endif /* INET6 */
2281 case AF_INET:
2282#ifdef SMP
2283 if (msg != NULL) {
2284 lwkt_sendmsg(tcp_cport(pi_cpu), &msg->nm_lmsg);
2285 } else
2286#endif /* SMP */
2287 {
2288 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4,
2289 dport, INPLOOKUP_WILDCARD, NULL);
2290 }
2291 if (inp == NULL)
2292 return (0);
2293 break;
2294
2295 default:
2296 return (0);
2297 }
2298 *uid = inp->inp_socket->so_cred->cr_uid;
2299 *gid = inp->inp_socket->so_cred->cr_groups[0];
2300 return (1);
2301}
2302
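/*
 * Extract the TCP window scale option; the returned value has
 * PF_WSCALE_FLAG set when the option was present.
 */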
2303u_int8_t
2304pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2305{
2306 int hlen;
2307 u_int8_t hdr[60];
2308 u_int8_t *opt, optlen;
2309 u_int8_t wscale = 0;
2310
2311 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2312 if (hlen <= sizeof(struct tcphdr))
2313 return (0);
2314 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2315 return (0);
2316 opt = hdr + sizeof(struct tcphdr);
2317 hlen -= sizeof(struct tcphdr);
2318 while (hlen >= 3) {
2319 switch (*opt) {
2320 case TCPOPT_EOL:
2321 case TCPOPT_NOP:
2322 ++opt;
2323 --hlen;
2324 break;
2325 case TCPOPT_WINDOW:
2326 wscale = opt[2];
2327 if (wscale > TCP_MAX_WINSHIFT)
2328 wscale = TCP_MAX_WINSHIFT;
2329 wscale |= PF_WSCALE_FLAG;
2330 /* FALLTHROUGH */
2331 default:
2332 optlen = opt[1];
2333 if (optlen < 2)
2334 optlen = 2;
2335 hlen -= optlen;
2336 opt += optlen;
2337 break;
2338 }
2339 }
2340 return (wscale);
2341}
2342
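/*
 * Extract the TCP MSS option, falling back to tcp_mssdflt when the
 * option is absent.
 */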
2343u_int16_t
2344pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2345{
2346 int hlen;
2347 u_int8_t hdr[60];
2348 u_int8_t *opt, optlen;
2349 u_int16_t mss = tcp_mssdflt;
2350
2351 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2352 if (hlen <= sizeof(struct tcphdr))
2353 return (0);
2354 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2355 return (0);
2356 opt = hdr + sizeof(struct tcphdr);
2357 hlen -= sizeof(struct tcphdr);
2358 while (hlen >= TCPOLEN_MAXSEG) {
2359 switch (*opt) {
2360 case TCPOPT_EOL:
2361 case TCPOPT_NOP:
2362 ++opt;
2363 --hlen;
2364 break;
2365 case TCPOPT_MAXSEG:
2366			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
			mss = ntohs(mss);	/* option is in network byte order */
2367 /* FALLTHROUGH */
2368 default:
2369 optlen = opt[1];
2370 if (optlen < 2)
2371 optlen = 2;
2372 hlen -= optlen;
2373 opt += optlen;
2374 break;
2375 }
2376 }
2377 return (mss);
2378}
2379
2380u_int16_t
2381pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
2382{
2383#ifdef INET
2384 struct sockaddr_in *dst;
2385 struct route ro;
2386#endif /* INET */
2387#ifdef INET6
2388 struct sockaddr_in6 *dst6;
2389 struct route_in6 ro6;
2390#endif /* INET6 */
2391 struct rtentry *rt = NULL;
2392 int hlen = 0;
2393 u_int16_t mss = tcp_mssdflt;
2394
2395 switch (af) {
2396#ifdef INET
2397 case AF_INET:
2398 hlen = sizeof(struct ip);
2399 bzero(&ro, sizeof(ro));
2400 dst = (struct sockaddr_in *)&ro.ro_dst;
2401 dst->sin_family = AF_INET;
2402 dst->sin_len = sizeof(*dst);
2403 dst->sin_addr = addr->v4;
2404 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
2405 rt = ro.ro_rt;
2406 break;
2407#endif /* INET */
2408#ifdef INET6
2409 case AF_INET6:
2410 hlen = sizeof(struct ip6_hdr);
2411 bzero(&ro6, sizeof(ro6));
2412 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
2413 dst6->sin6_family = AF_INET6;
2414 dst6->sin6_len = sizeof(*dst6);
2415 dst6->sin6_addr = addr->v6;
2416 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING));
2417 rt = ro6.ro_rt;
2418 break;
2419#endif /* INET6 */
2420 }
2421
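	/*
	 * When a route is found, derive the MSS from the interface MTU
	 * (MTU minus IP and TCP headers), but never below tcp_mssdflt;
	 * then clamp it to the peer's offer with a 64 byte floor.
	 */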
2422 if (rt && rt->rt_ifp) {
2423 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
2424 mss = max(tcp_mssdflt, mss);
2425 RTFREE(rt);
2426 }
2427 mss = min(mss, offer);
2428 mss = max(mss, 64); /* sanity - at least max opt space */
2429 return (mss);
2430}
2431
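/*
 * Pre-compute the route-to address and outgoing interface for a new
 * state from the rule's address pool (skipped for fastroute and for
 * rules without a route option).
 */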
2432void
2433pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
2434{
2435 struct pf_rule *r = s->rule.ptr;
2436
2437 s->rt_kif = NULL;
2438 if (!r->rt || r->rt == PF_FASTROUTE)
2439 return;
2440 switch (s->af) {
2441#ifdef INET
2442 case AF_INET:
2443 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
2444 &s->nat_src_node);
2445 s->rt_kif = r->rpool.cur->kif;
2446 break;
2447#endif /* INET */
2448#ifdef INET6
2449 case AF_INET6:
2450 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
2451 &s->nat_src_node);
2452 s->rt_kif = r->rpool.cur->kif;
2453 break;
2454#endif /* INET6 */
2455 }
2456}
2457
2458int
2459pf_test_tcp(struct pf_rule **rm, struct pf_state **sm, int direction,
2460 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
2461 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm)
2462{
2463 struct pf_rule *nr = NULL;
2464 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
2465 struct tcphdr *th = pd->hdr.tcp;
2466 u_int16_t bport, nport = 0;
2467 sa_family_t af = pd->af;
2468 int lookup = -1;
2469 uid_t uid;
2470 gid_t gid;
2471 struct pf_rule *r, *a = NULL;
2472 struct pf_ruleset *ruleset = NULL;
2473 struct pf_src_node *nsn = NULL;
2474 u_short reason;
2475 int rewrite = 0;
2476 int tag = -1;
2477 u_int16_t mss = tcp_mssdflt;
2478
2479 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
2480
2481 if (direction == PF_OUT) {
2482 bport = nport = th->th_sport;
2483 /* check outgoing packet for BINAT/NAT */
2484 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
2485 saddr, th->th_sport, daddr, th->th_dport,
2486 &pd->naddr, &nport)) != NULL) {
2487 PF_ACPY(&pd->baddr, saddr, af);
2488 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
2489 &th->th_sum, &pd->naddr, nport, 0, af);
2490 rewrite++;
2491 if (nr->natpass)
2492 r = NULL;
2493 pd->nat_rule = nr;
2494 }
2495 } else {
2496 bport = nport = th->th_dport;
2497 /* check incoming packet for BINAT/RDR */
2498 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
2499 saddr, th->th_sport, daddr, th->th_dport,
2500 &pd->naddr, &nport)) != NULL) {
2501 PF_ACPY(&pd->baddr, daddr, af);
2502 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
2503 &th->th_sum, &pd->naddr, nport, 0, af);
2504 rewrite++;
2505 if (nr->natpass)
2506 r = NULL;
2507 pd->nat_rule = nr;
2508 }
2509 }
2510
2511 while (r != NULL) {
2512 r->evaluations++;
2513 if (r->kif != NULL &&
2514 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
2515 r = r->skip[PF_SKIP_IFP].ptr;
2516 else if (r->direction && r->direction != direction)
2517 r = r->skip[PF_SKIP_DIR].ptr;
2518 else if (r->af && r->af != af)
2519 r = r->skip[PF_SKIP_AF].ptr;
2520 else if (r->proto && r->proto != IPPROTO_TCP)
2521 r = r->skip[PF_SKIP_PROTO].ptr;
2522 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, r->src.not))
2523 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
2524 else if (r->src.port_op && !pf_match_port(r->src.port_op,
2525 r->src.port[0], r->src.port[1], th->th_sport))
2526 r = r->skip[PF_SKIP_SRC_PORT].ptr;
2527 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, r->dst.not))
2528 r = r->skip[PF_SKIP_DST_ADDR].ptr;
2529 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2530 r->dst.port[0], r->dst.port[1], th->th_dport))
2531 r = r->skip[PF_SKIP_DST_PORT].ptr;
2532 else if (r->tos && !(r->tos & pd->tos))
2533 r = TAILQ_NEXT(r, entries);
2534 else if (r->rule_flag & PFRULE_FRAGMENT)
2535 r = TAILQ_NEXT(r, entries);
2536 else if ((r->flagset & th->th_flags) != r->flags)
2537 r = TAILQ_NEXT(r, entries);
2538 else if (r->uid.op && (lookup != -1 || (lookup =
2539 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2540 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
2541 uid))
2542 r = TAILQ_NEXT(r, entries);
2543 else if (r->gid.op && (lookup != -1 || (lookup =
2544 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2545 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
2546 gid))
2547 r = TAILQ_NEXT(r, entries);
2548 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
2549 r = TAILQ_NEXT(r, entries);
2550 else if (r->anchorname[0] && r->anchor == NULL)
2551 r = TAILQ_NEXT(r, entries);
2552 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
2553 pf_osfp_fingerprint(pd, m, off, th), r->os_fingerprint))
2554 r = TAILQ_NEXT(r, entries);
2555 else {
2556 if (r->tag)
2557 tag = r->tag;
2558 if (r->anchor == NULL) {
2559 *rm = r;
2560 *am = a;
2561 *rsm = ruleset;
2562 if ((*rm)->quick)
2563 break;
2564 r = TAILQ_NEXT(r, entries);
2565 } else
2566 PF_STEP_INTO_ANCHOR(r, a, ruleset,
2567 PF_RULESET_FILTER);
2568 }
2569 if (r == NULL && a != NULL)
2570 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
2571 PF_RULESET_FILTER);
2572 }
2573 r = *rm;
2574 a = *am;
2575 ruleset = *rsm;
2576
2577 REASON_SET(&reason, PFRES_MATCH);
2578
2579 if (r->log) {
2580 if (rewrite)
2581 m_copyback(m, off, sizeof(*th), (caddr_t)th);
2582 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
2583 }
2584
2585 if ((r->action == PF_DROP) &&
2586 ((r->rule_flag & PFRULE_RETURNRST) ||
2587 (r->rule_flag & PFRULE_RETURNICMP) ||
2588 (r->rule_flag & PFRULE_RETURN))) {
2589 /* undo NAT changes, if they have taken place */
2590 if (nr != NULL) {
2591 if (direction == PF_OUT) {
2592 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
2593 &th->th_sum, &pd->baddr, bport, 0, af);
2594 rewrite++;
2595 } else {
2596 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
2597 &th->th_sum, &pd->baddr, bport, 0, af);
2598 rewrite++;
2599 }
2600 }
2601 if (((r->rule_flag & PFRULE_RETURNRST) ||
2602 (r->rule_flag & PFRULE_RETURN)) &&
2603 !(th->th_flags & TH_RST)) {
2604 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2605
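			/*
			 * The RST must acknowledge the whole offending
			 * segment; SYN and FIN each occupy one sequence
			 * number.
			 */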
2606 if (th->th_flags & TH_SYN)
2607 ack++;
2608 if (th->th_flags & TH_FIN)
2609 ack++;
2610 pf_send_tcp(r, af, pd->dst,
2611 pd->src, th->th_dport, th->th_sport,
2612 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
2613 r->return_ttl);
2614 } else if ((af == AF_INET) && r->return_icmp)
2615 pf_send_icmp(m, r->return_icmp >> 8,
2616 r->return_icmp & 255, af, r);
2617 else if ((af == AF_INET6) && r->return_icmp6)
2618 pf_send_icmp(m, r->return_icmp6 >> 8,
2619 r->return_icmp6 & 255, af, r);
2620 }
2621
2622 if (r->action == PF_DROP)
2623 return (PF_DROP);
2624
2625 pf_tag_packet(m, tag);
2626
2627 if (r->keep_state || nr != NULL ||
2628 (pd->flags & PFDESC_TCP_NORM)) {
2629 /* create new state */
2630 u_int16_t len;
2631 struct pf_state *s = NULL;
2632 struct pf_src_node *sn = NULL;
2633
2634 len = pd->tot_len - off - (th->th_off << 2);
2635
2636 /* check maximums */
2637 if (r->max_states && (r->states >= r->max_states))
2638 goto cleanup;
2639		/* src node for filter rule */
2640 if ((r->rule_flag & PFRULE_SRCTRACK ||
2641 r->rpool.opts & PF_POOL_STICKYADDR) &&
2642 pf_insert_src_node(&sn, r, saddr, af) != 0)
2643 goto cleanup;
2644 /* src node for translation rule */
2645 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
2646 ((direction == PF_OUT &&
2647 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
2648 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
2649 goto cleanup;
2650 s = pool_get(&pf_state_pl, PR_NOWAIT);
2651 if (s == NULL) {
2652cleanup:
2653 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
2654 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
2655 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2656 pf_status.src_nodes--;
2657 pool_put(&pf_src_tree_pl, sn);
2658 }
2659 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
2660 nsn->expire == 0) {
2661 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
2662 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2663 pf_status.src_nodes--;
2664 pool_put(&pf_src_tree_pl, nsn);
2665 }
2666 REASON_SET(&reason, PFRES_MEMORY);
2667 return (PF_DROP);
2668 }
2669 bzero(s, sizeof(*s));
2670 r->states++;
2671 if (a != NULL)
2672 a->states++;
2673 s->rule.ptr = r;
2674 s->nat_rule.ptr = nr;
2675 if (s->nat_rule.ptr != NULL)
2676 s->nat_rule.ptr->states++;
2677 s->anchor.ptr = a;
2678 s->allow_opts = r->allow_opts;
2679 s->log = r->log & 2;
2680 s->proto = IPPROTO_TCP;
2681 s->direction = direction;
2682 s->af = af;
2683 if (direction == PF_OUT) {
2684 PF_ACPY(&s->gwy.addr, saddr, af);
2685 s->gwy.port = th->th_sport; /* sport */
2686 PF_ACPY(&s->ext.addr, daddr, af);
2687 s->ext.port = th->th_dport;
2688 if (nr != NULL) {
2689 PF_ACPY(&s->lan.addr, &pd->baddr, af);
2690 s->lan.port = bport;
2691 } else {
2692 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
2693 s->lan.port = s->gwy.port;
2694 }
2695 } else {
2696 PF_ACPY(&s->lan.addr, daddr, af);
2697 s->lan.port = th->th_dport;
2698 PF_ACPY(&s->ext.addr, saddr, af);
2699 s->ext.port = th->th_sport;
2700 if (nr != NULL) {
2701 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
2702 s->gwy.port = bport;
2703 } else {
2704 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
2705 s->gwy.port = s->lan.port;
2706 }
2707 }
2708
2709 s->src.seqlo = ntohl(th->th_seq);
2710 s->src.seqhi = s->src.seqlo + len + 1;
2711 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
2712 r->keep_state == PF_STATE_MODULATE) {
2713 /* Generate sequence number modulator */
2714 while ((s->src.seqdiff = arc4random()) == 0)
2715 ;
2716 pf_change_a(&th->th_seq, &th->th_sum,
2717 htonl(s->src.seqlo + s->src.seqdiff), 0);
2718 rewrite = 1;
2719 } else
2720 s->src.seqdiff = 0;
2721 if (th->th_flags & TH_SYN) {
2722 s->src.seqhi++;
2723 s->src.wscale = pf_get_wscale(m, off, th->th_off, af);
2724 }
2725 s->src.max_win = MAX(ntohs(th->th_win), 1);
2726 if (s->src.wscale & PF_WSCALE_MASK) {
2727 /* Remove scale factor from initial window */
2728 int win = s->src.max_win;
2729 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
2730 s->src.max_win = (win - 1) >>
2731 (s->src.wscale & PF_WSCALE_MASK);
2732 }
2733 if (th->th_flags & TH_FIN)
2734 s->src.seqhi++;
2735 s->dst.seqhi = 1;
2736 s->dst.max_win = 1;
2737 s->src.state = TCPS_SYN_SENT;
2738 s->dst.state = TCPS_CLOSED;
2739 s->creation = time_second;
2740 s->expire = time_second;
2741 s->timeout = PFTM_TCP_FIRST_PACKET;
2742 pf_set_rt_ifp(s, saddr);
2743 if (sn != NULL) {
2744 s->src_node = sn;
2745 s->src_node->states++;
2746 }
2747 if (nsn != NULL) {
2748 PF_ACPY(&nsn->raddr, &pd->naddr, af);
2749 s->nat_src_node = nsn;
2750 s->nat_src_node->states++;
2751 }
2752 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
2753 off, pd, th, &s->src, &s->dst)) {
2754 REASON_SET(&reason, PFRES_MEMORY);
2755 pf_src_tree_remove_state(s);
2756 pool_put(&pf_state_pl, s);
2757 return (PF_DROP);
2758 }
2759 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
2760 pf_normalize_tcp_stateful(m, off, pd, &reason, th, &s->src,
2761 &s->dst, &rewrite)) {
2762 pf_normalize_tcp_cleanup(s);
2763 pf_src_tree_remove_state(s);
2764 pool_put(&pf_state_pl, s);
2765 return (PF_DROP);
2766 }
2767 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
2768 pf_normalize_tcp_cleanup(s);
2769 REASON_SET(&reason, PFRES_MEMORY);
2770 pf_src_tree_remove_state(s);
2771 pool_put(&pf_state_pl, s);
2772 return (PF_DROP);
2773 } else
2774 *sm = s;
2775 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
2776 r->keep_state == PF_STATE_SYNPROXY) {
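			/*
			 * SYN proxy: answer the initial SYN ourselves with a
			 * random ISN and only contact the real destination
			 * once the client completes the handshake.
			 */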
2777 s->src.state = PF_TCPS_PROXY_SRC;
2778 if (nr != NULL) {
2779 if (direction == PF_OUT) {
2780 pf_change_ap(saddr, &th->th_sport,
2781 pd->ip_sum, &th->th_sum, &pd->baddr,
2782 bport, 0, af);
2783 } else {
2784 pf_change_ap(daddr, &th->th_dport,
2785 pd->ip_sum, &th->th_sum, &pd->baddr,
2786 bport, 0, af);
2787 }
2788 }
2789 s->src.seqhi = arc4random();
2790 /* Find mss option */
2791 mss = pf_get_mss(m, off, th->th_off, af);
2792 mss = pf_calc_mss(saddr, af, mss);
2793 mss = pf_calc_mss(daddr, af, mss);
2794 s->src.mss = mss;
2795 pf_send_tcp(r, af, daddr, saddr, th->th_dport,
2796 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
2797 TH_SYN|TH_ACK, 0, s->src.mss, 0);
2798 return (PF_SYNPROXY_DROP);
2799 }
2800 }
2801
2802 /* copy back packet headers if we performed NAT operations */
2803 if (rewrite)
2804 m_copyback(m, off, sizeof(*th), (caddr_t)th);
2805
2806 return (PF_PASS);
2807}
2808
2809int
2810pf_test_udp(struct pf_rule **rm, struct pf_state **sm, int direction,
2811 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
2812 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm)
2813{
2814 struct pf_rule *nr = NULL;
2815 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
2816 struct udphdr *uh = pd->hdr.udp;
2817 u_int16_t bport, nport = 0;
2818 sa_family_t af = pd->af;
2819 int lookup = -1;
2820 uid_t uid;
2821 gid_t gid;
2822 struct pf_rule *r, *a = NULL;
2823 struct pf_ruleset *ruleset = NULL;
2824 struct pf_src_node *nsn = NULL;
2825 u_short reason;
2826 int rewrite = 0;
2827 int tag = -1;
2828
2829 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
2830
2831 if (direction == PF_OUT) {
2832 bport = nport = uh->uh_sport;
2833 /* check outgoing packet for BINAT/NAT */
2834 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
2835 saddr, uh->uh_sport, daddr, uh->uh_dport,
2836 &pd->naddr, &nport)) != NULL) {
2837 PF_ACPY(&pd->baddr, saddr, af);
2838 pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
2839 &uh->uh_sum, &pd->naddr, nport, 1, af);
2840 rewrite++;
2841 if (nr->natpass)
2842 r = NULL;
2843 pd->nat_rule = nr;
2844 }
2845 } else {
2846 bport = nport = uh->uh_dport;
2847 /* check incoming packet for BINAT/RDR */
2848 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
2849 saddr, uh->uh_sport, daddr, uh->uh_dport, &pd->naddr,
2850 &nport)) != NULL) {
2851 PF_ACPY(&pd->baddr, daddr, af);
2852 pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
2853 &uh->uh_sum, &pd->naddr, nport, 1, af);
2854 rewrite++;
2855 if (nr->natpass)
2856 r = NULL;
2857 pd->nat_rule = nr;
2858 }
2859 }
2860
2861 while (r != NULL) {
2862 r->evaluations++;
2863 if (r->kif != NULL &&
2864 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
2865 r = r->skip[PF_SKIP_IFP].ptr;
2866 else if (r->direction && r->direction != direction)
2867 r = r->skip[PF_SKIP_DIR].ptr;
2868 else if (r->af && r->af != af)
2869 r = r->skip[PF_SKIP_AF].ptr;
2870 else if (r->proto && r->proto != IPPROTO_UDP)
2871 r = r->skip[PF_SKIP_PROTO].ptr;
2872 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, r->src.not))
2873 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
2874 else if (r->src.port_op && !pf_match_port(r->src.port_op,
2875 r->src.port[0], r->src.port[1], uh->uh_sport))
2876 r = r->skip[PF_SKIP_SRC_PORT].ptr;
2877 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, r->dst.not))
2878 r = r->skip[PF_SKIP_DST_ADDR].ptr;
2879 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2880 r->dst.port[0], r->dst.port[1], uh->uh_dport))
2881 r = r->skip[PF_SKIP_DST_PORT].ptr;
2882 else if (r->tos && !(r->tos & pd->tos))
2883 r = TAILQ_NEXT(r, entries);
2884 else if (r->rule_flag & PFRULE_FRAGMENT)
2885 r = TAILQ_NEXT(r, entries);
2886 else if (r->uid.op && (lookup != -1 || (lookup =
2887 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2888 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
2889 uid))
2890 r = TAILQ_NEXT(r, entries);
2891 else if (r->gid.op && (lookup != -1 || (lookup =
2892 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2893 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
2894 gid))
2895 r = TAILQ_NEXT(r, entries);
2896 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
2897 r = TAILQ_NEXT(r, entries);
2898 else if (r->anchorname[0] && r->anchor == NULL)
2899 r = TAILQ_NEXT(r, entries);
2900 else if (r->os_fingerprint != PF_OSFP_ANY)
2901 r = TAILQ_NEXT(r, entries);
2902 else {
2903 if (r->tag)
2904 tag = r->tag;
2905 if (r->anchor == NULL) {
2906 *rm = r;
2907 *am = a;
2908 *rsm = ruleset;
2909 if ((*rm)->quick)
2910 break;
2911 r = TAILQ_NEXT(r, entries);
2912 } else
2913 PF_STEP_INTO_ANCHOR(r, a, ruleset,
2914 PF_RULESET_FILTER);
2915 }
2916 if (r == NULL && a != NULL)
2917 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
2918 PF_RULESET_FILTER);
2919 }
2920 r = *rm;
2921 a = *am;
2922 ruleset = *rsm;
2923
2924 REASON_SET(&reason, PFRES_MATCH);
2925
2926 if (r->log) {
2927 if (rewrite)
2928 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
2929 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
2930 }
2931
2932 if ((r->action == PF_DROP) &&
2933 ((r->rule_flag & PFRULE_RETURNICMP) ||
2934 (r->rule_flag & PFRULE_RETURN))) {
2935 /* undo NAT changes, if they have taken place */
2936 if (nr != NULL) {
2937 if (direction == PF_OUT) {
2938 pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
2939 &uh->uh_sum, &pd->baddr, bport, 1, af);
2940 rewrite++;
2941 } else {
2942 pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
2943 &uh->uh_sum, &pd->baddr, bport, 1, af);
2944 rewrite++;
2945 }
2946 }
2947 if ((af == AF_INET) && r->return_icmp)
2948 pf_send_icmp(m, r->return_icmp >> 8,
2949 r->return_icmp & 255, af, r);
2950 else if ((af == AF_INET6) && r->return_icmp6)
2951 pf_send_icmp(m, r->return_icmp6 >> 8,
2952 r->return_icmp6 & 255, af, r);
2953 }
2954
2955 if (r->action == PF_DROP)
2956 return (PF_DROP);
2957
2958 pf_tag_packet(m, tag);
2959
2960 if (r->keep_state || nr != NULL) {
2961 /* create new state */
2962 struct pf_state *s = NULL;
2963 struct pf_src_node *sn = NULL;
2964
2965 /* check maximums */
2966 if (r->max_states && (r->states >= r->max_states))
2967 goto cleanup;
2968		/* src node for filter rule */
2969 if ((r->rule_flag & PFRULE_SRCTRACK ||
2970 r->rpool.opts & PF_POOL_STICKYADDR) &&
2971 pf_insert_src_node(&sn, r, saddr, af) != 0)
2972 goto cleanup;
2973 /* src node for translation rule */
2974 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
2975 ((direction == PF_OUT &&
2976 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
2977 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
2978 goto cleanup;
2979 s = pool_get(&pf_state_pl, PR_NOWAIT);
2980 if (s == NULL) {
2981cleanup:
2982 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
2983 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
2984 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2985 pf_status.src_nodes--;
2986 pool_put(&pf_src_tree_pl, sn);
2987 }
2988 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
2989 nsn->expire == 0) {
2990 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
2991 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2992 pf_status.src_nodes--;
2993 pool_put(&pf_src_tree_pl, nsn);
2994 }
2995 REASON_SET(&reason, PFRES_MEMORY);
2996 return (PF_DROP);
2997 }
2998 bzero(s, sizeof(*s));
2999 r->states++;
3000 if (a != NULL)
3001 a->states++;
3002 s->rule.ptr = r;
3003 s->nat_rule.ptr = nr;
3004 if (s->nat_rule.ptr != NULL)
3005 s->nat_rule.ptr->states++;
3006 s->anchor.ptr = a;
3007 s->allow_opts = r->allow_opts;
3008 s->log = r->log & 2;
3009 s->proto = IPPROTO_UDP;
3010 s->direction = direction;
3011 s->af = af;
3012 if (direction == PF_OUT) {
3013 PF_ACPY(&s->gwy.addr, saddr, af);
3014 s->gwy.port = uh->uh_sport;
3015 PF_ACPY(&s->ext.addr, daddr, af);
3016 s->ext.port = uh->uh_dport;
3017 if (nr != NULL) {
3018 PF_ACPY(&s->lan.addr, &pd->baddr, af);
3019 s->lan.port = bport;
3020 } else {
3021 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
3022 s->lan.port = s->gwy.port;
3023 }
3024 } else {
3025 PF_ACPY(&s->lan.addr, daddr, af);
3026 s->lan.port = uh->uh_dport;
3027 PF_ACPY(&s->ext.addr, saddr, af);
3028 s->ext.port = uh->uh_sport;
3029 if (nr != NULL) {
3030 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
3031 s->gwy.port = bport;
3032 } else {
3033 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
3034 s->gwy.port = s->lan.port;
3035 }
3036 }
3037 s->src.state = PFUDPS_SINGLE;
3038 s->dst.state = PFUDPS_NO_TRAFFIC;
3039 s->creation = time_second;
3040 s->expire = time_second;
3041 s->timeout = PFTM_UDP_FIRST_PACKET;
3042 pf_set_rt_ifp(s, saddr);
3043 if (sn != NULL) {
3044 s->src_node = sn;
3045 s->src_node->states++;
3046 }
3047 if (nsn != NULL) {
3048 PF_ACPY(&nsn->raddr, &pd->naddr, af);
3049 s->nat_src_node = nsn;
3050 s->nat_src_node->states++;
3051 }
3052 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
3053 REASON_SET(&reason, PFRES_MEMORY);
3054 pf_src_tree_remove_state(s);
3055 pool_put(&pf_state_pl, s);
3056 return (PF_DROP);
3057 } else
3058 *sm = s;
3059 }
3060
3061 /* copy back packet headers if we performed NAT operations */
3062 if (rewrite)
3063 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
3064
3065 return (PF_PASS);
3066}
3067
3068int
3069pf_test_icmp(struct pf_rule **rm, struct pf_state **sm, int direction,
3070 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
3071 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm)
3072{
3073 struct pf_rule *nr = NULL;
3074 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3075 struct pf_rule *r, *a = NULL;
3076 struct pf_ruleset *ruleset = NULL;
3077 struct pf_src_node *nsn = NULL;
3078 u_short reason;
3079 u_int16_t icmpid = 0;
3080 sa_family_t af = pd->af;
3081 u_int8_t icmptype = 0, icmpcode = 0;
3082 int state_icmp = 0;
3083 int tag = -1;
3084#ifdef INET6
3085 int rewrite = 0;
3086#endif /* INET6 */
3087
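	/*
	 * ICMP error messages must not create state of their own; only
	 * queries (echo requests etc.) are tracked statefully.
	 */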
3088 switch (pd->proto) {
3089#ifdef INET
3090 case IPPROTO_ICMP:
3091 icmptype = pd->hdr.icmp->icmp_type;
3092 icmpcode = pd->hdr.icmp->icmp_code;
3093 icmpid = pd->hdr.icmp->icmp_id;
3094
3095 if (icmptype == ICMP_UNREACH ||
3096 icmptype == ICMP_SOURCEQUENCH ||
3097 icmptype == ICMP_REDIRECT ||
3098 icmptype == ICMP_TIMXCEED ||
3099 icmptype == ICMP_PARAMPROB)
3100 state_icmp++;
3101 break;
3102#endif /* INET */
3103#ifdef INET6
3104 case IPPROTO_ICMPV6:
3105 icmptype = pd->hdr.icmp6->icmp6_type;
3106 icmpcode = pd->hdr.icmp6->icmp6_code;
3107 icmpid = pd->hdr.icmp6->icmp6_id;
3108
3109 if (icmptype == ICMP6_DST_UNREACH ||
3110 icmptype == ICMP6_PACKET_TOO_BIG ||
3111 icmptype == ICMP6_TIME_EXCEEDED ||
3112 icmptype == ICMP6_PARAM_PROB)
3113 state_icmp++;
3114 break;
3115#endif /* INET6 */
3116 }
3117
3118 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3119
3120 if (direction == PF_OUT) {
3121 /* check outgoing packet for BINAT/NAT */
3122 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
3123 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3124 PF_ACPY(&pd->baddr, saddr, af);
3125 switch (af) {
3126#ifdef INET
3127 case AF_INET:
3128 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3129 pd->naddr.v4.s_addr, 0);
3130 break;
3131#endif /* INET */
3132#ifdef INET6
3133 case AF_INET6:
3134 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3135 &pd->naddr, 0);
3136 rewrite++;
3137 break;
3138#endif /* INET6 */
3139 }
3140 if (nr->natpass)
3141 r = NULL;
3142 pd->nat_rule = nr;
3143 }
3144 } else {
3145 /* check incoming packet for BINAT/RDR */
3146 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
3147 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3148 PF_ACPY(&pd->baddr, daddr, af);
3149 switch (af) {
3150#ifdef INET
3151 case AF_INET:
3152 pf_change_a(&daddr->v4.s_addr,
3153 pd->ip_sum, pd->naddr.v4.s_addr, 0);
3154 break;
3155#endif /* INET */
3156#ifdef INET6
3157 case AF_INET6:
3158 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3159 &pd->naddr, 0);
3160 rewrite++;
3161 break;
3162#endif /* INET6 */
3163 }
3164 if (nr->natpass)
3165 r = NULL;
3166 pd->nat_rule = nr;
3167 }
3168 }
3169
3170 while (r != NULL) {
3171 r->evaluations++;
3172 if (r->kif != NULL &&
3173 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
3174 r = r->skip[PF_SKIP_IFP].ptr;
3175 else if (r->direction && r->direction != direction)
3176 r = r->skip[PF_SKIP_DIR].ptr;
3177 else if (r->af && r->af != af)
3178 r = r->skip[PF_SKIP_AF].ptr;
3179 else if (r->proto && r->proto != pd->proto)
3180 r = r->skip[PF_SKIP_PROTO].ptr;
3181 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, r->src.not))
3182 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3183 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, r->dst.not))
3184 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3185 else if (r->type && r->type != icmptype + 1)
3186 r = TAILQ_NEXT(r, entries);
3187 else if (r->code && r->code != icmpcode + 1)
3188 r = TAILQ_NEXT(r, entries);
3189 else if (r->tos && !(r->tos & pd->tos))
3190 r = TAILQ_NEXT(r, entries);
3191 else if (r->rule_flag & PFRULE_FRAGMENT)
3192 r = TAILQ_NEXT(r, entries);
3193 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
3194 r = TAILQ_NEXT(r, entries);
3195 else if (r->anchorname[0] && r->anchor == NULL)
3196 r = TAILQ_NEXT(r, entries);
3197 else if (r->os_fingerprint != PF_OSFP_ANY)
3198 r = TAILQ_NEXT(r, entries);
3199 else {
3200 if (r->tag)
3201 tag = r->tag;
3202 if (r->anchor == NULL) {
3203 *rm = r;
3204 *am = a;
3205 *rsm = ruleset;
3206 if ((*rm)->quick)
3207 break;
3208 r = TAILQ_NEXT(r, entries);
3209 } else
3210 PF_STEP_INTO_ANCHOR(r, a, ruleset,
3211 PF_RULESET_FILTER);
3212 }
3213 if (r == NULL && a != NULL)
3214 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
3215 PF_RULESET_FILTER);
3216 }
3217 r = *rm;
3218 a = *am;
3219 ruleset = *rsm;
3220
3221 REASON_SET(&reason, PFRES_MATCH);
3222
3223 if (r->log) {
3224#ifdef INET6
3225 if (rewrite)
3226 m_copyback(m, off, sizeof(struct icmp6_hdr),
3227 (caddr_t)pd->hdr.icmp6);
3228#endif /* INET6 */
3229 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
3230 }
3231
3232 if (r->action != PF_PASS)
3233 return (PF_DROP);
3234
3235 pf_tag_packet(m, tag);
3236
3237 if (!state_icmp && (r->keep_state || nr != NULL)) {
3238 /* create new state */
3239 struct pf_state *s = NULL;
3240 struct pf_src_node *sn = NULL;
3241
3242 /* check maximums */
3243 if (r->max_states && (r->states >= r->max_states))
3244 goto cleanup;
3245		/* src node for filter rule */
3246 if ((r->rule_flag & PFRULE_SRCTRACK ||
3247 r->rpool.opts & PF_POOL_STICKYADDR) &&
3248 pf_insert_src_node(&sn, r, saddr, af) != 0)
3249 goto cleanup;
3250 /* src node for translation rule */
3251 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3252 ((direction == PF_OUT &&
3253 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
3254 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
3255 goto cleanup;
3256 s = pool_get(&pf_state_pl, PR_NOWAIT);
3257 if (s == NULL) {
3258cleanup:
3259 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3260 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
3261 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3262 pf_status.src_nodes--;
3263 pool_put(&pf_src_tree_pl, sn);
3264 }
3265 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
3266 nsn->expire == 0) {
3267 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
3268 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3269 pf_status.src_nodes--;
3270 pool_put(&pf_src_tree_pl, nsn);
3271 }
3272 REASON_SET(&reason, PFRES_MEMORY);
3273 return (PF_DROP);
3274 }
3275 bzero(s, sizeof(*s));
3276 r->states++;
3277 if (a != NULL)
3278 a->states++;
3279 s->rule.ptr = r;
3280 s->nat_rule.ptr = nr;
3281 if (s->nat_rule.ptr != NULL)
3282 s->nat_rule.ptr->states++;
3283 s->anchor.ptr = a;
3284 s->allow_opts = r->allow_opts;
3285 s->log = r->log & 2;
3286 s->proto = pd->proto;
3287 s->direction = direction;
3288 s->af = af;
3289 if (direction == PF_OUT) {
3290 PF_ACPY(&s->gwy.addr, saddr, af);
3291 s->gwy.port = icmpid;
3292 PF_ACPY(&s->ext.addr, daddr, af);
3293 s->ext.port = icmpid;
3294 if (nr != NULL)
3295 PF_ACPY(&s->lan.addr, &pd->baddr, af);
3296 else
3297 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
3298 s->lan.port = icmpid;
3299 } else {
3300 PF_ACPY(&s->lan.addr, daddr, af);
3301 s->lan.port = icmpid;
3302 PF_ACPY(&s->ext.addr, saddr, af);
3303 s->ext.port = icmpid;
3304 if (nr != NULL)
3305 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
3306 else
3307 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
3308 s->gwy.port = icmpid;
3309 }
3310 s->creation = time_second;
3311 s->expire = time_second;
3312 s->timeout = PFTM_ICMP_FIRST_PACKET;
3313 pf_set_rt_ifp(s, saddr);
3314 if (sn != NULL) {
3315 s->src_node = sn;
3316 s->src_node->states++;
3317 }
3318 if (nsn != NULL) {
3319 PF_ACPY(&nsn->raddr, &pd->naddr, af);
3320 s->nat_src_node = nsn;
3321 s->nat_src_node->states++;
3322 }
3323 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
3324 REASON_SET(&reason, PFRES_MEMORY);
3325 pf_src_tree_remove_state(s);
3326 pool_put(&pf_state_pl, s);
3327 return (PF_DROP);
3328 } else
3329 *sm = s;
3330 }
3331
3332#ifdef INET6
3333 /* copy back packet headers if we performed IPv6 NAT operations */
3334 if (rewrite)
3335 m_copyback(m, off, sizeof(struct icmp6_hdr),
3336 (caddr_t)pd->hdr.icmp6);
3337#endif /* INET6 */
3338
3339 return (PF_PASS);
3340}
3341
3342int
3343pf_test_other(struct pf_rule **rm, struct pf_state **sm, int direction,
3344 struct pfi_kif *kif, struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
3345 struct pf_rule **am, struct pf_ruleset **rsm)
3346{
3347 struct pf_rule *nr = NULL;
3348 struct pf_rule *r, *a = NULL;
3349 struct pf_ruleset *ruleset = NULL;
3350 struct pf_src_node *nsn = NULL;
3351 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3352 sa_family_t af = pd->af;
3353 u_short reason;
3354 int tag = -1;
3355
3356 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3357
3358 if (direction == PF_OUT) {
3359 /* check outgoing packet for BINAT/NAT */
3360 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
3361 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3362 PF_ACPY(&pd->baddr, saddr, af);
3363 switch (af) {
3364#ifdef INET
3365 case AF_INET:
3366 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3367 pd->naddr.v4.s_addr, 0);
3368 break;
3369#endif /* INET */
3370#ifdef INET6
3371 case AF_INET6:
3372 PF_ACPY(saddr, &pd->naddr, af);
3373 break;
3374#endif /* INET6 */
3375 }
3376 if (nr->natpass)
3377 r = NULL;
3378 pd->nat_rule = nr;
3379 }
3380 } else {
3381 /* check incoming packet for BINAT/RDR */
3382 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
3383 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3384 PF_ACPY(&pd->baddr, daddr, af);
3385 switch (af) {
3386#ifdef INET
3387 case AF_INET:
3388 pf_change_a(&daddr->v4.s_addr,
3389 pd->ip_sum, pd->naddr.v4.s_addr, 0);
3390 break;
3391#endif /* INET */
3392#ifdef INET6
3393 case AF_INET6:
3394 PF_ACPY(daddr, &pd->naddr, af);
3395 break;
3396#endif /* INET6 */
3397 }
3398 if (nr->natpass)
3399 r = NULL;
3400 pd->nat_rule = nr;
3401 }
3402 }
3403
3404 while (r != NULL) {
3405 r->evaluations++;
3406 if (r->kif != NULL &&
3407 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
3408 r = r->skip[PF_SKIP_IFP].ptr;
3409 else if (r->direction && r->direction != direction)
3410 r = r->skip[PF_SKIP_DIR].ptr;
3411 else if (r->af && r->af != af)
3412 r = r->skip[PF_SKIP_AF].ptr;
3413 else if (r->proto && r->proto != pd->proto)
3414 r = r->skip[PF_SKIP_PROTO].ptr;
3415 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
3416 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3417 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
3418 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3419 else if (r->tos && !(r->tos & pd->tos))
3420 r = TAILQ_NEXT(r, entries);
3421 else if (r->rule_flag & PFRULE_FRAGMENT)
3422 r = TAILQ_NEXT(r, entries);
3423 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
3424 r = TAILQ_NEXT(r, entries);
3425 else if (r->anchorname[0] && r->anchor == NULL)
3426 r = TAILQ_NEXT(r, entries);
3427 else if (r->os_fingerprint != PF_OSFP_ANY)
3428 r = TAILQ_NEXT(r, entries);
3429 else {
3430 if (r->tag)
3431 tag = r->tag;
3432 if (r->anchor == NULL) {
3433 *rm = r;
3434 *am = a;
3435 *rsm = ruleset;
3436 if ((*rm)->quick)
3437 break;
3438 r = TAILQ_NEXT(r, entries);
3439 } else
3440 PF_STEP_INTO_ANCHOR(r, a, ruleset,
3441 PF_RULESET_FILTER);
3442 }
3443 if (r == NULL && a != NULL)
3444 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
3445 PF_RULESET_FILTER);
3446 }
3447 r = *rm;
3448 a = *am;
3449 ruleset = *rsm;
3450
3451 REASON_SET(&reason, PFRES_MATCH);
3452
3453 if (r->log)
3454 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
3455
3456 if ((r->action == PF_DROP) &&
3457 ((r->rule_flag & PFRULE_RETURNICMP) ||
3458 (r->rule_flag & PFRULE_RETURN))) {
3459 struct pf_addr *a = NULL;
3460
3461 if (nr != NULL) {
3462 if (direction == PF_OUT)
3463 a = saddr;
3464 else
3465 a = daddr;
3466 }
3467 if (a != NULL) {
3468 switch (af) {
3469#ifdef INET
3470 case AF_INET:
3471 pf_change_a(&a->v4.s_addr, pd->ip_sum,
3472 pd->baddr.v4.s_addr, 0);
3473 break;
3474#endif /* INET */
3475#ifdef INET6
3476 case AF_INET6:
3477 PF_ACPY(a, &pd->baddr, af);
3478 break;
3479#endif /* INET6 */
3480 }
3481 }
3482 if ((af == AF_INET) && r->return_icmp)
3483 pf_send_icmp(m, r->return_icmp >> 8,
3484 r->return_icmp & 255, af, r);
3485 else if ((af == AF_INET6) && r->return_icmp6)
3486 pf_send_icmp(m, r->return_icmp6 >> 8,
3487 r->return_icmp6 & 255, af, r);
3488 }
3489
3490 if (r->action != PF_PASS)
3491 return (PF_DROP);
3492
3493 pf_tag_packet(m, tag);
3494
3495 if (r->keep_state || nr != NULL) {
3496 /* create new state */
3497 struct pf_state *s = NULL;
3498 struct pf_src_node *sn = NULL;
3499
3500 /* check maximums */
3501 if (r->max_states && (r->states >= r->max_states))
3502 goto cleanup;
3503		/* src node for filter rule */
3504 if ((r->rule_flag & PFRULE_SRCTRACK ||
3505 r->rpool.opts & PF_POOL_STICKYADDR) &&
3506 pf_insert_src_node(&sn, r, saddr, af) != 0)
3507 goto cleanup;
3508 /* src node for translation rule */
3509 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3510 ((direction == PF_OUT &&
3511 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
3512 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
3513 goto cleanup;
3514 s = pool_get(&pf_state_pl, PR_NOWAIT);
3515 if (s == NULL) {
3516cleanup:
3517 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3518 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
3519 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3520 pf_status.src_nodes--;
3521 pool_put(&pf_src_tree_pl, sn);
3522 }
3523 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
3524 nsn->expire == 0) {
3525 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
3526 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3527 pf_status.src_nodes--;
3528 pool_put(&pf_src_tree_pl, nsn);
3529 }
3530 REASON_SET(&reason, PFRES_MEMORY);
3531 return (PF_DROP);
3532 }
3533 bzero(s, sizeof(*s));
3534 r->states++;
3535 if (a != NULL)
3536 a->states++;
3537 s->rule.ptr = r;
3538 s->nat_rule.ptr = nr;
3539 if (s->nat_rule.ptr != NULL)
3540 s->nat_rule.ptr->states++;
3541 s->anchor.ptr = a;
3542 s->allow_opts = r->allow_opts;
3543 s->log = r->log & 2;
3544 s->proto = pd->proto;
3545 s->direction = direction;
3546 s->af = af;
3547 if (direction == PF_OUT) {
3548 PF_ACPY(&s->gwy.addr, saddr, af);
3549 PF_ACPY(&s->ext.addr, daddr, af);
3550 if (nr != NULL)
3551 PF_ACPY(&s->lan.addr, &pd->baddr, af);
3552 else
3553 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
3554 } else {
3555 PF_ACPY(&s->lan.addr, daddr, af);
3556 PF_ACPY(&s->ext.addr, saddr, af);
3557 if (nr != NULL)
3558 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
3559 else
3560 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
3561 }
3562 s->src.state = PFOTHERS_SINGLE;
3563 s->dst.state = PFOTHERS_NO_TRAFFIC;
3564 s->creation = time_second;
3565 s->expire = time_second;
3566 s->timeout = PFTM_OTHER_FIRST_PACKET;
3567 pf_set_rt_ifp(s, saddr);
3568 if (sn != NULL) {
3569 s->src_node = sn;
3570 s->src_node->states++;
3571 }
3572 if (nsn != NULL) {
3573 PF_ACPY(&nsn->raddr, &pd->naddr, af);
3574 s->nat_src_node = nsn;
3575 s->nat_src_node->states++;
3576 }
3577 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
3578 REASON_SET(&reason, PFRES_MEMORY);
3579 pf_src_tree_remove_state(s);
3580 pool_put(&pf_state_pl, s);
3581 return (PF_DROP);
3582 } else
3583 *sm = s;
3584 }
3585
3586 return (PF_PASS);
3587}
3588
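/*
 * Filter fragments that could not be reassembled: only rules that do
 * not inspect ports, TCP flags, ICMP types/codes or OS fingerprints
 * can match, and no state is created for them.
 */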
3589int
3590pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3591 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3592 struct pf_ruleset **rsm)
3593{
3594 struct pf_rule *r, *a = NULL;
3595 struct pf_ruleset *ruleset = NULL;
3596 sa_family_t af = pd->af;
3597 u_short reason;
3598 int tag = -1;
3599
3600 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3601 while (r != NULL) {
3602 r->evaluations++;
3603 if (r->kif != NULL &&
3604 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
3605 r = r->skip[PF_SKIP_IFP].ptr;
3606 else if (r->direction && r->direction != direction)
3607 r = r->skip[PF_SKIP_DIR].ptr;
3608 else if (r->af && r->af != af)
3609 r = r->skip[PF_SKIP_AF].ptr;
3610 else if (r->proto && r->proto != pd->proto)
3611 r = r->skip[PF_SKIP_PROTO].ptr;
3612 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
3613 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3614 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
3615 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3616 else if (r->tos && !(r->tos & pd->tos))
3617 r = TAILQ_NEXT(r, entries);
3618 else if (r->src.port_op || r->dst.port_op ||
3619 r->flagset || r->type || r->code ||
3620 r->os_fingerprint != PF_OSFP_ANY)
3621 r = TAILQ_NEXT(r, entries);
3622 else if (r->match_tag && !pf_match_tag(m, r, NULL, &tag))
3623 r = TAILQ_NEXT(r, entries);
3624 else if (r->anchorname[0] && r->anchor == NULL)
3625 r = TAILQ_NEXT(r, entries);
3626 else {
3627 if (r->anchor == NULL) {
3628 *rm = r;
3629 *am = a;
3630 *rsm = ruleset;
3631 if ((*rm)->quick)
3632 break;
3633 r = TAILQ_NEXT(r, entries);
3634 } else
3635 PF_STEP_INTO_ANCHOR(r, a, ruleset,
3636 PF_RULESET_FILTER);
3637 }
3638 if (r == NULL && a != NULL)
3639 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
3640 PF_RULESET_FILTER);
3641 }
3642 r = *rm;
3643 a = *am;
3644 ruleset = *rsm;
3645
3646 REASON_SET(&reason, PFRES_MATCH);
3647
3648 if (r->log)
3649 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
3650
3651 if (r->action != PF_PASS)
3652 return (PF_DROP);
3653
3654 pf_tag_packet(m, tag);
3655
3656 return (PF_PASS);
3657}
3658
3659int
3660pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
3661 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
3662 u_short *reason)
3663{
3664 struct pf_state key;
3665 struct tcphdr *th = pd->hdr.tcp;
3666 u_int16_t win = ntohs(th->th_win);
3667 u_int32_t ack, end, seq;
3668 u_int8_t sws, dws;
3669 int ackskew;
3670 int copyback = 0;
3671 struct pf_state_peer *src, *dst;
3672
3673 key.af = pd->af;
3674 key.proto = IPPROTO_TCP;
3675 if (direction == PF_IN) {
3676 PF_ACPY(&key.ext.addr, pd->src, key.af);
3677 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
3678 key.ext.port = th->th_sport;
3679 key.gwy.port = th->th_dport;
3680 } else {
3681 PF_ACPY(&key.lan.addr, pd->src, key.af);
3682 PF_ACPY(&key.ext.addr, pd->dst, key.af);
3683 key.lan.port = th->th_sport;
3684 key.ext.port = th->th_dport;
3685 }
3686
3687 STATE_LOOKUP();
3688
3689 if (direction == (*state)->direction) {
3690 src = &(*state)->src;
3691 dst = &(*state)->dst;
3692 } else {
3693 src = &(*state)->dst;
3694 dst = &(*state)->src;
3695 }
3696
3697 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
3698 if (direction != (*state)->direction)
3699 return (PF_SYNPROXY_DROP);
3700 if (th->th_flags & TH_SYN) {
3701 if (ntohl(th->th_seq) != (*state)->src.seqlo)
3702 return (PF_DROP);
3703 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
3704 pd->src, th->th_dport, th->th_sport,
3705 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
3706 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0);
3707 return (PF_SYNPROXY_DROP);
3708 } else if (!(th->th_flags & TH_ACK) ||
3709 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
3710 (ntohl(th->th_seq) != (*state)->src.seqlo + 1))
3711 return (PF_DROP);
3712 else
3713 (*state)->src.state = PF_TCPS_PROXY_DST;
3714 }
3715 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
3716 struct pf_state_host *src, *dst;
3717
3718 if (direction == PF_OUT) {
3719 src = &(*state)->gwy;
3720 dst = &(*state)->ext;
3721 } else {
3722 src = &(*state)->ext;
3723 dst = &(*state)->lan;
3724 }
3725 if (direction == (*state)->direction) {
3726 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
3727 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
3728 (ntohl(th->th_seq) != (*state)->src.seqlo + 1))
3729 return (PF_DROP);
3730 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
3731 if ((*state)->dst.seqhi == 1)
3732 (*state)->dst.seqhi = arc4random();
3733 pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
3734 &dst->addr, src->port, dst->port,
3735 (*state)->dst.seqhi, 0, TH_SYN, 0,
3736 (*state)->src.mss, 0);
3737 return (PF_SYNPROXY_DROP);
3738 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
3739 (TH_SYN|TH_ACK)) ||
3740 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1))
3741 return (PF_DROP);
3742 else {
3743 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
3744 (*state)->dst.seqlo = ntohl(th->th_seq);
3745 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
3746 pd->src, th->th_dport, th->th_sport,
3747 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
3748 TH_ACK, (*state)->src.max_win, 0, 0);
3749 pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
3750 &dst->addr, src->port, dst->port,
3751 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
3752 TH_ACK, (*state)->dst.max_win, 0, 0);
3753 (*state)->src.seqdiff = (*state)->dst.seqhi -
3754 (*state)->src.seqlo;
3755 (*state)->dst.seqdiff = (*state)->src.seqhi -
3756 (*state)->dst.seqlo;
3757 (*state)->src.seqhi = (*state)->src.seqlo +
3758 (*state)->src.max_win;
3759 (*state)->dst.seqhi = (*state)->dst.seqlo +
3760 (*state)->dst.max_win;
3761 (*state)->src.wscale = (*state)->dst.wscale = 0;
3762 (*state)->src.state = (*state)->dst.state =
3763 TCPS_ESTABLISHED;
3764 return (PF_SYNPROXY_DROP);
3765 }
3766 }
3767
3768 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3769 sws = src->wscale & PF_WSCALE_MASK;
3770 dws = dst->wscale & PF_WSCALE_MASK;
3771 } else
3772 sws = dws = 0;
3773
3774 /*
3775 * Sequence tracking algorithm from Guido van Rooij's paper:
3776 * http://www.madison-gurkha.com/publications/tcp_filtering/
3777 * tcp_filtering.ps
3778 */
3779
3780 seq = ntohl(th->th_seq);
3781 if (src->seqlo == 0) {
3782 /* First packet from this end. Set its state */
3783
3784 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3785 src->scrub == NULL) {
3786 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3787 REASON_SET(reason, PFRES_MEMORY);
3788 return (PF_DROP);
3789 }
3790 }
3791
3792 /* Deferred generation of sequence number modulator */
3793 if (dst->seqdiff && !src->seqdiff) {
3794 while ((src->seqdiff = arc4random()) == 0)
3795 ;
3796 ack = ntohl(th->th_ack) - dst->seqdiff;
3797 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3798 src->seqdiff), 0);
3799 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3800 copyback = 1;
3801 } else {
3802 ack = ntohl(th->th_ack);
3803 }
3804
3805 end = seq + pd->p_len;
3806 if (th->th_flags & TH_SYN) {
3807 end++;
3808 if (dst->wscale & PF_WSCALE_FLAG) {
3809 src->wscale = pf_get_wscale(m, off, th->th_off,
3810 pd->af);
3811 if (src->wscale & PF_WSCALE_FLAG) {
3812 /* Remove scale factor from initial
3813 * window */
3814 sws = src->wscale & PF_WSCALE_MASK;
3815 win = ((u_int32_t)win + (1 << sws) - 1)
3816 >> sws;
3817 dws = dst->wscale & PF_WSCALE_MASK;
3818 } else {
3819 /* fixup other window */
3820 dst->max_win <<= dst->wscale &
3821 PF_WSCALE_MASK;
3822 /* in case of a retrans SYN|ACK */
3823 dst->wscale = 0;
3824 }
3825 }
3826 }
3827 if (th->th_flags & TH_FIN)
3828 end++;
3829
3830 src->seqlo = seq;
3831 if (src->state < TCPS_SYN_SENT)
3832 src->state = TCPS_SYN_SENT;
3833
3834 /*
3835 * May need to slide the window (seqhi may have been set by
3836 * the crappy stack check or if we picked up the connection
3837 * after establishment)
3838 */
3839 if (src->seqhi == 1 ||
3840 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3841 src->seqhi = end + MAX(1, dst->max_win << dws);
3842 if (win > src->max_win)
3843 src->max_win = win;
3844
3845 } else {
3846 ack = ntohl(th->th_ack) - dst->seqdiff;
3847 if (src->seqdiff) {
3848 /* Modulate sequence numbers */
3849 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3850 src->seqdiff), 0);
3851 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3852 copyback = 1;
3853 }
3854 end = seq + pd->p_len;
3855 if (th->th_flags & TH_SYN)
3856 end++;
3857 if (th->th_flags & TH_FIN)
3858 end++;
3859 }
3860
3861 if ((th->th_flags & TH_ACK) == 0) {
3862 /* Let it pass through the ack skew check */
3863 ack = dst->seqlo;
3864 } else if ((ack == 0 &&
3865 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3866 /* broken tcp stacks do not set ack */
3867 (dst->state < TCPS_SYN_SENT)) {
3868 /*
3869 * Many stacks (ours included) will set the ACK number in an
3870 * FIN|ACK if the SYN times out -- no sequence to ACK.
3871 */
3872 ack = dst->seqlo;
3873 }
3874
3875 if (seq == end) {
3876		/* Ease sequencing restrictions on packets carrying no data */
3877 seq = src->seqlo;
3878 end = seq;
3879 }
3880
3881 ackskew = dst->seqlo - ack;
3882
3883#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
3884 if (SEQ_GEQ(src->seqhi, end) &&
3885 /* Last octet inside other's window space */
3886 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3887 /* Retrans: not more than one window back */
3888 (ackskew >= -MAXACKWINDOW) &&
3889 /* Acking not more than one reassembled fragment backwards */
3890 (ackskew <= (MAXACKWINDOW << sws))) {
3891 /* Acking not more than one window forward */
3892
3893 /* update max window */
3894 if (src->max_win < win)
3895 src->max_win = win;
3896 /* synchronize sequencing */
3897 if (SEQ_GT(end, src->seqlo))
3898 src->seqlo = end;
3899 /* slide the window of what the other end can send */
3900 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3901 dst->seqhi = ack + MAX((win << sws), 1);
3902
3903
3904 /* update states */
3905 if (th->th_flags & TH_SYN)
3906 if (src->state < TCPS_SYN_SENT)
3907 src->state = TCPS_SYN_SENT;
3908 if (th->th_flags & TH_FIN)
3909 if (src->state < TCPS_CLOSING)
3910 src->state = TCPS_CLOSING;
3911 if (th->th_flags & TH_ACK) {
3912 if (dst->state == TCPS_SYN_SENT)
3913 dst->state = TCPS_ESTABLISHED;
3914 else if (dst->state == TCPS_CLOSING)
3915 dst->state = TCPS_FIN_WAIT_2;
3916 }
3917 if (th->th_flags & TH_RST)
3918 src->state = dst->state = TCPS_TIME_WAIT;
3919
3920 /* update expire time */
3921 (*state)->expire = time_second;
3922 if (src->state >= TCPS_FIN_WAIT_2 &&
3923 dst->state >= TCPS_FIN_WAIT_2)
3924 (*state)->timeout = PFTM_TCP_CLOSED;
3925 else if (src->state >= TCPS_FIN_WAIT_2 ||
3926 dst->state >= TCPS_FIN_WAIT_2)
3927 (*state)->timeout = PFTM_TCP_FIN_WAIT;
3928 else if (src->state < TCPS_ESTABLISHED ||
3929 dst->state < TCPS_ESTABLISHED)
3930 (*state)->timeout = PFTM_TCP_OPENING;
3931 else if (src->state >= TCPS_CLOSING ||
3932 dst->state >= TCPS_CLOSING)
3933 (*state)->timeout = PFTM_TCP_CLOSING;
3934 else
3935 (*state)->timeout = PFTM_TCP_ESTABLISHED;
3936
3937 /* Fall through to PASS packet */
3938
3939 } else if ((dst->state < TCPS_SYN_SENT ||
3940 dst->state >= TCPS_FIN_WAIT_2 ||
3941 src->state >= TCPS_FIN_WAIT_2) &&
3942 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
3943 /* Within a window forward of the originating packet */
3944 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
3945 /* Within a window backward of the originating packet */
3946
3947 /*
3948 * This currently handles three situations:
3949 * 1) Stupid stacks will shotgun SYNs before their peer
3950 * replies.
3951 * 2) When PF catches an already established stream (the
3952 * firewall rebooted, the state table was flushed, routes
3953 * changed...)
3954 * 3) Packets get funky immediately after the connection
3955 * closes (this should catch Solaris spurious ACK|FINs
3956 * that web servers like to spew after a close)
3957 *
3958 * This must be a little more careful than the above code
3959 * since packet floods will also be caught here. We don't
3960 * update the TTL here to mitigate the damage of a packet
3961 * flood and so the same code can handle awkward establishment
3962 * and a loosened connection close.
3963 * In the establishment case, a correct peer response will
3964 * validate the connection, go through the normal state code
3965 * and keep updating the state TTL.
3966 */
3967
3968 if (pf_status.debug >= PF_DEBUG_MISC) {
3969 printf("pf: loose state match: ");
3970 pf_print_state(*state);
3971 pf_print_flags(th->th_flags);
3972 printf(" seq=%u ack=%u len=%u ackskew=%d pkts=%d:%d\n",
3973 seq, ack, pd->p_len, ackskew,
3974 (*state)->packets[0], (*state)->packets[1]);
3975 }
3976
3977 /* update max window */
3978 if (src->max_win < win)
3979 src->max_win = win;
3980 /* synchronize sequencing */
3981 if (SEQ_GT(end, src->seqlo))
3982 src->seqlo = end;
3983 /* slide the window of what the other end can send */
3984 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3985 dst->seqhi = ack + MAX((win << sws), 1);
3986
3987 /*
3988 * Cannot set dst->seqhi here since this could be a shotgunned
3989 * SYN and not an already established connection.
3990 */
3991
3992 if (th->th_flags & TH_FIN)
3993 if (src->state < TCPS_CLOSING)
3994 src->state = TCPS_CLOSING;
3995 if (th->th_flags & TH_RST)
3996 src->state = dst->state = TCPS_TIME_WAIT;
3997
3998 /* Fall through to PASS packet */
3999
4000 } else {
4001 if ((*state)->dst.state == TCPS_SYN_SENT &&
4002 (*state)->src.state == TCPS_SYN_SENT) {
4003 /* Send RST for state mismatches during handshake */
4004 if (!(th->th_flags & TH_RST)) {
4005 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
4006
4007 if (th->th_flags & TH_SYN)
4008 ack++;
4009 if (th->th_flags & TH_FIN)
4010 ack++;
4011 pf_send_tcp((*state)->rule.ptr, pd->af,
4012 pd->dst, pd->src, th->th_dport,
4013 th->th_sport, ntohl(th->th_ack), ack,
4014 TH_RST|TH_ACK, 0, 0,
4015 (*state)->rule.ptr->return_ttl);
4016 }
4017 src->seqlo = 0;
4018 src->seqhi = 1;
4019 src->max_win = 1;
4020 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4021 printf("pf: BAD state: ");
4022 pf_print_state(*state);
4023 pf_print_flags(th->th_flags);
4024 printf(" seq=%u ack=%u len=%u ackskew=%d pkts=%d:%d "
4025 "dir=%s,%s\n", seq, ack, pd->p_len, ackskew,
4026 (*state)->packets[0], (*state)->packets[1],
4027 direction == PF_IN ? "in" : "out",
4028 direction == (*state)->direction ? "fwd" : "rev");
4029 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4030 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4031 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4032 ' ': '2',
4033 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4034 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4035 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4036 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4037 }
4038 return (PF_DROP);
4039 }
4040
4041 if (dst->scrub || src->scrub) {
4042 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4043 src, dst, &copyback))
4044 return (PF_DROP);
4045 }
4046
4047 /* Any packets which have gotten here are to be passed */
4048
4049 /* translate source/destination address, if necessary */
4050 if (STATE_TRANSLATE(*state)) {
4051 if (direction == PF_OUT)
4052 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4053 &th->th_sum, &(*state)->gwy.addr,
4054 (*state)->gwy.port, 0, pd->af);
4055 else
4056 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4057 &th->th_sum, &(*state)->lan.addr,
4058 (*state)->lan.port, 0, pd->af);
4059 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4060 } else if (copyback) {
4061 /* Copyback sequence modulation or stateful scrub changes */
4062 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4063 }
4064
4065 return (PF_PASS);
4066}
4067
4068int
4069pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4070 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4071{
4072 struct pf_state_peer *src, *dst;
4073 struct pf_state key;
4074 struct udphdr *uh = pd->hdr.udp;
4075
4076 key.af = pd->af;
4077 key.proto = IPPROTO_UDP;
4078 if (direction == PF_IN) {
4079 PF_ACPY(&key.ext.addr, pd->src, key.af);
4080 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
4081 key.ext.port = uh->uh_sport;
4082 key.gwy.port = uh->uh_dport;
4083 } else {
4084 PF_ACPY(&key.lan.addr, pd->src, key.af);
4085 PF_ACPY(&key.ext.addr, pd->dst, key.af);
4086 key.lan.port = uh->uh_sport;
4087 key.ext.port = uh->uh_dport;
4088 }
4089
4090 STATE_LOOKUP();
4091
4092 if (direction == (*state)->direction) {
4093 src = &(*state)->src;
4094 dst = &(*state)->dst;
4095 } else {
4096 src = &(*state)->dst;
4097 dst = &(*state)->src;
4098 }
4099
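	/*
	 * UDP has no real connection state, so pf keeps a two-step pseudo
	 * state: a peer becomes SINGLE once it has sent a packet, and the
	 * other peer becomes MULTIPLE when traffic has been seen in both
	 * directions, which selects the PFTM_UDP_MULTIPLE timeout below.
	 */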
4100 /* update states */
4101 if (src->state < PFUDPS_SINGLE)
4102 src->state = PFUDPS_SINGLE;
4103 if (dst->state == PFUDPS_SINGLE)
4104 dst->state = PFUDPS_MULTIPLE;
4105
4106 /* update expire time */
4107 (*state)->expire = time_second;
4108 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4109 (*state)->timeout = PFTM_UDP_MULTIPLE;
4110 else
4111 (*state)->timeout = PFTM_UDP_SINGLE;
4112
4113 /* translate source/destination address, if necessary */
4114 if (STATE_TRANSLATE(*state)) {
4115 if (direction == PF_OUT)
4116 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4117 &uh->uh_sum, &(*state)->gwy.addr,
4118 (*state)->gwy.port, 1, pd->af);
4119 else
4120 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4121 &uh->uh_sum, &(*state)->lan.addr,
4122 (*state)->lan.port, 1, pd->af);
4123 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4124 }
4125
4126 return (PF_PASS);
4127}
4128
4129int
4130pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4131 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4132{
4133 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4134 u_int16_t icmpid = 0;
4135 u_int16_t *icmpsum = NULL;
4136 u_int8_t icmptype = 0;
4137 int state_icmp = 0;
4138
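	/*
	 * Classify the ICMP message: queries and replies are matched against
	 * their own ICMP state (keyed on the icmp id), while error messages
	 * (state_icmp != 0) carry an embedded IP header and are matched
	 * against the state of the TCP/UDP/ICMP packet that triggered them.
	 */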
4139 switch (pd->proto) {
4140#ifdef INET
4141 case IPPROTO_ICMP:
4142 icmptype = pd->hdr.icmp->icmp_type;
4143 icmpid = pd->hdr.icmp->icmp_id;
4144 icmpsum = &pd->hdr.icmp->icmp_cksum;
4145
4146 if (icmptype == ICMP_UNREACH ||
4147 icmptype == ICMP_SOURCEQUENCH ||
4148 icmptype == ICMP_REDIRECT ||
4149 icmptype == ICMP_TIMXCEED ||
4150 icmptype == ICMP_PARAMPROB)
4151 state_icmp++;
4152 break;
4153#endif /* INET */
4154#ifdef INET6
4155 case IPPROTO_ICMPV6:
4156 icmptype = pd->hdr.icmp6->icmp6_type;
4157 icmpid = pd->hdr.icmp6->icmp6_id;
4158 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4159
4160 if (icmptype == ICMP6_DST_UNREACH ||
4161 icmptype == ICMP6_PACKET_TOO_BIG ||
4162 icmptype == ICMP6_TIME_EXCEEDED ||
4163 icmptype == ICMP6_PARAM_PROB)
4164 state_icmp++;
4165 break;
4166#endif /* INET6 */
4167 }
4168
4169 if (!state_icmp) {
4170
4171 /*
4172 * ICMP query/reply message not related to a TCP/UDP packet.
4173 * Search for an ICMP state.
4174 */
4175 struct pf_state key;
4176
4177 key.af = pd->af;
4178 key.proto = pd->proto;
4179 if (direction == PF_IN) {
4180 PF_ACPY(&key.ext.addr, pd->src, key.af);
4181 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
4182 key.ext.port = icmpid;
4183 key.gwy.port = icmpid;
4184 } else {
4185 PF_ACPY(&key.lan.addr, pd->src, key.af);
4186 PF_ACPY(&key.ext.addr, pd->dst, key.af);
4187 key.lan.port = icmpid;
4188 key.ext.port = icmpid;
4189 }
4190
4191 STATE_LOOKUP();
4192
4193 (*state)->expire = time_second;
4194 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4195
4196 /* translate source/destination address, if necessary */
4197 if (PF_ANEQ(&(*state)->lan.addr, &(*state)->gwy.addr, pd->af)) {
4198 if (direction == PF_OUT) {
4199 switch (pd->af) {
4200#ifdef INET
4201 case AF_INET:
4202 pf_change_a(&saddr->v4.s_addr,
4203 pd->ip_sum,
4204 (*state)->gwy.addr.v4.s_addr, 0);
4205 break;
4206#endif /* INET */
4207#ifdef INET6
4208 case AF_INET6:
4209 pf_change_a6(saddr,
4210 &pd->hdr.icmp6->icmp6_cksum,
4211 &(*state)->gwy.addr, 0);
4212 m_copyback(m, off,
4213 sizeof(struct icmp6_hdr),
4214 (caddr_t)pd->hdr.icmp6);
4215 break;
4216#endif /* INET6 */
4217 }
4218 } else {
4219 switch (pd->af) {
4220#ifdef INET
4221 case AF_INET:
4222 pf_change_a(&daddr->v4.s_addr,
4223 pd->ip_sum,
4224 (*state)->lan.addr.v4.s_addr, 0);
4225 break;
4226#endif /* INET */
4227#ifdef INET6
4228 case AF_INET6:
4229 pf_change_a6(daddr,
4230 &pd->hdr.icmp6->icmp6_cksum,
4231 &(*state)->lan.addr, 0);
4232 m_copyback(m, off,
4233 sizeof(struct icmp6_hdr),
4234 (caddr_t)pd->hdr.icmp6);
4235 break;
4236#endif /* INET6 */
4237 }
4238 }
4239 }
4240
4241 return (PF_PASS);
4242
4243 } else {
4244 /*
4245 * ICMP error message in response to a TCP/UDP packet.
4246 * Extract the inner TCP/UDP header and search for that state.
4247 */
4248
4249 struct pf_pdesc pd2;
4250#ifdef INET
4251 struct ip h2;
4252#endif /* INET */
4253#ifdef INET6
4254 struct ip6_hdr h2_6;
4255 int terminal = 0;
4256#endif /* INET6 */
4257 int ipoff2 = 0;
4258 int off2 = 0;
4259
4260 pd2.af = pd->af;
4261 switch (pd->af) {
4262#ifdef INET
4263 case AF_INET:
4264 /* offset of h2 in mbuf chain */
4265 ipoff2 = off + ICMP_MINLEN;
4266
4267 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4268 NULL, NULL, pd2.af)) {
4269 DPFPRINTF(PF_DEBUG_MISC,
4270 ("pf: ICMP error message too short "
4271 "(ip)\n"));
4272 return (PF_DROP);
4273 }
4274 /*
4275 * ICMP error messages don't refer to non-first
4276 * fragments
4277 */
4278 /*
4279 * Note: We are dealing with an encapsulated
4280 * header. This means ip_off/ip_len are not
4281 * in host byte order!
4282 */
4283 if (h2.ip_off & htons(IP_OFFMASK))
4284 return (PF_DROP);
4285
4286 /* offset of protocol header that follows h2 */
4287 off2 = ipoff2 + (h2.ip_hl << 2);
4288
4289 pd2.proto = h2.ip_p;
4290 pd2.src = (struct pf_addr *)&h2.ip_src;
4291 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4292 pd2.ip_sum = &h2.ip_sum;
4293 break;
4294#endif /* INET */
4295#ifdef INET6
4296 case AF_INET6:
4297 ipoff2 = off + sizeof(struct icmp6_hdr);
4298
4299 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4300 NULL, NULL, pd2.af)) {
4301 DPFPRINTF(PF_DEBUG_MISC,
4302 ("pf: ICMP error message too short "
4303 "(ip6)\n"));
4304 return (PF_DROP);
4305 }
4306 pd2.proto = h2_6.ip6_nxt;
4307 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4308 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4309 pd2.ip_sum = NULL;
4310 off2 = ipoff2 + sizeof(h2_6);
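			/*
			 * Walk the embedded packet's IPv6 extension header
			 * chain until a transport header is found; an embedded
			 * fragment header causes the error to be dropped.
			 */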
4311 do {
4312 switch (pd2.proto) {
4313 case IPPROTO_FRAGMENT:
4314 /*
4315 * ICMPv6 error messages for
4316 * non-first fragments
4317 */
4318 return (PF_DROP);
4319 case IPPROTO_AH:
4320 case IPPROTO_HOPOPTS:
4321 case IPPROTO_ROUTING:
4322 case IPPROTO_DSTOPTS: {
4323 /* get next header and header length */
4324 struct ip6_ext opt6;
4325
4326 if (!pf_pull_hdr(m, off2, &opt6,
4327 sizeof(opt6), NULL, NULL, pd2.af)) {
4328 DPFPRINTF(PF_DEBUG_MISC,
4329 ("pf: ICMPv6 short opt\n"));
4330 return (PF_DROP);
4331 }
4332 if (pd2.proto == IPPROTO_AH)
4333 off2 += (opt6.ip6e_len + 2) * 4;
4334 else
4335 off2 += (opt6.ip6e_len + 1) * 8;
4336 pd2.proto = opt6.ip6e_nxt;
4337 /* goto the next header */
4338 break;
4339 }
4340 default:
4341 terminal++;
4342 break;
4343 }
4344 } while (!terminal);
4345 break;
4346#endif /* INET6 */
4347 }
4348
4349 switch (pd2.proto) {
4350 case IPPROTO_TCP: {
4351 struct tcphdr th;
4352 u_int32_t seq;
4353 struct pf_state key;
4354 struct pf_state_peer *src, *dst;
4355 u_int8_t dws;
4356 int copyback = 0;
4357
4358 /*
4359 * Only the first 8 bytes of the TCP header can be
4360 * expected. Don't access any TCP header fields after
4361 * th_seq, an ackskew test is not possible.
4362			 * th_seq; an ackskew test is not possible.
4363 if (!pf_pull_hdr(m, off2, &th, 8, NULL, NULL, pd2.af)) {
4364 DPFPRINTF(PF_DEBUG_MISC,
4365 ("pf: ICMP error message too short "
4366 "(tcp)\n"));
4367 return (PF_DROP);
4368 }
4369
4370 key.af = pd2.af;
4371 key.proto = IPPROTO_TCP;
4372 if (direction == PF_IN) {
4373 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4374 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4375 key.ext.port = th.th_dport;
4376 key.gwy.port = th.th_sport;
4377 } else {
4378 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4379 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4380 key.lan.port = th.th_dport;
4381 key.ext.port = th.th_sport;
4382 }
4383
4384 STATE_LOOKUP();
4385
4386 if (direction == (*state)->direction) {
4387 src = &(*state)->dst;
4388 dst = &(*state)->src;
4389 } else {
4390 src = &(*state)->src;
4391 dst = &(*state)->dst;
4392 }
4393
4394 if (src->wscale && dst->wscale &&
4395 !(th.th_flags & TH_SYN))
4396 dws = dst->wscale & PF_WSCALE_MASK;
4397 else
4398 dws = 0;
4399
4400 /* Demodulate sequence number */
4401 seq = ntohl(th.th_seq) - src->seqdiff;
4402 if (src->seqdiff) {
4403 pf_change_a(&th.th_seq, icmpsum,
4404 htonl(seq), 0);
4405 copyback = 1;
4406 }
4407
4408 if (!SEQ_GEQ(src->seqhi, seq) ||
4409 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))) {
4410 if (pf_status.debug >= PF_DEBUG_MISC) {
4411 printf("pf: BAD ICMP %d:%d ",
4412 icmptype, pd->hdr.icmp->icmp_code);
4413 pf_print_host(pd->src, 0, pd->af);
4414 printf(" -> ");
4415 pf_print_host(pd->dst, 0, pd->af);
4416 printf(" state: ");
4417 pf_print_state(*state);
4418 printf(" seq=%u\n", seq);
4419 }
4420 return (PF_DROP);
4421 }
4422
4423 if (STATE_TRANSLATE(*state)) {
4424 if (direction == PF_IN) {
4425 pf_change_icmp(pd2.src, &th.th_sport,
4426 daddr, &(*state)->lan.addr,
4427 (*state)->lan.port, NULL,
4428 pd2.ip_sum, icmpsum,
4429 pd->ip_sum, 0, pd2.af);
4430 } else {
4431 pf_change_icmp(pd2.dst, &th.th_dport,
4432 saddr, &(*state)->gwy.addr,
4433 (*state)->gwy.port, NULL,
4434 pd2.ip_sum, icmpsum,
4435 pd->ip_sum, 0, pd2.af);
4436 }
4437 copyback = 1;
4438 }
4439
4440 if (copyback) {
4441 switch (pd2.af) {
4442#ifdef INET
4443 case AF_INET:
4444 m_copyback(m, off, ICMP_MINLEN,
4445 (caddr_t)pd->hdr.icmp);
4446 m_copyback(m, ipoff2, sizeof(h2),
4447 (caddr_t)&h2);
4448 break;
4449#endif /* INET */
4450#ifdef INET6
4451 case AF_INET6:
4452 m_copyback(m, off,
4453 sizeof(struct icmp6_hdr),
4454 (caddr_t)pd->hdr.icmp6);
4455 m_copyback(m, ipoff2, sizeof(h2_6),
4456 (caddr_t)&h2_6);
4457 break;
4458#endif /* INET6 */
4459 }
4460 m_copyback(m, off2, 8, (caddr_t)&th);
4461 }
4462
4463 return (PF_PASS);
4464 break;
4465 }
4466 case IPPROTO_UDP: {
4467 struct udphdr uh;
4468 struct pf_state key;
4469
4470 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4471 NULL, NULL, pd2.af)) {
4472 DPFPRINTF(PF_DEBUG_MISC,
4473 ("pf: ICMP error message too short "
4474 "(udp)\n"));
4475 return (PF_DROP);
4476 }
4477
4478 key.af = pd2.af;
4479 key.proto = IPPROTO_UDP;
4480 if (direction == PF_IN) {
4481 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4482 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4483 key.ext.port = uh.uh_dport;
4484 key.gwy.port = uh.uh_sport;
4485 } else {
4486 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4487 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4488 key.lan.port = uh.uh_dport;
4489 key.ext.port = uh.uh_sport;
4490 }
4491
4492 STATE_LOOKUP();
4493
4494 if (STATE_TRANSLATE(*state)) {
4495 if (direction == PF_IN) {
4496 pf_change_icmp(pd2.src, &uh.uh_sport,
4497 daddr, &(*state)->lan.addr,
4498 (*state)->lan.port, &uh.uh_sum,
4499 pd2.ip_sum, icmpsum,
4500 pd->ip_sum, 1, pd2.af);
4501 } else {
4502 pf_change_icmp(pd2.dst, &uh.uh_dport,
4503 saddr, &(*state)->gwy.addr,
4504 (*state)->gwy.port, &uh.uh_sum,
4505 pd2.ip_sum, icmpsum,
4506 pd->ip_sum, 1, pd2.af);
4507 }
4508 switch (pd2.af) {
4509#ifdef INET
4510 case AF_INET:
4511 m_copyback(m, off, ICMP_MINLEN,
4512 (caddr_t)pd->hdr.icmp);
4513 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4514 break;
4515#endif /* INET */
4516#ifdef INET6
4517 case AF_INET6:
4518 m_copyback(m, off,
4519 sizeof(struct icmp6_hdr),
4520 (caddr_t)pd->hdr.icmp6);
4521 m_copyback(m, ipoff2, sizeof(h2_6),
4522 (caddr_t)&h2_6);
4523 break;
4524#endif /* INET6 */
4525 }
4526 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4527 }
4528
4529 return (PF_PASS);
4530 break;
4531 }
4532#ifdef INET
4533 case IPPROTO_ICMP: {
4534 struct icmp iih;
4535 struct pf_state key;
4536
4537 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4538 NULL, NULL, pd2.af)) {
4539 DPFPRINTF(PF_DEBUG_MISC,
4540				    ("pf: ICMP error message too short "
4541 "(icmp)\n"));
4542 return (PF_DROP);
4543 }
4544
4545 key.af = pd2.af;
4546 key.proto = IPPROTO_ICMP;
4547 if (direction == PF_IN) {
4548 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4549 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4550 key.ext.port = iih.icmp_id;
4551 key.gwy.port = iih.icmp_id;
4552 } else {
4553 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4554 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4555 key.lan.port = iih.icmp_id;
4556 key.ext.port = iih.icmp_id;
4557 }
4558
4559 STATE_LOOKUP();
4560
4561 if (STATE_TRANSLATE(*state)) {
4562 if (direction == PF_IN) {
4563 pf_change_icmp(pd2.src, &iih.icmp_id,
4564 daddr, &(*state)->lan.addr,
4565 (*state)->lan.port, NULL,
4566 pd2.ip_sum, icmpsum,
4567 pd->ip_sum, 0, AF_INET);
4568 } else {
4569 pf_change_icmp(pd2.dst, &iih.icmp_id,
4570 saddr, &(*state)->gwy.addr,
4571 (*state)->gwy.port, NULL,
4572 pd2.ip_sum, icmpsum,
4573 pd->ip_sum, 0, AF_INET);
4574 }
4575 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4576 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4577 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4578 }
4579
4580 return (PF_PASS);
4581 break;
4582 }
4583#endif /* INET */
4584#ifdef INET6
4585 case IPPROTO_ICMPV6: {
4586 struct icmp6_hdr iih;
4587 struct pf_state key;
4588
4589 if (!pf_pull_hdr(m, off2, &iih,
4590 sizeof(struct icmp6_hdr), NULL, NULL, pd2.af)) {
4591 DPFPRINTF(PF_DEBUG_MISC,
4592 ("pf: ICMP error message too short "
4593 "(icmp6)\n"));
4594 return (PF_DROP);
4595 }
4596
4597 key.af = pd2.af;
4598 key.proto = IPPROTO_ICMPV6;
4599 if (direction == PF_IN) {
4600 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4601 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4602 key.ext.port = iih.icmp6_id;
4603 key.gwy.port = iih.icmp6_id;
4604 } else {
4605 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4606 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4607 key.lan.port = iih.icmp6_id;
4608 key.ext.port = iih.icmp6_id;
4609 }
4610
4611 STATE_LOOKUP();
4612
4613 if (STATE_TRANSLATE(*state)) {
4614 if (direction == PF_IN) {
4615 pf_change_icmp(pd2.src, &iih.icmp6_id,
4616 daddr, &(*state)->lan.addr,
4617 (*state)->lan.port, NULL,
4618 pd2.ip_sum, icmpsum,
4619 pd->ip_sum, 0, AF_INET6);
4620 } else {
4621 pf_change_icmp(pd2.dst, &iih.icmp6_id,
4622 saddr, &(*state)->gwy.addr,
4623 (*state)->gwy.port, NULL,
4624 pd2.ip_sum, icmpsum,
4625 pd->ip_sum, 0, AF_INET6);
4626 }
4627 m_copyback(m, off, sizeof(struct icmp6_hdr),
4628 (caddr_t)pd->hdr.icmp6);
4629 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4630 m_copyback(m, off2, sizeof(struct icmp6_hdr),
4631 (caddr_t)&iih);
4632 }
4633
4634 return (PF_PASS);
4635 break;
4636 }
4637#endif /* INET6 */
4638 default: {
4639 struct pf_state key;
4640
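			/*
			 * Embedded protocol without port numbers: look up the
			 * state on addresses and protocol alone, with port 0
			 * on both sides of the key.
			 */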
4641 key.af = pd2.af;
4642 key.proto = pd2.proto;
4643 if (direction == PF_IN) {
4644 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4645 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4646 key.ext.port = 0;
4647 key.gwy.port = 0;
4648 } else {
4649 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4650 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4651 key.lan.port = 0;
4652 key.ext.port = 0;
4653 }
4654
4655 STATE_LOOKUP();
4656
4657 if (STATE_TRANSLATE(*state)) {
4658 if (direction == PF_IN) {
4659 pf_change_icmp(pd2.src, NULL,
4660 daddr, &(*state)->lan.addr,
4661 0, NULL,
4662 pd2.ip_sum, icmpsum,
4663 pd->ip_sum, 0, pd2.af);
4664 } else {
4665 pf_change_icmp(pd2.dst, NULL,
4666 saddr, &(*state)->gwy.addr,
4667 0, NULL,
4668 pd2.ip_sum, icmpsum,
4669 pd->ip_sum, 0, pd2.af);
4670 }
4671 switch (pd2.af) {
4672#ifdef INET
4673 case AF_INET:
4674 m_copyback(m, off, ICMP_MINLEN,
4675 (caddr_t)pd->hdr.icmp);
4676 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4677 break;
4678#endif /* INET */
4679#ifdef INET6
4680 case AF_INET6:
4681 m_copyback(m, off,
4682 sizeof(struct icmp6_hdr),
4683 (caddr_t)pd->hdr.icmp6);
4684 m_copyback(m, ipoff2, sizeof(h2_6),
4685 (caddr_t)&h2_6);
4686 break;
4687#endif /* INET6 */
4688 }
4689 }
4690
4691 return (PF_PASS);
4692 break;
4693 }
4694 }
4695 }
4696}
4697
4698int
4699pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
4700 struct pf_pdesc *pd)
4701{
4702 struct pf_state_peer *src, *dst;
4703 struct pf_state key;
4704
4705 key.af = pd->af;
4706 key.proto = pd->proto;
4707 if (direction == PF_IN) {
4708 PF_ACPY(&key.ext.addr, pd->src, key.af);
4709 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
4710 key.ext.port = 0;
4711 key.gwy.port = 0;
4712 } else {
4713 PF_ACPY(&key.lan.addr, pd->src, key.af);
4714 PF_ACPY(&key.ext.addr, pd->dst, key.af);
4715 key.lan.port = 0;
4716 key.ext.port = 0;
4717 }
4718
4719 STATE_LOOKUP();
4720
4721 if (direction == (*state)->direction) {
4722 src = &(*state)->src;
4723 dst = &(*state)->dst;
4724 } else {
4725 src = &(*state)->dst;
4726 dst = &(*state)->src;
4727 }
4728
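	/*
	 * Protocols other than TCP/UDP/ICMP get the same simple pseudo state
	 * as UDP: SINGLE after the first packet, MULTIPLE once traffic has
	 * been seen in both directions, which selects the
	 * PFTM_OTHER_MULTIPLE timeout below.
	 */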
4729 /* update states */
4730 if (src->state < PFOTHERS_SINGLE)
4731 src->state = PFOTHERS_SINGLE;
4732 if (dst->state == PFOTHERS_SINGLE)
4733 dst->state = PFOTHERS_MULTIPLE;
4734
4735 /* update expire time */
4736 (*state)->expire = time_second;
4737 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
4738 (*state)->timeout = PFTM_OTHER_MULTIPLE;
4739 else
4740 (*state)->timeout = PFTM_OTHER_SINGLE;
4741
4742 /* translate source/destination address, if necessary */
4743 if (STATE_TRANSLATE(*state)) {
4744 if (direction == PF_OUT)
4745 switch (pd->af) {
4746#ifdef INET
4747 case AF_INET:
4748 pf_change_a(&pd->src->v4.s_addr,
4749 pd->ip_sum, (*state)->gwy.addr.v4.s_addr,
4750 0);
4751 break;
4752#endif /* INET */
4753#ifdef INET6
4754 case AF_INET6:
4755 PF_ACPY(pd->src, &(*state)->gwy.addr, pd->af);
4756 break;
4757#endif /* INET6 */
4758 }
4759 else
4760 switch (pd->af) {
4761#ifdef INET
4762 case AF_INET:
4763 pf_change_a(&pd->dst->v4.s_addr,
4764 pd->ip_sum, (*state)->lan.addr.v4.s_addr,
4765 0);
4766 break;
4767#endif /* INET */
4768#ifdef INET6
4769 case AF_INET6:
4770 PF_ACPY(pd->dst, &(*state)->lan.addr, pd->af);
4771 break;
4772#endif /* INET6 */
4773 }
4774 }
4775
4776 return (PF_PASS);
4777}
4778
4779/*
4780 * ipoff and off are measured from the start of the mbuf chain.
4781 * h must be at "ipoff" on the mbuf chain.
4782 */
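/*
 * On success the requested header is copied into *p and p is returned.
 * On failure NULL is returned and, if actionp/reasonp are non-NULL, they
 * receive the verdict and reason: IPv4 fragments lying entirely beyond the
 * requested header are passed, fragments overlapping it are dropped with
 * PFRES_FRAG, and truncated packets are dropped with PFRES_SHORT.
 */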
4783void *
4784pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
4785 u_short *actionp, u_short *reasonp, sa_family_t af)
4786{
4787 switch (af) {
4788#ifdef INET
4789 case AF_INET: {
4790 struct ip *h = mtod(m, struct ip *);
4791 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3;
4792
4793 if (fragoff) {
4794 if (fragoff >= len)
4795 ACTION_SET(actionp, PF_PASS);
4796 else {
4797 ACTION_SET(actionp, PF_DROP);
4798 REASON_SET(reasonp, PFRES_FRAG);
4799 }
4800 return (NULL);
4801 }
4802 if (m->m_pkthdr.len < off + len ||
4803 h->ip_len < off + len) {
4804 ACTION_SET(actionp, PF_DROP);
4805 REASON_SET(reasonp, PFRES_SHORT);
4806 return (NULL);
4807 }
4808 break;
4809 }
4810#endif /* INET */
4811#ifdef INET6
4812 case AF_INET6: {
4813 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
4814
4815 if (m->m_pkthdr.len < off + len ||
4816 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
4817 (unsigned)(off + len)) {
4818 ACTION_SET(actionp, PF_DROP);
4819 REASON_SET(reasonp, PFRES_SHORT);
4820 return (NULL);
4821 }
4822 break;
4823 }
4824#endif /* INET6 */
4825 }
4826 m_copydata(m, off, len, p);
4827 return (p);
4828}
4829
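/*
 * Probe the routing table for a route to the given address; returns 1 if
 * one exists and 0 otherwise.  Note that only the IPv4 form of the address
 * is consulted, regardless of the af argument.
 */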
4830int
4831pf_routable(struct pf_addr *addr, sa_family_t af)
4832{
4833 struct sockaddr_in *dst;
4834 struct route ro;
4835 int ret = 0;
4836
4837 bzero(&ro, sizeof(ro));
4838 dst = satosin(&ro.ro_dst);
4839 dst->sin_family = af;
4840 dst->sin_len = sizeof(*dst);
4841 dst->sin_addr = addr->v4;
4842 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
4843
4844 if (ro.ro_rt != NULL) {
4845 ret = 1;
4846 RTFREE(ro.ro_rt);
4847 }
4848
4849 return (ret);
4850}
4851
4852#ifdef INET
4853void
4854pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
4855 struct pf_state *s)
4856{
4857 struct mbuf *m0, *m1;
4858 struct route iproute;
4859 struct route *ro = NULL;
4860 struct sockaddr_in *dst;
4861 struct ip *ip;
4862 struct ifnet *ifp = NULL;
4863 struct pf_addr naddr;
4864 struct pf_src_node *sn = NULL;
4865 int error = 0;
4866 int sw_csum;
4867
4868 if (m == NULL || *m == NULL || r == NULL ||
4869 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
4870 panic("pf_route: invalid parameters");
4871
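	/*
	 * Tag the mbuf as routed by pf and count how many times it has been
	 * through here; a packet re-routed more than three times is assumed
	 * to be looping and is dropped.
	 */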
4872 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
4873 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
4874 (*m)->m_pkthdr.pf_routed = 1;
4875 } else {
4876 if ((*m)->m_pkthdr.pf_routed > 3) {
4877 m0 = *m;
4878 *m = NULL;
4879 goto bad;
4880 }
4881 (*m)->m_pkthdr.pf_routed++;
4882 }
4883
4884 if (r->rt == PF_DUPTO) {
4885 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
4886 return;
4887 } else {
4888 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
4889 return;
4890 m0 = *m;
4891 }
4892
4893 if (m0->m_len < sizeof(struct ip))
4894 panic("pf_route: m0->m_len < sizeof(struct ip)");
4895 ip = mtod(m0, struct ip *);
4896
4897 ro = &iproute;
4898 bzero((caddr_t)ro, sizeof(*ro));
4899 dst = satosin(&ro->ro_dst);
4900 dst->sin_family = AF_INET;
4901 dst->sin_len = sizeof(*dst);
4902 dst->sin_addr = ip->ip_dst;
4903
4904 if (r->rt == PF_FASTROUTE) {
4905 rtalloc(ro);
4906 if (ro->ro_rt == 0) {
4907 ipstat.ips_noroute++;
4908 goto bad;
4909 }
4910
4911 ifp = ro->ro_rt->rt_ifp;
4912 ro->ro_rt->rt_use++;
4913
4914 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
4915 dst = satosin(ro->ro_rt->rt_gateway);
4916 } else {
4917 if (TAILQ_EMPTY(&r->rpool.list))
4918 panic("pf_route: TAILQ_EMPTY(&r->rpool.list)");
4919 if (s == NULL) {
4920 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
4921 &naddr, NULL, &sn);
4922 if (!PF_AZERO(&naddr, AF_INET))
4923 dst->sin_addr.s_addr = naddr.v4.s_addr;
4924 ifp = r->rpool.cur->kif ?
4925 r->rpool.cur->kif->pfik_ifp : NULL;
4926 } else {
4927 if (!PF_AZERO(&s->rt_addr, AF_INET))
4928 dst->sin_addr.s_addr =
4929 s->rt_addr.v4.s_addr;
4930 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
4931 }
4932 }
4933 if (ifp == NULL)
4934 goto bad;
4935
4936 if (oifp != ifp) {
4937 if (pf_test(PF_OUT, ifp, &m0) != PF_PASS)
4938 goto bad;
4939 else if (m0 == NULL)
4940 goto done;
4941 if (m0->m_len < sizeof(struct ip))
4942 panic("pf_route: m0->m_len < sizeof(struct ip)");
4943 ip = mtod(m0, struct ip *);
4944 }
4945
4946 /* Copied from ip_output. */
4947 m0->m_pkthdr.csum_flags |= CSUM_IP;
4948 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
4949 if (sw_csum & CSUM_DELAY_DATA) {
4950 in_delayed_cksum(m0);
4951 sw_csum &= ~CSUM_DELAY_DATA;
4952 }
4953 m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
4954
4955 /*
4956	 * If the packet is small enough for the interface, or the interface
4957	 * will take care of the fragmentation for us, we can just send directly.
4958 */
4959 if (ip->ip_len <= ifp->if_mtu || ((ifp->if_hwassist & CSUM_FRAGMENT) &&
4960 (ip->ip_off & IP_DF) == 0)) {
4961 ip->ip_len = htons(ip->ip_len);
4962 ip->ip_off = htons(ip->ip_off);
4963 ip->ip_sum = 0;
4964 if (sw_csum & CSUM_DELAY_IP) {
4965 /* From KAME */
4966 if (ip->ip_v == IPVERSION &&
4967 (ip->ip_hl << 2) == sizeof(*ip)) {
4968 ip->ip_sum = in_cksum_hdr(ip);
4969 } else {
4970 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
4971 }
4972 }
4973
4974 error = (*ifp->if_output)(ifp, m0, sintosa(dst), ro->ro_rt);
4975 goto done;
4976 }
4977
4978 /*
4979 * Too large for interface; fragment if possible.
4980 * Must be able to put at least 8 bytes per fragment.
4981 */
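	/*
	 * With DF set we may not fragment: send an ICMP "fragmentation
	 * needed" error carrying the interface MTU back to the sender,
	 * except for dup-to copies, which are silently dropped.
	 */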
4982 if (ip->ip_off & IP_DF) {
4983 ipstat.ips_cantfrag++;
4984 if (r->rt != PF_DUPTO) {
4985 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
4986			    ifp->if_mtu);
4987 goto done;
4988 } else
4989 goto bad;
4990 }
4991
4992 m1 = m0;
4993 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
4994 if (error)
4995 goto bad;
4996
4997 for (m0 = m1; m0; m0 = m1) {
4998 m1 = m0->m_nextpkt;
4999 m0->m_nextpkt = 0;
5000 if (error == 0)
5001 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
5002 NULL);
5003 else
5004 m_freem(m0);
5005 }
5006
5007 if (error == 0)
5008 ipstat.ips_fragmented++;
5009
5010done:
5011 if (r->rt != PF_DUPTO)
5012 *m = NULL;
5013 if (ro == &iproute && ro->ro_rt)
5014 RTFREE(ro->ro_rt);
5015 return;
5016
5017bad:
5018 m_freem(m0);
5019 goto done;
5020}
5021#endif /* INET */
5022
5023#ifdef INET6
5024void
5025pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5026 struct pf_state *s)
5027{
5028 struct mbuf *m0;
5029 struct route_in6 ip6route;
5030 struct route_in6 *ro;
5031 struct sockaddr_in6 *dst;
5032 struct ip6_hdr *ip6;
5033 struct ifnet *ifp = NULL;
5034 struct pf_addr naddr;
5035 struct pf_src_node *sn = NULL;
5036 int error = 0;
5037
5038 if (m == NULL || *m == NULL || r == NULL ||
5039 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5040 panic("pf_route6: invalid parameters");
5041
5042 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
5043 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
5044 (*m)->m_pkthdr.pf_routed = 1;
5045 } else {
5046 if ((*m)->m_pkthdr.pf_routed > 3) {
5047 m0 = *m;
5048 *m = NULL;
5049 goto bad;
5050 }
5051 (*m)->m_pkthdr.pf_routed++;
5052 }
5053
5054 if (r->rt == PF_DUPTO) {
5055 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
5056 return;
5057 } else {
5058 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
5059 return;
5060 m0 = *m;
5061 }
5062
5063 if (m0->m_len < sizeof(struct ip6_hdr))
5064 panic("pf_route6: m0->m_len < sizeof(struct ip6_hdr)");
5065 ip6 = mtod(m0, struct ip6_hdr *);
5066
5067 ro = &ip6route;
5068 bzero((caddr_t)ro, sizeof(*ro));
5069 dst = (struct sockaddr_in6 *)&ro->ro_dst;
5070 dst->sin6_family = AF_INET6;
5071 dst->sin6_len = sizeof(*dst);
5072 dst->sin6_addr = ip6->ip6_dst;
5073
5074	/* Cheat: tag the packet as pf-generated and let ip6_output() route it. */
5075 if (r->rt == PF_FASTROUTE) {
5076 m0->m_pkthdr.fw_flags |= PF_MBUF_GENERATED;
5077 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5078 return;
5079 }
5080
5081 if (TAILQ_EMPTY(&r->rpool.list))
5082 panic("pf_route6: TAILQ_EMPTY(&r->rpool.list)");
5083 if (s == NULL) {
5084 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5085 &naddr, NULL, &sn);
5086 if (!PF_AZERO(&naddr, AF_INET6))
5087 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
5088 &naddr, AF_INET6);
5089 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5090 } else {
5091 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5092 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
5093 &s->rt_addr, AF_INET6);
5094 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5095 }
5096 if (ifp == NULL)
5097 goto bad;
5098
5099 if (oifp != ifp) {
5100 if (pf_test6(PF_OUT, ifp, &m0) != PF_PASS)
5101 goto bad;
5102 else if (m0 == NULL)
5103 goto done;
5104 if (m0->m_len < sizeof(struct ip6_hdr))
5105 panic("pf_route6: m0->m_len < sizeof(struct ip6_hdr)");
5106 ip6 = mtod(m0, struct ip6_hdr *);
5107 }
5108
5109 /*
5110 * If the packet is too large for the outgoing interface,
5111 * send back an icmp6 error.
5112 */
5113 if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr))
5114 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5115 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
5116 error = nd6_output(ifp, ifp, m0, dst, NULL);
5117 } else {
5118 in6_ifstat_inc(ifp, ifs6_in_toobig);
5119 if (r->rt != PF_DUPTO)
5120 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5121 else
5122 goto bad;
5123 }
5124
5125done:
5126 if (r->rt != PF_DUPTO)
5127 *m = NULL;
5128 return;
5129
5130bad:
5131 m_freem(m0);
5132 goto done;
5133}
5134#endif /* INET6 */
5135
5136
5137/*
5138 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
5139 * off is the offset where the protocol header starts
5140 * len is the total length of protocol header plus payload
5141 * returns 0 when the checksum is valid, otherwise returns 1.
5142 */
5143/*
5144 * XXX
5145 * FreeBSD supports cksum offload for the following drivers:
5146 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4).
5147 * If we could make full use of it we would outperform ipfw/ipfilter
5148 * under very heavy traffic.
5149 * I have not tested this because I don't have NICs that support cksum offload.
5150 * (There might be problems. Typical symptoms would be
5151 * 1. No route message for UDP packets.
5152 * 2. No connection acceptance from external hosts regardless of the rule set.)
5153 */
5154int
5155pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
5156 sa_family_t af)
5157{
5158 u_int16_t sum = 0;
5159 int hw_assist = 0;
5160 struct ip *ip;
5161
5162 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5163 return (1);
5164 if (m->m_pkthdr.len < off + len)
5165 return (1);
5166
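	/*
	 * For TCP and UDP, trust a checksum the NIC has already verified
	 * (CSUM_DATA_VALID): fold in the pseudo-header when the hardware did
	 * not cover it and skip the software checksum pass below.
	 */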
5167 switch (p) {
5168 case IPPROTO_TCP:
5169 case IPPROTO_UDP:
5170 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5171 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5172 sum = m->m_pkthdr.csum_data;
5173 } else {
5174 ip = mtod(m, struct ip *);
5175 sum = in_pseudo(ip->ip_src.s_addr,
5176 ip->ip_dst.s_addr, htonl((u_short)len +
5177 m->m_pkthdr.csum_data + p));
5178 }
5179 sum ^= 0xffff;
5180 ++hw_assist;
5181 }
5182 break;
5183 case IPPROTO_ICMP:
5184#ifdef INET6
5185 case IPPROTO_ICMPV6:
5186#endif /* INET6 */
5187 break;
5188 default:
5189 return (1);
5190 }
5191
5192 if (!hw_assist) {
5193 switch (af) {
5194 case AF_INET:
5195 if (p == IPPROTO_ICMP) {
5196 if (m->m_len < off)
5197 return (1);
5198 m->m_data += off;
5199 m->m_len -= off;
5200 sum = in_cksum(m, len);
5201 m->m_data -= off;
5202 m->m_len += off;
5203 } else {
5204 if (m->m_len < sizeof(struct ip))
5205 return (1);
5206 sum = in_cksum_range(m, p, off, len);
5207 if (sum == 0) {
5208 m->m_pkthdr.csum_flags |=
5209 (CSUM_DATA_VALID |
5210 CSUM_PSEUDO_HDR);
5211 m->m_pkthdr.csum_data = 0xffff;
5212 }
5213 }
5214 break;
5215#ifdef INET6
5216 case AF_INET6:
5217 if (m->m_len < sizeof(struct ip6_hdr))
5218 return (1);
5219 sum = in6_cksum(m, p, off, len);
5220 /*
5221 * XXX
5222 * IPv6 H/W cksum off-load not supported yet!
5223 *
5224 * if (sum == 0) {
5225 * m->m_pkthdr.csum_flags |=
5226 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
5227 * m->m_pkthdr.csum_data = 0xffff;
5228 *}
5229 */
5230 break;
5231#endif /* INET6 */
5232 default:
5233 return (1);
5234 }
5235 }
5236 if (sum) {
5237 switch (p) {
5238 case IPPROTO_TCP:
5239 tcpstat.tcps_rcvbadsum++;
5240 break;
5241 case IPPROTO_UDP:
5242 udpstat.udps_badsum++;
5243 break;
5244 case IPPROTO_ICMP:
5245 icmpstat.icps_checksum++;
5246 break;
5247#ifdef INET6
5248 case IPPROTO_ICMPV6:
5249 icmp6stat.icp6s_checksum++;
5250 break;
5251#endif /* INET6 */
5252 }
5253 return (1);
5254 }
5255 return (0);
5256}
5257
5258#ifdef INET
5259int
5260pf_test(int dir, struct ifnet *ifp, struct mbuf **m0)
5261{
5262 struct pfi_kif *kif;
5263 u_short action, reason = 0, log = 0;
5264 struct mbuf *m = *m0;
5265 struct ip *h = NULL;
5266 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
5267 struct pf_state *s = NULL;
5268 struct pf_ruleset *ruleset = NULL;
5269 struct pf_pdesc pd;
5270 int off, dirndx, pqid = 0;
5271
5272 if (!pf_status.running || (m->m_pkthdr.fw_flags & PF_MBUF_GENERATED))
5273 return (PF_PASS);
5274
5275 kif = pfi_index2kif[ifp->if_index];
5276 if (kif == NULL)
5277 return (PF_DROP);
5278
5279#ifdef DIAGNOSTIC
5280 if ((m->m_flags & M_PKTHDR) == 0)
5281 panic("non-M_PKTHDR is passed to pf_test");
5282#endif
5283
5284 memset(&pd, 0, sizeof(pd));
5285 if (m->m_pkthdr.len < (int)sizeof(*h)) {
5286 action = PF_DROP;
5287 REASON_SET(&reason, PFRES_SHORT);
5288 log = 1;
5289 goto done;
5290 }
5291
5292 /* We do IP header normalization and packet reassembly here */
5293 if (pf_normalize_ip(m0, dir, kif, &reason) != PF_PASS) {
5294 action = PF_DROP;
5295 goto done;
5296 }
5297 m = *m0;
5298 h = mtod(m, struct ip *);
5299
5300 off = h->ip_hl << 2;
5301 if (off < (int)sizeof(*h)) {
5302 action = PF_DROP;
5303 REASON_SET(&reason, PFRES_SHORT);
5304 log = 1;
5305 goto done;
5306 }
5307
5308 pd.src = (struct pf_addr *)&h->ip_src;
5309 pd.dst = (struct pf_addr *)&h->ip_dst;
5310 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
5311 pd.ip_sum = &h->ip_sum;
5312 pd.proto = h->ip_p;
5313 pd.af = AF_INET;
5314 pd.tos = h->ip_tos;
5315 pd.tot_len = h->ip_len;
5316
5317 /* handle fragments that didn't get reassembled by normalization */
5318 if (h->ip_off & (IP_MF | IP_OFFMASK)) {
5319 action = pf_test_fragment(&r, dir, kif, m, h,
5320 &pd, &a, &ruleset);
5321 goto done;
5322 }
5323
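	/*
	 * Dispatch on the transport protocol.  The header is pulled up and
	 * the checksum verified for inbound packets; an existing state is
	 * tried first and the ruleset is only evaluated when no matching
	 * state is found.
	 */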
5324 switch (h->ip_p) {
5325
5326 case IPPROTO_TCP: {
5327 struct tcphdr th;
5328
5329 pd.hdr.tcp = &th;
5330 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5331 &action, &reason, AF_INET)) {
5332 log = action != PF_PASS;
5333 goto done;
5334 }
5335 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5336 h->ip_len - off, IPPROTO_TCP, AF_INET)) {
5337 action = PF_DROP;
5338 goto done;
5339 }
5340 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5341 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5342 pqid = 1;
5343 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5344 if (action == PF_DROP)
5345 goto done;
5346 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5347 &reason);
5348 if (action == PF_PASS) {
5349#if NPFSYNC
5350 pfsync_update_state(s);
5351#endif
5352 r = s->rule.ptr;
5353 a = s->anchor.ptr;
5354 log = s->log;
5355 } else if (s == NULL)
5356 action = pf_test_tcp(&r, &s, dir, kif,
5357 m, off, h, &pd, &a, &ruleset);
5358 break;
5359 }
5360
5361 case IPPROTO_UDP: {
5362 struct udphdr uh;
5363
5364 pd.hdr.udp = &uh;
5365 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5366 &action, &reason, AF_INET)) {
5367 log = action != PF_PASS;
5368 goto done;
5369 }
5370 if (dir == PF_IN && uh.uh_sum && pf_check_proto_cksum(m,
5371 off, h->ip_len - off, IPPROTO_UDP, AF_INET)) {
5372 action = PF_DROP;
5373 goto done;
5374 }
5375 if (uh.uh_dport == 0 ||
5376 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5377 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5378 action = PF_DROP;
5379 goto done;
5380 }
5381 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5382 if (action == PF_PASS) {
5383#if NPFSYNC
5384 pfsync_update_state(s);
5385#endif
5386 r = s->rule.ptr;
5387 a = s->anchor.ptr;
5388 log = s->log;
5389 } else if (s == NULL)
5390 action = pf_test_udp(&r, &s, dir, kif,
5391 m, off, h, &pd, &a, &ruleset);
5392 break;
5393 }
5394
5395 case IPPROTO_ICMP: {
5396 struct icmp ih;
5397
5398 pd.hdr.icmp = &ih;
5399 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5400 &action, &reason, AF_INET)) {
5401 log = action != PF_PASS;
5402 goto done;
5403 }
5404 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5405 h->ip_len - off, IPPROTO_ICMP, AF_INET)) {
5406 action = PF_DROP;
5407 goto done;
5408 }
5409 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd);
5410 if (action == PF_PASS) {
5411#if NPFSYNC
5412 pfsync_update_state(s);
5413#endif
5414 r = s->rule.ptr;
5415 a = s->anchor.ptr;
5416 log = s->log;
5417 } else if (s == NULL)
5418 action = pf_test_icmp(&r, &s, dir, kif,
5419 m, off, h, &pd, &a, &ruleset);
5420 break;
5421 }
5422
5423 default:
5424 action = pf_test_state_other(&s, dir, kif, &pd);
5425 if (action == PF_PASS) {
5426#if NPFSYNC
5427 pfsync_update_state(s);
5428#endif
5429 r = s->rule.ptr;
5430 a = s->anchor.ptr;
5431 log = s->log;
5432 } else if (s == NULL)
5433 action = pf_test_other(&r, &s, dir, kif, m, off, h,
5434 &pd, &a, &ruleset);
5435 break;
5436 }
5437
5438done:
5439 if (action == PF_PASS && h->ip_hl > 5 &&
5440 !((s && s->allow_opts) || r->allow_opts)) {
5441 action = PF_DROP;
5442 REASON_SET(&reason, PFRES_SHORT);
5443 log = 1;
5444 DPFPRINTF(PF_DEBUG_MISC,
5445 ("pf: dropping packet with ip options\n"));
5446 }
5447
5448#ifdef ALTQ
5449 if (action == PF_PASS && r->qid) {
5450 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
5451 if (pd.tos == IPTOS_LOWDELAY)
5452 m->m_pkthdr.altq_qid = r->pqid;
5453 else
5454 m->m_pkthdr.altq_qid = r->qid;
5455 m->m_pkthdr.ecn_af = AF_INET;
5456 m->m_pkthdr.header = h;
5457 }
5458#endif
5459
5460 /*
5461 * connections redirected to loopback should not match sockets
5462 * bound specifically to loopback due to security implications,
5463 * see tcp_input() and in_pcblookup_listen().
5464 */
5465 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5466 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5467 (s->nat_rule.ptr->action == PF_RDR ||
5468 s->nat_rule.ptr->action == PF_BINAT) &&
5469 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
5470 action = PF_DROP;
5471 REASON_SET(&reason, PFRES_MEMORY);
5472 }
5473
5474 m->m_pkthdr.fw_flags |= PF_MBUF_TRANSLATE_LOCALHOST;
5475
5476 if (log)
5477 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, a, ruleset);
5478
5479 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
5480 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
5481
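	/*
	 * Update rule, anchor, state and source-node counters, and feed
	 * table statistics to pfr_update_stats() when the matching rule's
	 * source or destination address is a table.
	 */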
5482 if (action == PF_PASS || r->action == PF_DROP) {
5483 r->packets++;
5484 r->bytes += pd.tot_len;
5485 if (a != NULL) {
5486 a->packets++;
5487 a->bytes += pd.tot_len;
5488 }
5489 if (s != NULL) {
5490 dirndx = (dir == s->direction) ? 0 : 1;
5491 s->packets[dirndx]++;
5492 s->bytes[dirndx] += pd.tot_len;
5493 if (s->nat_rule.ptr != NULL) {
5494 s->nat_rule.ptr->packets++;
5495 s->nat_rule.ptr->bytes += pd.tot_len;
5496 }
5497 if (s->src_node != NULL) {
5498 s->src_node->packets++;
5499 s->src_node->bytes += pd.tot_len;
5500 }
5501 if (s->nat_src_node != NULL) {
5502 s->nat_src_node->packets++;
5503 s->nat_src_node->bytes += pd.tot_len;
5504 }
5505 }
5506 tr = r;
5507 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
5508 if (nr != NULL) {
5509 struct pf_addr *x;
5510 /*
5511 * XXX: we need to make sure that the addresses
5512			 * passed to pfr_update_stats() are the same as
5513 * the addresses used during matching (pfr_match)
5514 */
5515 if (r == &pf_default_rule) {
5516 tr = nr;
5517 x = (s == NULL || s->direction == dir) ?
5518 &pd.baddr : &pd.naddr;
5519 } else
5520 x = (s == NULL || s->direction == dir) ?
5521 &pd.naddr : &pd.baddr;
5522 if (x == &pd.baddr || s == NULL) {
5523 /* we need to change the address */
5524 if (dir == PF_OUT)
5525 pd.src = x;
5526 else
5527 pd.dst = x;
5528 }
5529 }
5530 if (tr->src.addr.type == PF_ADDR_TABLE)
5531 pfr_update_stats(tr->src.addr.p.tbl, (s == NULL ||
5532 s->direction == dir) ? pd.src : pd.dst, pd.af,
5533 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5534 tr->src.not);
5535 if (tr->dst.addr.type == PF_ADDR_TABLE)
5536 pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL ||
5537 s->direction == dir) ? pd.dst : pd.src, pd.af,
5538 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5539 tr->dst.not);
5540 }
5541
5542
5543 if (action == PF_SYNPROXY_DROP) {
5544 m_freem(*m0);
5545 *m0 = NULL;
5546 action = PF_PASS;
5547 } else if (r->rt)
5548 /* pf_route can free the mbuf causing *m0 to become NULL */
5549 pf_route(m0, r, dir, ifp, s);
5550
5551 return (action);
5552}
5553#endif /* INET */
5554
5555#ifdef INET6
5556int
5557pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0)
5558{
5559 struct pfi_kif *kif;
5560 u_short action, reason = 0, log = 0;
5561 struct mbuf *m = *m0;
5562 struct ip6_hdr *h = NULL;
5563 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
5564 struct pf_state *s = NULL;
5565 struct pf_ruleset *ruleset = NULL;
5566 struct pf_pdesc pd;
5567 int off, terminal = 0, dirndx;
5568
5569 if (!pf_status.running || (m->m_pkthdr.fw_flags & PF_MBUF_GENERATED))
5570 return (PF_PASS);
5571
5572 kif = pfi_index2kif[ifp->if_index];
5573 if (kif == NULL)
5574 return (PF_DROP);
5575
5576#ifdef DIAGNOSTIC
5577 if ((m->m_flags & M_PKTHDR) == 0)
5578		panic("non-M_PKTHDR is passed to pf_test6");
5579#endif
5580
5581 memset(&pd, 0, sizeof(pd));
5582 if (m->m_pkthdr.len < (int)sizeof(*h)) {
5583 action = PF_DROP;
5584 REASON_SET(&reason, PFRES_SHORT);
5585 log = 1;
5586 goto done;
5587 }
5588
5589 /* We do IP header normalization and packet reassembly here */
5590 if (pf_normalize_ip6(m0, dir, kif, &reason) != PF_PASS) {
5591 action = PF_DROP;
5592 goto done;
5593 }
5594 m = *m0;
5595 h = mtod(m, struct ip6_hdr *);
5596
5597 pd.src = (struct pf_addr *)&h->ip6_src;
5598 pd.dst = (struct pf_addr *)&h->ip6_dst;
5599 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
5600 pd.ip_sum = NULL;
5601 pd.af = AF_INET6;
5602 pd.tos = 0;
5603 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
5604
5605 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
5606 pd.proto = h->ip6_nxt;
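	/*
	 * Skip over any IPv6 extension headers to find the transport
	 * protocol; a fragment header is handed to pf_test_fragment() and
	 * terminates processing here.
	 */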
5607 do {
5608 switch (pd.proto) {
5609 case IPPROTO_FRAGMENT:
5610 action = pf_test_fragment(&r, dir, kif, m, h,
5611 &pd, &a, &ruleset);
5612 if (action == PF_DROP)
5613 REASON_SET(&reason, PFRES_FRAG);
5614 goto done;
5615 case IPPROTO_AH:
5616 case IPPROTO_HOPOPTS:
5617 case IPPROTO_ROUTING:
5618 case IPPROTO_DSTOPTS: {
5619 /* get next header and header length */
5620 struct ip6_ext opt6;
5621
5622 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
5623 NULL, NULL, pd.af)) {
5624 DPFPRINTF(PF_DEBUG_MISC,
5625 ("pf: IPv6 short opt\n"));
5626 action = PF_DROP;
5627 REASON_SET(&reason, PFRES_SHORT);
5628 log = 1;
5629 goto done;
5630 }
5631 if (pd.proto == IPPROTO_AH)
5632 off += (opt6.ip6e_len + 2) * 4;
5633 else
5634 off += (opt6.ip6e_len + 1) * 8;
5635 pd.proto = opt6.ip6e_nxt;
5636 /* goto the next header */
5637 break;
5638 }
5639 default:
5640 terminal++;
5641 break;
5642 }
5643 } while (!terminal);
5644
5645 switch (pd.proto) {
5646
5647 case IPPROTO_TCP: {
5648 struct tcphdr th;
5649
5650 pd.hdr.tcp = &th;
5651 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5652 &action, &reason, AF_INET6)) {
5653 log = action != PF_PASS;
5654 goto done;
5655 }
5656 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5657 ntohs(h->ip6_plen), IPPROTO_TCP, AF_INET6)) {
5658 action = PF_DROP;
5659 goto done;
5660 }
5661 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5662 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5663 if (action == PF_DROP)
5664 goto done;
5665 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5666 &reason);
5667 if (action == PF_PASS) {
5668#if NPFSYNC
5669 pfsync_update_state(s);
5670#endif
5671 r = s->rule.ptr;
5672 a = s->anchor.ptr;
5673 log = s->log;
5674 } else if (s == NULL)
5675 action = pf_test_tcp(&r, &s, dir, kif,
5676 m, off, h, &pd, &a, &ruleset);
5677 break;
5678 }
5679
5680 case IPPROTO_UDP: {
5681 struct udphdr uh;
5682
5683 pd.hdr.udp = &uh;
5684 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5685 &action, &reason, AF_INET6)) {
5686 log = action != PF_PASS;
5687 goto done;
5688 }
5689 if (dir == PF_IN && uh.uh_sum && pf_check_proto_cksum(m,
5690 off, ntohs(h->ip6_plen), IPPROTO_UDP, AF_INET6)) {
5691 action = PF_DROP;
5692 goto done;
5693 }
5694 if (uh.uh_dport == 0 ||
5695 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5696 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5697 action = PF_DROP;
5698 goto done;
5699 }
5700 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5701 if (action == PF_PASS) {
5702#if NPFSYNC
5703 pfsync_update_state(s);
5704#endif
5705 r = s->rule.ptr;
5706 a = s->anchor.ptr;
5707 log = s->log;
5708 } else if (s == NULL)
5709 action = pf_test_udp(&r, &s, dir, kif,
5710 m, off, h, &pd, &a, &ruleset);
5711 break;
5712 }
5713
5714 case IPPROTO_ICMPV6: {
5715 struct icmp6_hdr ih;
5716
5717 pd.hdr.icmp6 = &ih;
5718 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
5719 &action, &reason, AF_INET6)) {
5720 log = action != PF_PASS;
5721 goto done;
5722 }
5723 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5724 ntohs(h->ip6_plen), IPPROTO_ICMPV6, AF_INET6)) {
5725 action = PF_DROP;
5726 goto done;
5727 }
5728 action = pf_test_state_icmp(&s, dir, kif,
5729 m, off, h, &pd);
5730 if (action == PF_PASS) {
5731#if NPFSYNC
5732 pfsync_update_state(s);
5733#endif
5734 r = s->rule.ptr;
5735 a = s->anchor.ptr;
5736 log = s->log;
5737 } else if (s == NULL)
5738 action = pf_test_icmp(&r, &s, dir, kif,
5739 m, off, h, &pd, &a, &ruleset);
5740 break;
5741 }
5742
5743 default:
5744 action = pf_test_state_other(&s, dir, kif, &pd);
5745 if (action == PF_PASS) {
5746 r = s->rule.ptr;
5747 a = s->anchor.ptr;
5748 log = s->log;
5749 } else if (s == NULL)
5750 action = pf_test_other(&r, &s, dir, kif, m, off, h,
5751 &pd, &a, &ruleset);
5752 break;
5753 }
5754
5755done:
5756	/* XXX handle IPv6 options, if not allowed; not implemented. */
5757
5758#ifdef ALTQ
5759 if (action == PF_PASS && r->qid) {
5760 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
5761 if (pd.tos == IPTOS_LOWDELAY)
5762 m->m_pkthdr.altq_qid = r->pqid;
5763 else
5764 m->m_pkthdr.altq_qid = r->qid;
5765 m->m_pkthdr.ecn_af = AF_INET6;
5766 m->m_pkthdr.header = h;
5767 }
5768#endif
5769
5770 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5771 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5772 (s->nat_rule.ptr->action == PF_RDR ||
5773 s->nat_rule.ptr->action == PF_BINAT) &&
5774 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)) {
5775 action = PF_DROP;
5776 REASON_SET(&reason, PFRES_MEMORY);
5777 }
5778
5779 m->m_pkthdr.fw_flags |= PF_MBUF_TRANSLATE_LOCALHOST;
5780
5781 if (log)
5782 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, r, a, ruleset);
5783
5784 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
5785 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
5786
5787 if (action == PF_PASS || r->action == PF_DROP) {
5788 r->packets++;
5789 r->bytes += pd.tot_len;
5790 if (a != NULL) {
5791 a->packets++;
5792 a->bytes += pd.tot_len;
5793 }
5794 if (s != NULL) {
5795 dirndx = (dir == s->direction) ? 0 : 1;
5796 s->packets[dirndx]++;
5797 s->bytes[dirndx] += pd.tot_len;
5798 if (s->nat_rule.ptr != NULL) {
5799 s->nat_rule.ptr->packets++;
5800 s->nat_rule.ptr->bytes += pd.tot_len;
5801 }
5802 if (s->src_node != NULL) {
5803 s->src_node->packets++;
5804 s->src_node->bytes += pd.tot_len;
5805 }
5806 if (s->nat_src_node != NULL) {
5807 s->nat_src_node->packets++;
5808 s->nat_src_node->bytes += pd.tot_len;
5809 }
5810 }
5811 tr = r;
5812 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
5813 if (nr != NULL) {
5814 struct pf_addr *x;
5815 /*
5816 * XXX: we need to make sure that the addresses
5817			 * passed to pfr_update_stats() are the same as
5818 * the addresses used during matching (pfr_match)
5819 */
5820 if (r == &pf_default_rule) {
5821 tr = nr;
5822 x = (s == NULL || s->direction == dir) ?
5823 &pd.baddr : &pd.naddr;
5824 } else {
5825 x = (s == NULL || s->direction == dir) ?
5826 &pd.naddr : &pd.baddr;
5827 }
5828 if (x == &pd.baddr || s == NULL) {
5829 if (dir == PF_OUT)
5830 pd.src = x;
5831 else
5832 pd.dst = x;
5833 }
5834 }
5835 if (tr->src.addr.type == PF_ADDR_TABLE)
5836 pfr_update_stats(tr->src.addr.p.tbl, (s == NULL ||
5837 s->direction == dir) ? pd.src : pd.dst, pd.af,
5838 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5839 tr->src.not);
5840 if (tr->dst.addr.type == PF_ADDR_TABLE)
5841 pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL ||
5842 s->direction == dir) ? pd.dst : pd.src, pd.af,
5843 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5844 tr->dst.not);
5845 }
5846
5847
5848 if (action == PF_SYNPROXY_DROP) {
5849 m_freem(*m0);
5850 *m0 = NULL;
5851 action = PF_PASS;
5852 } else if (r->rt)
5853 /* pf_route6 can free the mbuf causing *m0 to become NULL */
5854 pf_route6(m0, r, dir, ifp, s);
5855
5856 return (action);
5857}
5858#endif /* INET6 */