Explicitly mark places in the IPv6 code that require a contiguous buffer.
[dragonfly.git] / sys / net / pf / pf.c
1/* $FreeBSD: src/sys/contrib/pf/net/pf.c,v 1.19 2004/09/11 11:18:25 mlaier Exp $ */
2/* $OpenBSD: pf.c,v 1.433.2.2 2004/07/17 03:22:34 brad Exp $ */
3/* add $OpenBSD: pf.c,v 1.448 2004/05/11 07:34:11 dhartmei Exp $ */
4/* $DragonFly: src/sys/net/pf/pf.c,v 1.11 2006/09/05 00:55:47 dillon Exp $ */
5
6/*
7 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
8 *
9 * Copyright (c) 2001 Daniel Hartmeier
10 * Copyright (c) 2002,2003 Henning Brauer
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * - Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials provided
22 * with the distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 *
37 * Effort sponsored in part by the Defense Advanced Research Projects
38 * Agency (DARPA) and Air Force Research Laboratory, Air Force
39 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
40 *
41 */
42
43#include "opt_inet.h"
44#include "opt_inet6.h"
45#include "use_pfsync.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/malloc.h>
50#include <sys/mbuf.h>
51#include <sys/filio.h>
52#include <sys/socket.h>
53#include <sys/socketvar.h>
54#include <sys/kernel.h>
55#include <sys/time.h>
56#include <sys/sysctl.h>
57#include <sys/endian.h>
58#include <vm/vm_zone.h>
59
60#include <machine/inttypes.h>
61
62#include <net/if.h>
63#include <net/if_types.h>
64#include <net/bpf.h>
65#include <net/route.h>
66
67#include <netinet/in.h>
68#include <netinet/in_var.h>
69#include <netinet/in_systm.h>
70#include <netinet/ip.h>
71#include <netinet/ip_var.h>
72#include <netinet/tcp.h>
73#include <netinet/tcp_seq.h>
74#include <netinet/udp.h>
75#include <netinet/ip_icmp.h>
76#include <netinet/in_pcb.h>
77#include <netinet/tcp_timer.h>
78#include <netinet/tcp_var.h>
79#include <netinet/udp_var.h>
80#include <netinet/icmp_var.h>
81
82#include <net/pf/pfvar.h>
83#include <net/pf/if_pflog.h>
84
85#if NPFSYNC > 0
86#include <net/pf/if_pfsync.h>
87#endif /* NPFSYNC > 0 */
88
89#ifdef INET6
90#include <netinet/ip6.h>
91#include <netinet/in_pcb.h>
92#include <netinet/icmp6.h>
93#include <netinet6/nd6.h>
94#include <netinet6/ip6_var.h>
95#include <netinet6/in6_pcb.h>
96#endif /* INET6 */
97
98#include <sys/in_cksum.h>
99#include <machine/limits.h>
100#include <sys/msgport2.h>
101#include <sys/ucred.h>
102
103extern int ip_optcopy(struct ip *, struct ip *);
104
105#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
106
107/*
108 * Global variables
109 */
110
111struct pf_anchorqueue pf_anchors;
112struct pf_ruleset pf_main_ruleset;
113struct pf_altqqueue pf_altqs[2];
114struct pf_palist pf_pabuf;
115struct pf_altqqueue *pf_altqs_active;
116struct pf_altqqueue *pf_altqs_inactive;
117struct pf_status pf_status;
118
119u_int32_t ticket_altqs_active;
120u_int32_t ticket_altqs_inactive;
121int altqs_inactive_open;
122u_int32_t ticket_pabuf;
123
124struct callout pf_expire_to; /* expire timeout */
125
126vm_zone_t pf_src_tree_pl, pf_rule_pl;
127vm_zone_t pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
128
129void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
130void pf_print_state(struct pf_state *);
131void pf_print_flags(u_int8_t);
132
133u_int16_t pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t,
134 u_int8_t);
135void pf_change_ap(struct pf_addr *, u_int16_t *,
136 u_int16_t *, u_int16_t *, struct pf_addr *,
137 u_int16_t, u_int8_t, sa_family_t);
138#ifdef INET6
139void pf_change_a6(struct pf_addr *, u_int16_t *,
140 struct pf_addr *, u_int8_t);
141#endif /* INET6 */
142void pf_change_icmp(struct pf_addr *, u_int16_t *,
143 struct pf_addr *, struct pf_addr *, u_int16_t,
144 u_int16_t *, u_int16_t *, u_int16_t *,
145 u_int16_t *, u_int8_t, sa_family_t);
146void pf_send_tcp(const struct pf_rule *, sa_family_t,
147 const struct pf_addr *, const struct pf_addr *,
148 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
149 u_int8_t, u_int16_t, u_int16_t, u_int8_t);
150void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
151 sa_family_t, struct pf_rule *);
152struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
153 int, int, struct pfi_kif *,
154 struct pf_addr *, u_int16_t, struct pf_addr *,
155 u_int16_t, int);
156struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
157 int, int, struct pfi_kif *, struct pf_src_node **,
158 struct pf_addr *, u_int16_t,
159 struct pf_addr *, u_int16_t,
160 struct pf_addr *, u_int16_t *);
161int pf_test_tcp(struct pf_rule **, struct pf_state **,
162 int, struct pfi_kif *, struct mbuf *, int,
163 void *, struct pf_pdesc *, struct pf_rule **,
164 struct pf_ruleset **);
165int pf_test_udp(struct pf_rule **, struct pf_state **,
166 int, struct pfi_kif *, struct mbuf *, int,
167 void *, struct pf_pdesc *, struct pf_rule **,
168 struct pf_ruleset **);
169int pf_test_icmp(struct pf_rule **, struct pf_state **,
170 int, struct pfi_kif *, struct mbuf *, int,
171 void *, struct pf_pdesc *, struct pf_rule **,
172 struct pf_ruleset **);
173int pf_test_other(struct pf_rule **, struct pf_state **,
174 int, struct pfi_kif *, struct mbuf *, int, void *,
175 struct pf_pdesc *, struct pf_rule **,
176 struct pf_ruleset **);
177int pf_test_fragment(struct pf_rule **, int,
178 struct pfi_kif *, struct mbuf *, void *,
179 struct pf_pdesc *, struct pf_rule **,
180 struct pf_ruleset **);
181int pf_test_state_tcp(struct pf_state **, int,
182 struct pfi_kif *, struct mbuf *, int,
183 void *, struct pf_pdesc *, u_short *);
184int pf_test_state_udp(struct pf_state **, int,
185 struct pfi_kif *, struct mbuf *, int,
186 void *, struct pf_pdesc *);
187int pf_test_state_icmp(struct pf_state **, int,
188 struct pfi_kif *, struct mbuf *, int,
189 void *, struct pf_pdesc *);
190int pf_test_state_other(struct pf_state **, int,
191 struct pfi_kif *, struct pf_pdesc *);
192static int pf_match_tag(struct mbuf *, struct pf_rule *,
193 struct pf_rule *, int *);
194void pf_hash(struct pf_addr *, struct pf_addr *,
195 struct pf_poolhashkey *, sa_family_t);
196int pf_map_addr(u_int8_t, struct pf_rule *,
197 struct pf_addr *, struct pf_addr *,
198 struct pf_addr *, struct pf_src_node **);
199int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
200 struct pf_addr *, struct pf_addr *, u_int16_t,
201 struct pf_addr *, u_int16_t*, u_int16_t, u_int16_t,
202 struct pf_src_node **);
203void pf_route(struct mbuf **, struct pf_rule *, int,
204 struct ifnet *, struct pf_state *);
205void pf_route6(struct mbuf **, struct pf_rule *, int,
206 struct ifnet *, struct pf_state *);
207int pf_socket_lookup(uid_t *, gid_t *,
208 int, struct pf_pdesc *);
209u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
210 sa_family_t);
211u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
212 sa_family_t);
213u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
214 u_int16_t);
215void pf_set_rt_ifp(struct pf_state *,
216 struct pf_addr *);
217int pf_check_proto_cksum(struct mbuf *, int, int,
218 u_int8_t, sa_family_t);
219int pf_addr_wrap_neq(struct pf_addr_wrap *,
220 struct pf_addr_wrap *);
221struct pf_state *pf_find_state_recurse(struct pfi_kif *,
222 struct pf_state *, u_int8_t);
223
224struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX];
225
226#define STATE_LOOKUP() \
227 do { \
228 if (direction == PF_IN) \
229 *state = pf_find_state_recurse( \
230 kif, &key, PF_EXT_GWY); \
231 else \
232 *state = pf_find_state_recurse( \
233 kif, &key, PF_LAN_EXT); \
234 if (*state == NULL) \
235 return (PF_DROP); \
236 if (direction == PF_OUT && \
237 (((*state)->rule.ptr->rt == PF_ROUTETO && \
238 (*state)->rule.ptr->direction == PF_OUT) || \
239 ((*state)->rule.ptr->rt == PF_REPLYTO && \
240 (*state)->rule.ptr->direction == PF_IN)) && \
241 (*state)->rt_kif != NULL && \
242 (*state)->rt_kif != kif) \
243 return (PF_PASS); \
244 } while (0)
245
246#define STATE_TRANSLATE(s) \
247 (s)->lan.addr.addr32[0] != (s)->gwy.addr.addr32[0] || \
248 ((s)->af == AF_INET6 && \
249 ((s)->lan.addr.addr32[1] != (s)->gwy.addr.addr32[1] || \
250 (s)->lan.addr.addr32[2] != (s)->gwy.addr.addr32[2] || \
251 (s)->lan.addr.addr32[3] != (s)->gwy.addr.addr32[3])) || \
252 (s)->lan.port != (s)->gwy.port
253
254#define BOUND_IFACE(r, k) (((r)->rule_flag & PFRULE_IFBOUND) ? (k) : \
255 ((r)->rule_flag & PFRULE_GRBOUND) ? (k)->pfik_parent : \
256 (k)->pfik_parent->pfik_parent)
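/*
 * Annotation (not part of the original pf.c): STATE_LOOKUP() finds the
 * state for a packet in the ext_gwy tree for inbound traffic and in the
 * lan_ext tree for outbound traffic, and lets outbound packets of
 * route-to/reply-to states pass unmodified on interfaces other than the
 * state's route interface.  STATE_TRANSLATE(s) is non-zero when the
 * LAN-side and gateway-side address or port of a state differ, i.e. when
 * NAT/BINAT/RDR applied and the packet must be rewritten; only
 * addr32[0] is compared for AF_INET, the remaining words only for
 * AF_INET6.  BOUND_IFACE() selects the kif a new state is attached to:
 * the interface itself for interface-bound rules, its parent (group)
 * kif for group-bound rules, and the grandparent kif for floating
 * states.
 */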
257
258static int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
259static int pf_state_compare_lan_ext(struct pf_state *,
260 struct pf_state *);
261static int pf_state_compare_ext_gwy(struct pf_state *,
262 struct pf_state *);
263static int pf_state_compare_id(struct pf_state *,
264 struct pf_state *);
265
266struct pf_src_tree tree_src_tracking;
267
268struct pf_state_tree_id tree_id;
269struct pf_state_queue state_updates;
270
271RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
272RB_GENERATE(pf_state_tree_lan_ext, pf_state,
273 u.s.entry_lan_ext, pf_state_compare_lan_ext);
274RB_GENERATE(pf_state_tree_ext_gwy, pf_state,
275 u.s.entry_ext_gwy, pf_state_compare_ext_gwy);
276RB_GENERATE(pf_state_tree_id, pf_state,
277 u.s.entry_id, pf_state_compare_id);
278
279static int
280pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
281{
282 int diff;
283
284 if (a->rule.ptr > b->rule.ptr)
285 return (1);
286 if (a->rule.ptr < b->rule.ptr)
287 return (-1);
288 if ((diff = a->af - b->af) != 0)
289 return (diff);
290 switch (a->af) {
291#ifdef INET
292 case AF_INET:
293 if (a->addr.addr32[0] > b->addr.addr32[0])
294 return (1);
295 if (a->addr.addr32[0] < b->addr.addr32[0])
296 return (-1);
297 break;
298#endif /* INET */
299#ifdef INET6
300 case AF_INET6:
301 if (a->addr.addr32[3] > b->addr.addr32[3])
302 return (1);
303 if (a->addr.addr32[3] < b->addr.addr32[3])
304 return (-1);
305 if (a->addr.addr32[2] > b->addr.addr32[2])
306 return (1);
307 if (a->addr.addr32[2] < b->addr.addr32[2])
308 return (-1);
309 if (a->addr.addr32[1] > b->addr.addr32[1])
310 return (1);
311 if (a->addr.addr32[1] < b->addr.addr32[1])
312 return (-1);
313 if (a->addr.addr32[0] > b->addr.addr32[0])
314 return (1);
315 if (a->addr.addr32[0] < b->addr.addr32[0])
316 return (-1);
317 break;
318#endif /* INET6 */
319 }
320 return (0);
321}
322
323static int
324pf_state_compare_lan_ext(struct pf_state *a, struct pf_state *b)
325{
326 int diff;
327
328 if ((diff = a->proto - b->proto) != 0)
329 return (diff);
330 if ((diff = a->af - b->af) != 0)
331 return (diff);
332 switch (a->af) {
333#ifdef INET
334 case AF_INET:
335 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
336 return (1);
337 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
338 return (-1);
339 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
340 return (1);
341 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
342 return (-1);
343 break;
344#endif /* INET */
345#ifdef INET6
346 case AF_INET6:
347 if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
348 return (1);
349 if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
350 return (-1);
351 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
352 return (1);
353 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
354 return (-1);
355 if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
356 return (1);
357 if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
358 return (-1);
359 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
360 return (1);
361 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
362 return (-1);
363 if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
364 return (1);
365 if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
366 return (-1);
367 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
368 return (1);
369 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
370 return (-1);
371 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
372 return (1);
373 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
374 return (-1);
375 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
376 return (1);
377 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
378 return (-1);
379 break;
380#endif /* INET6 */
381 }
382
383 if ((diff = a->lan.port - b->lan.port) != 0)
384 return (diff);
385 if ((diff = a->ext.port - b->ext.port) != 0)
386 return (diff);
387
388 return (0);
389}
390
391static int
392pf_state_compare_ext_gwy(struct pf_state *a, struct pf_state *b)
393{
394 int diff;
395
396 if ((diff = a->proto - b->proto) != 0)
397 return (diff);
398 if ((diff = a->af - b->af) != 0)
399 return (diff);
400 switch (a->af) {
401#ifdef INET
402 case AF_INET:
403 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
404 return (1);
405 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
406 return (-1);
407 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
408 return (1);
409 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
410 return (-1);
411 break;
412#endif /* INET */
413#ifdef INET6
414 case AF_INET6:
415 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
416 return (1);
417 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
418 return (-1);
419 if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
420 return (1);
421 if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
422 return (-1);
423 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
424 return (1);
425 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
426 return (-1);
427 if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
428 return (1);
429 if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
430 return (-1);
431 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
432 return (1);
433 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
434 return (-1);
435 if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
436 return (1);
437 if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
438 return (-1);
439 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
440 return (1);
441 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
442 return (-1);
443 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
444 return (1);
445 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
446 return (-1);
447 break;
448#endif /* INET6 */
449 }
450
451 if ((diff = a->ext.port - b->ext.port) != 0)
452 return (diff);
453 if ((diff = a->gwy.port - b->gwy.port) != 0)
454 return (diff);
455
456 return (0);
457}
458
459static int
460pf_state_compare_id(struct pf_state *a, struct pf_state *b)
461{
462 if (a->id > b->id)
463 return (1);
464 if (a->id < b->id)
465 return (-1);
466 if (a->creatorid > b->creatorid)
467 return (1);
468 if (a->creatorid < b->creatorid)
469 return (-1);
470
471 return (0);
472}
473
474#ifdef INET6
475void
476pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
477{
478 switch (af) {
479#ifdef INET
480 case AF_INET:
481 dst->addr32[0] = src->addr32[0];
482 break;
483#endif /* INET */
484 case AF_INET6:
485 dst->addr32[0] = src->addr32[0];
486 dst->addr32[1] = src->addr32[1];
487 dst->addr32[2] = src->addr32[2];
488 dst->addr32[3] = src->addr32[3];
489 break;
490 }
491}
492#endif
493
494struct pf_state *
495pf_find_state_byid(struct pf_state *key)
496{
497 pf_status.fcounters[FCNT_STATE_SEARCH]++;
498 return (RB_FIND(pf_state_tree_id, &tree_id, key));
499}
500
501struct pf_state *
502pf_find_state_recurse(struct pfi_kif *kif, struct pf_state *key, u_int8_t tree)
503{
504 struct pf_state *s;
505
506 pf_status.fcounters[FCNT_STATE_SEARCH]++;
507
508 switch (tree) {
509 case PF_LAN_EXT:
510 for (; kif != NULL; kif = kif->pfik_parent) {
511 s = RB_FIND(pf_state_tree_lan_ext,
512 &kif->pfik_lan_ext, key);
513 if (s != NULL)
514 return (s);
515 }
516 return (NULL);
517 case PF_EXT_GWY:
518 for (; kif != NULL; kif = kif->pfik_parent) {
519 s = RB_FIND(pf_state_tree_ext_gwy,
520 &kif->pfik_ext_gwy, key);
521 if (s != NULL)
522 return (s);
523 }
524 return (NULL);
525 default:
526 panic("pf_find_state_recurse");
527 }
528}
529
530struct pf_state *
531pf_find_state_all(struct pf_state *key, u_int8_t tree, int *more)
532{
533 struct pf_state *s, *ss = NULL;
534 struct pfi_kif *kif;
535
536 pf_status.fcounters[FCNT_STATE_SEARCH]++;
537
538 switch (tree) {
539 case PF_LAN_EXT:
540 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
541 s = RB_FIND(pf_state_tree_lan_ext,
542 &kif->pfik_lan_ext, key);
543 if (s == NULL)
544 continue;
545 if (more == NULL)
546 return (s);
547 ss = s;
548 (*more)++;
549 }
550 return (ss);
551 case PF_EXT_GWY:
552 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) {
553 s = RB_FIND(pf_state_tree_ext_gwy,
554 &kif->pfik_ext_gwy, key);
555 if (s == NULL)
556 continue;
557 if (more == NULL)
558 return (s);
559 ss = s;
560 (*more)++;
561 }
562 return (ss);
563 default:
564 panic("pf_find_state_all");
565 }
566}
567
568int
569pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
570 struct pf_addr *src, sa_family_t af)
571{
572 struct pf_src_node k;
573
574 if (*sn == NULL) {
575 k.af = af;
576 PF_ACPY(&k.addr, src, af);
577 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
578 rule->rpool.opts & PF_POOL_STICKYADDR)
579 k.rule.ptr = rule;
580 else
581 k.rule.ptr = NULL;
582 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
583 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
584 }
585 if (*sn == NULL) {
586 if (!rule->max_src_nodes ||
587 rule->src_nodes < rule->max_src_nodes)
588 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT);
589 if ((*sn) == NULL)
590 return (-1);
591 bzero(*sn, sizeof(struct pf_src_node));
592 (*sn)->af = af;
593 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
594 rule->rpool.opts & PF_POOL_STICKYADDR)
595 (*sn)->rule.ptr = rule;
596 else
597 (*sn)->rule.ptr = NULL;
598 PF_ACPY(&(*sn)->addr, src, af);
599 if (RB_INSERT(pf_src_tree,
600 &tree_src_tracking, *sn) != NULL) {
601 if (pf_status.debug >= PF_DEBUG_MISC) {
602 printf("pf: src_tree insert failed: ");
603 pf_print_host(&(*sn)->addr, 0, af);
604 printf("\n");
605 }
606 pool_put(&pf_src_tree_pl, *sn);
607 return (-1);
608 }
609 (*sn)->creation = time_second;
610 (*sn)->ruletype = rule->action;
611 if ((*sn)->rule.ptr != NULL)
612 (*sn)->rule.ptr->src_nodes++;
613 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
614 pf_status.src_nodes++;
615 } else {
616 if (rule->max_src_states &&
617 (*sn)->states >= rule->max_src_states)
618 return (-1);
619 }
620 return (0);
621}
622
623int
624pf_insert_state(struct pfi_kif *kif, struct pf_state *state)
625{
626 /* Thou MUST NOT insert multiple duplicate keys */
627 state->u.s.kif = kif;
628 if (RB_INSERT(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state)) {
629 if (pf_status.debug >= PF_DEBUG_MISC) {
630 printf("pf: state insert failed: tree_lan_ext");
631 printf(" lan: ");
632 pf_print_host(&state->lan.addr, state->lan.port,
633 state->af);
634 printf(" gwy: ");
635 pf_print_host(&state->gwy.addr, state->gwy.port,
636 state->af);
637 printf(" ext: ");
638 pf_print_host(&state->ext.addr, state->ext.port,
639 state->af);
640 if (state->sync_flags & PFSTATE_FROMSYNC)
641 printf(" (from sync)");
642 printf("\n");
643 }
644 return (-1);
645 }
646
647 if (RB_INSERT(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state)) {
648 if (pf_status.debug >= PF_DEBUG_MISC) {
649 printf("pf: state insert failed: tree_ext_gwy");
650 printf(" lan: ");
651 pf_print_host(&state->lan.addr, state->lan.port,
652 state->af);
653 printf(" gwy: ");
654 pf_print_host(&state->gwy.addr, state->gwy.port,
655 state->af);
656 printf(" ext: ");
657 pf_print_host(&state->ext.addr, state->ext.port,
658 state->af);
659 if (state->sync_flags & PFSTATE_FROMSYNC)
660 printf(" (from sync)");
661 printf("\n");
662 }
663 RB_REMOVE(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state);
664 return (-1);
665 }
666
667 if (state->id == 0 && state->creatorid == 0) {
668 state->id = htobe64(pf_status.stateid++);
669 state->creatorid = pf_status.hostid;
670 }
671 if (RB_INSERT(pf_state_tree_id, &tree_id, state) != NULL) {
672 if (pf_status.debug >= PF_DEBUG_MISC) {
673 printf("pf: state insert failed: "
674 "id: %016" PRIx64 " creatorid: %08" PRIx32,
675 be64toh(state->id), ntohl(state->creatorid));
676 if (state->sync_flags & PFSTATE_FROMSYNC)
677 printf(" (from sync)");
678 printf("\n");
679 }
680 RB_REMOVE(pf_state_tree_lan_ext, &kif->pfik_lan_ext, state);
681 RB_REMOVE(pf_state_tree_ext_gwy, &kif->pfik_ext_gwy, state);
682 return (-1);
683 }
684 TAILQ_INSERT_HEAD(&state_updates, state, u.s.entry_updates);
685
686 pf_status.fcounters[FCNT_STATE_INSERT]++;
687 pf_status.states++;
688 pfi_attach_state(kif);
689#if NPFSYNC
690 pfsync_insert_state(state);
691#endif
692 return (0);
693}
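/*
 * Annotation (not part of the original pf.c): every state is linked into
 * three red-black trees: the per-interface lan_ext and ext_gwy trees
 * (keyed on the two lookup directions, see the compare functions above)
 * and the global tree_id keyed on (id, creatorid) for pfsync.
 * pf_insert_state() inserts in that order and removes the earlier
 * entries again if a later insert collides, so a failed insert leaves
 * all three trees unchanged.
 */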
694
695void
696pf_purge_timeout(void *arg)
697{
698 struct callout *to = arg;
699
700 crit_enter();
701 pf_purge_expired_states();
702 pf_purge_expired_fragments();
703 pf_purge_expired_src_nodes();
704 crit_exit();
705
706 callout_reset(to, pf_default_rule.timeout[PFTM_INTERVAL] * hz,
707 pf_purge_timeout, to);
708}
709
710u_int32_t
711pf_state_expires(const struct pf_state *state)
712{
713 u_int32_t timeout;
714 u_int32_t start;
715 u_int32_t end;
716 u_int32_t states;
717
718 /* handle all PFTM_* > PFTM_MAX here */
719 if (state->timeout == PFTM_PURGE)
720 return (time_second);
721 if (state->timeout == PFTM_UNTIL_PACKET)
722 return (0);
723 KASSERT((state->timeout < PFTM_MAX),
724 ("pf_state_expires: timeout > PFTM_MAX"));
725 timeout = state->rule.ptr->timeout[state->timeout];
726 if (!timeout)
727 timeout = pf_default_rule.timeout[state->timeout];
728 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
729 if (start) {
730 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
731 states = state->rule.ptr->states;
732 } else {
733 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
734 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
735 states = pf_status.states;
736 }
737 if (end && states > start && start < end) {
738 if (states < end)
739 return (state->expire + timeout * (end - states) /
740 (end - start));
741 else
742 return (time_second);
743 }
744 return (state->expire + timeout);
745}
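/*
 * Annotation (not part of the original pf.c): with adaptive timeouts the
 * configured timeout shrinks linearly as the state count climbs from
 * adaptive.start toward adaptive.end.  For example, with start=6000,
 * end=12000, timeout=3600s and 9000 current states, the effective
 * timeout becomes 3600 * (12000 - 9000) / (12000 - 6000) = 1800s; at or
 * above adaptive.end the state is reported as already expired.
 */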
746
747void
748pf_purge_expired_src_nodes(void)
749{
750 struct pf_src_node *cur, *next;
751
752 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
753 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
754
755 if (cur->states <= 0 && cur->expire <= time_second) {
756 if (cur->rule.ptr != NULL) {
757 cur->rule.ptr->src_nodes--;
758 if (cur->rule.ptr->states <= 0 &&
759 cur->rule.ptr->max_src_nodes <= 0)
760 pf_rm_rule(NULL, cur->rule.ptr);
761 }
762 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
763 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
764 pf_status.src_nodes--;
765 pool_put(&pf_src_tree_pl, cur);
766 }
767 }
768}
769
770void
771pf_src_tree_remove_state(struct pf_state *s)
772{
773 u_int32_t timeout;
774
775 if (s->src_node != NULL) {
776 if (--s->src_node->states <= 0) {
777 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
778 if (!timeout)
779 timeout =
780 pf_default_rule.timeout[PFTM_SRC_NODE];
781 s->src_node->expire = time_second + timeout;
782 }
783 }
784 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
785 if (--s->nat_src_node->states <= 0) {
786 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
787 if (!timeout)
788 timeout =
789 pf_default_rule.timeout[PFTM_SRC_NODE];
790 s->nat_src_node->expire = time_second + timeout;
791 }
792 }
793 s->src_node = s->nat_src_node = NULL;
794}
795
796static int
797pf_purge_expired_states_callback(struct pf_state *cur, void *data __unused)
798{
799 if (pf_state_expires(cur) <= time_second) {
800 RB_REMOVE(pf_state_tree_ext_gwy,
801 &cur->u.s.kif->pfik_ext_gwy, cur);
802 RB_REMOVE(pf_state_tree_lan_ext,
803 &cur->u.s.kif->pfik_lan_ext, cur);
804 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
805 if (cur->src.state == PF_TCPS_PROXY_DST) {
806 pf_send_tcp(cur->rule.ptr, cur->af,
807 &cur->ext.addr, &cur->lan.addr,
808 cur->ext.port, cur->lan.port,
809 cur->src.seqhi, cur->src.seqlo + 1, 0,
810 TH_RST|TH_ACK, 0, 0);
811 }
812#if NPFSYNC
813 pfsync_delete_state(cur);
814#endif
815 pf_src_tree_remove_state(cur);
816 if (--cur->rule.ptr->states <= 0 &&
817 cur->rule.ptr->src_nodes <= 0)
818 pf_rm_rule(NULL, cur->rule.ptr);
819 if (cur->nat_rule.ptr != NULL)
820 if (--cur->nat_rule.ptr->states <= 0 &&
821 cur->nat_rule.ptr->src_nodes <= 0)
822 pf_rm_rule(NULL, cur->nat_rule.ptr);
823 if (cur->anchor.ptr != NULL)
824 if (--cur->anchor.ptr->states <= 0)
825 pf_rm_rule(NULL, cur->anchor.ptr);
826 pf_normalize_tcp_cleanup(cur);
827 pfi_detach_state(cur->u.s.kif);
828 TAILQ_REMOVE(&state_updates, cur, u.s.entry_updates);
829 pool_put(&pf_state_pl, cur);
830 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
831 pf_status.states--;
832 }
833 return(0);
834}
835
836void
837pf_purge_expired_states(void)
838{
839 RB_SCAN(pf_state_tree_id, &tree_id, NULL,
840 pf_purge_expired_states_callback, NULL);
841}
842
843
844int
845pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
846{
847 if (aw->type != PF_ADDR_TABLE)
848 return (0);
849 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
850 return (1);
851 return (0);
852}
853
854void
855pf_tbladdr_remove(struct pf_addr_wrap *aw)
856{
857 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
858 return;
859 pfr_detach_table(aw->p.tbl);
860 aw->p.tbl = NULL;
861}
862
863void
864pf_tbladdr_copyout(struct pf_addr_wrap *aw)
865{
866 struct pfr_ktable *kt = aw->p.tbl;
867
868 if (aw->type != PF_ADDR_TABLE || kt == NULL)
869 return;
870 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
871 kt = kt->pfrkt_root;
872 aw->p.tbl = NULL;
873 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
874 kt->pfrkt_cnt : -1;
875}
876
877void
878pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
879{
880 switch (af) {
881#ifdef INET
882 case AF_INET: {
883 u_int32_t a = ntohl(addr->addr32[0]);
884 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
885 (a>>8)&255, a&255);
886 if (p) {
887 p = ntohs(p);
888 printf(":%u", p);
889 }
890 break;
891 }
892#endif /* INET */
893#ifdef INET6
894 case AF_INET6: {
895 u_int16_t b;
896 u_int8_t i, curstart = 255, curend = 0,
897 maxstart = 0, maxend = 0;
898 for (i = 0; i < 8; i++) {
899 if (!addr->addr16[i]) {
900 if (curstart == 255)
901 curstart = i;
902 else
903 curend = i;
904 } else {
905 if (curstart) {
906 if ((curend - curstart) >
907 (maxend - maxstart)) {
908 maxstart = curstart;
909 maxend = curend;
910 curstart = 255;
911 }
912 }
913 }
914 }
915 for (i = 0; i < 8; i++) {
916 if (i >= maxstart && i <= maxend) {
917 if (maxend != 7) {
918 if (i == maxstart)
919 printf(":");
920 } else {
921 if (i == maxend)
922 printf(":");
923 }
924 } else {
925 b = ntohs(addr->addr16[i]);
926 printf("%x", b);
927 if (i < 7)
928 printf(":");
929 }
930 }
931 if (p) {
932 p = ntohs(p);
933 printf("[%u]", p);
934 }
935 break;
936 }
937#endif /* INET6 */
938 }
939}
940
941void
942pf_print_state(struct pf_state *s)
943{
944 switch (s->proto) {
945 case IPPROTO_TCP:
946 printf("TCP ");
947 break;
948 case IPPROTO_UDP:
949 printf("UDP ");
950 break;
951 case IPPROTO_ICMP:
952 printf("ICMP ");
953 break;
954 case IPPROTO_ICMPV6:
955 printf("ICMPV6 ");
956 break;
957 default:
958 printf("%u ", s->proto);
959 break;
960 }
961 pf_print_host(&s->lan.addr, s->lan.port, s->af);
962 printf(" ");
963 pf_print_host(&s->gwy.addr, s->gwy.port, s->af);
964 printf(" ");
965 pf_print_host(&s->ext.addr, s->ext.port, s->af);
966 printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
967 s->src.seqhi, s->src.max_win, s->src.seqdiff);
968 if (s->src.wscale && s->dst.wscale)
969 printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
970 printf("]");
971 printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
972 s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
973 if (s->src.wscale && s->dst.wscale)
974 printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
975 printf("]");
976 printf(" %u:%u", s->src.state, s->dst.state);
977}
978
979void
980pf_print_flags(u_int8_t f)
981{
982 if (f)
983 printf(" ");
984 if (f & TH_FIN)
985 printf("F");
986 if (f & TH_SYN)
987 printf("S");
988 if (f & TH_RST)
989 printf("R");
990 if (f & TH_PUSH)
991 printf("P");
992 if (f & TH_ACK)
993 printf("A");
994 if (f & TH_URG)
995 printf("U");
996 if (f & TH_ECE)
997 printf("E");
998 if (f & TH_CWR)
999 printf("W");
1000}
1001
1002#define PF_SET_SKIP_STEPS(i) \
1003 do { \
1004 while (head[i] != cur) { \
1005 head[i]->skip[i].ptr = cur; \
1006 head[i] = TAILQ_NEXT(head[i], entries); \
1007 } \
1008 } while (0)
1009
1010void
1011pf_calc_skip_steps(struct pf_rulequeue *rules)
1012{
1013 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1014 int i;
1015
1016 cur = TAILQ_FIRST(rules);
1017 prev = cur;
1018 for (i = 0; i < PF_SKIP_COUNT; ++i)
1019 head[i] = cur;
1020 while (cur != NULL) {
1021
1022 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1023 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1024 if (cur->direction != prev->direction)
1025 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1026 if (cur->af != prev->af)
1027 PF_SET_SKIP_STEPS(PF_SKIP_AF);
1028 if (cur->proto != prev->proto)
1029 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1030 if (cur->src.not != prev->src.not ||
1031 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1032 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
1033 if (cur->src.port[0] != prev->src.port[0] ||
1034 cur->src.port[1] != prev->src.port[1] ||
1035 cur->src.port_op != prev->src.port_op)
1036 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1037 if (cur->dst.not != prev->dst.not ||
1038 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1039 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
1040 if (cur->dst.port[0] != prev->dst.port[0] ||
1041 cur->dst.port[1] != prev->dst.port[1] ||
1042 cur->dst.port_op != prev->dst.port_op)
1043 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
1044
1045 prev = cur;
1046 cur = TAILQ_NEXT(cur, entries);
1047 }
1048 for (i = 0; i < PF_SKIP_COUNT; ++i)
1049 PF_SET_SKIP_STEPS(i);
1050}
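/*
 * Annotation (not part of the original pf.c): skip steps are an
 * evaluation shortcut.  For each rule and each PF_SKIP_* criterion,
 * skip[i] points at the first later rule whose value for that criterion
 * differs.  When a packet fails, say, the source-address test, the rule
 * evaluator can jump straight to r->skip[PF_SKIP_SRC_ADDR].ptr (as the
 * matching loops below do) instead of re-testing every consecutive rule
 * that shares the same source address.
 */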
1051
1052int
1053pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1054{
1055 if (aw1->type != aw2->type)
1056 return (1);
1057 switch (aw1->type) {
1058 case PF_ADDR_ADDRMASK:
1059 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1060 return (1);
1061 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1062 return (1);
1063 return (0);
1064 case PF_ADDR_DYNIFTL:
1065 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1066 case PF_ADDR_NOROUTE:
1067 return (0);
1068 case PF_ADDR_TABLE:
1069 return (aw1->p.tbl != aw2->p.tbl);
1070 default:
1071 printf("invalid address type: %d\n", aw1->type);
1072 return (1);
1073 }
1074}
1075
1076void
1077pf_update_anchor_rules(void)
1078{
1079 struct pf_rule *rule;
1080 int i;
1081
1082 for (i = 0; i < PF_RULESET_MAX; ++i)
1083 TAILQ_FOREACH(rule, pf_main_ruleset.rules[i].active.ptr,
1084 entries)
1085 if (rule->anchorname[0])
1086 rule->anchor = pf_find_anchor(rule->anchorname);
1087 else
1088 rule->anchor = NULL;
1089}
1090
1091u_int16_t
1092pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
1093{
1094 u_int32_t l;
1095
1096 if (udp && !cksum)
1097 return (0x0000);
1098 l = cksum + old - new;
1099 l = (l >> 16) + (l & 65535);
1100 l = l & 65535;
1101 if (udp && !l)
1102 return (0xFFFF);
1103 return (l);
1104}
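/*
 * Annotation (not part of the original pf.c): pf_cksum_fixup() updates a
 * ones-complement Internet checksum incrementally when one 16-bit word
 * changes from 'old' to 'new' (cf. RFC 1624) instead of recomputing the
 * sum over the whole packet.  The 'udp' flag preserves the UDP
 * convention that 0 means "no checksum", so a computed 0 is emitted as
 * 0xffff.  A hypothetical use when rewriting a destination port might
 * look like (old_port/new_port are placeholder names, not identifiers
 * from this file):
 *
 *	th->th_dport = new_port;
 *	th->th_sum = pf_cksum_fixup(th->th_sum, old_port, new_port, 0);
 */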
1105
1106void
1107pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
1108 struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
1109{
1110 struct pf_addr ao;
1111 u_int16_t po = *p;
1112
1113 PF_ACPY(&ao, a, af);
1114 PF_ACPY(a, an, af);
1115
1116 *p = pn;
1117
1118 switch (af) {
1119#ifdef INET
1120 case AF_INET:
1121 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1122 ao.addr16[0], an->addr16[0], 0),
1123 ao.addr16[1], an->addr16[1], 0);
1124 *p = pn;
1125 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1126 ao.addr16[0], an->addr16[0], u),
1127 ao.addr16[1], an->addr16[1], u),
1128 po, pn, u);
1129 break;
1130#endif /* INET */
1131#ifdef INET6
1132 case AF_INET6:
1133 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1134 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1135 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1136 ao.addr16[0], an->addr16[0], u),
1137 ao.addr16[1], an->addr16[1], u),
1138 ao.addr16[2], an->addr16[2], u),
1139 ao.addr16[3], an->addr16[3], u),
1140 ao.addr16[4], an->addr16[4], u),
1141 ao.addr16[5], an->addr16[5], u),
1142 ao.addr16[6], an->addr16[6], u),
1143 ao.addr16[7], an->addr16[7], u),
1144 po, pn, u);
1145 break;
1146#endif /* INET6 */
1147 }
1148}
1149
1150
1151/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
1152void
1153pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
1154{
1155 u_int32_t ao;
1156
1157 memcpy(&ao, a, sizeof(ao));
1158 memcpy(a, &an, sizeof(u_int32_t));
1159 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
1160 ao % 65536, an % 65536, u);
1161}
1162
1163#ifdef INET6
1164void
1165pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
1166{
1167 struct pf_addr ao;
1168
1169 PF_ACPY(&ao, a, AF_INET6);
1170 PF_ACPY(a, an, AF_INET6);
1171
1172 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1173 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1174 pf_cksum_fixup(pf_cksum_fixup(*c,
1175 ao.addr16[0], an->addr16[0], u),
1176 ao.addr16[1], an->addr16[1], u),
1177 ao.addr16[2], an->addr16[2], u),
1178 ao.addr16[3], an->addr16[3], u),
1179 ao.addr16[4], an->addr16[4], u),
1180 ao.addr16[5], an->addr16[5], u),
1181 ao.addr16[6], an->addr16[6], u),
1182 ao.addr16[7], an->addr16[7], u);
1183}
1184#endif /* INET6 */
1185
1186void
1187pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
1188 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
1189 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
1190{
1191 struct pf_addr oia, ooa;
1192
1193 PF_ACPY(&oia, ia, af);
1194 PF_ACPY(&ooa, oa, af);
1195
1196 /* Change inner protocol port, fix inner protocol checksum. */
1197 if (ip != NULL) {
1198 u_int16_t oip = *ip;
1199 u_int32_t opc = 0;
1200
1201 if (pc != NULL)
1202 opc = *pc;
1203 *ip = np;
1204 if (pc != NULL)
1205 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
1206 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
1207 if (pc != NULL)
1208 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
1209 }
1210 /* Change inner ip address, fix inner ip and icmp checksums. */
1211 PF_ACPY(ia, na, af);
1212 switch (af) {
1213#ifdef INET
1214 case AF_INET: {
1215 u_int32_t oh2c = *h2c;
1216
1217 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
1218 oia.addr16[0], ia->addr16[0], 0),
1219 oia.addr16[1], ia->addr16[1], 0);
1220 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1221 oia.addr16[0], ia->addr16[0], 0),
1222 oia.addr16[1], ia->addr16[1], 0);
1223 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
1224 break;
1225 }
1226#endif /* INET */
1227#ifdef INET6
1228 case AF_INET6:
1229 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1230 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1231 pf_cksum_fixup(pf_cksum_fixup(*ic,
1232 oia.addr16[0], ia->addr16[0], u),
1233 oia.addr16[1], ia->addr16[1], u),
1234 oia.addr16[2], ia->addr16[2], u),
1235 oia.addr16[3], ia->addr16[3], u),
1236 oia.addr16[4], ia->addr16[4], u),
1237 oia.addr16[5], ia->addr16[5], u),
1238 oia.addr16[6], ia->addr16[6], u),
1239 oia.addr16[7], ia->addr16[7], u);
1240 break;
1241#endif /* INET6 */
1242 }
1243 /* Change outer ip address, fix outer ip or icmpv6 checksum. */
1244 PF_ACPY(oa, na, af);
1245 switch (af) {
1246#ifdef INET
1247 case AF_INET:
1248 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
1249 ooa.addr16[0], oa->addr16[0], 0),
1250 ooa.addr16[1], oa->addr16[1], 0);
1251 break;
1252#endif /* INET */
1253#ifdef INET6
1254 case AF_INET6:
1255 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1256 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1257 pf_cksum_fixup(pf_cksum_fixup(*ic,
1258 ooa.addr16[0], oa->addr16[0], u),
1259 ooa.addr16[1], oa->addr16[1], u),
1260 ooa.addr16[2], oa->addr16[2], u),
1261 ooa.addr16[3], oa->addr16[3], u),
1262 ooa.addr16[4], oa->addr16[4], u),
1263 ooa.addr16[5], oa->addr16[5], u),
1264 ooa.addr16[6], oa->addr16[6], u),
1265 ooa.addr16[7], oa->addr16[7], u);
1266 break;
1267#endif /* INET6 */
1268 }
1269}
1270
1271void
1272pf_send_tcp(const struct pf_rule *r, sa_family_t af,
1273 const struct pf_addr *saddr, const struct pf_addr *daddr,
1274 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
1275 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl)
1276{
1277 struct mbuf *m;
1278 int len = 0, tlen;
1279#ifdef INET
1280 struct ip *h = NULL;
1281#endif /* INET */
1282#ifdef INET6
1283 struct ip6_hdr *h6 = NULL;
1284#endif /* INET6 */
1285 struct tcphdr *th = NULL;
1286 char *opt;
1287
1288 /* maximum segment size tcp option */
1289 tlen = sizeof(struct tcphdr);
1290 if (mss)
1291 tlen += 4;
1292
1293 switch (af) {
1294#ifdef INET
1295 case AF_INET:
1296 len = sizeof(struct ip) + tlen;
1297 break;
1298#endif /* INET */
1299#ifdef INET6
1300 case AF_INET6:
1301 len = sizeof(struct ip6_hdr) + tlen;
1302 break;
1303#endif /* INET6 */
1304 }
1305
1306 /* create outgoing mbuf */
1307 m = m_gethdr(MB_DONTWAIT, MT_HEADER);
1308 if (m == NULL)
1309 return;
1310 m->m_pkthdr.fw_flags |= PF_MBUF_GENERATED;
1311#ifdef ALTQ
1312 if (r != NULL && r->qid) {
1313 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
1314 m->m_pkthdr.altq_qid = r->qid;
1315 m->m_pkthdr.ecn_af = af;
1316 m->m_pkthdr.header = mtod(m, struct ip *);
1317 }
1318#endif
1319 m->m_data += max_linkhdr;
1320 m->m_pkthdr.len = m->m_len = len;
1321 m->m_pkthdr.rcvif = NULL;
1322 bzero(m->m_data, len);
1323 switch (af) {
1324#ifdef INET
1325 case AF_INET:
1326 h = mtod(m, struct ip *);
1327
1328 /* IP header fields included in the TCP checksum */
1329 h->ip_p = IPPROTO_TCP;
1330 h->ip_len = tlen;
1331 h->ip_src.s_addr = saddr->v4.s_addr;
1332 h->ip_dst.s_addr = daddr->v4.s_addr;
1333
1334 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
1335 break;
1336#endif /* INET */
1337#ifdef INET6
1338 case AF_INET6:
1339 h6 = mtod(m, struct ip6_hdr *);
1340
1341 /* IP header fields included in the TCP checksum */
1342 h6->ip6_nxt = IPPROTO_TCP;
1343 h6->ip6_plen = htons(tlen);
1344 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
1345 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
1346
1347 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
1348 break;
1349#endif /* INET6 */
1350 }
1351
1352 /* TCP header */
1353 th->th_sport = sport;
1354 th->th_dport = dport;
1355 th->th_seq = htonl(seq);
1356 th->th_ack = htonl(ack);
1357 th->th_off = tlen >> 2;
1358 th->th_flags = flags;
1359 th->th_win = htons(win);
1360
1361 if (mss) {
1362 opt = (char *)(th + 1);
1363 opt[0] = TCPOPT_MAXSEG;
1364 opt[1] = 4;
1365 mss = htons(mss);
1366 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
1367 }
1368
1369 switch (af) {
1370#ifdef INET
1371 case AF_INET:
1372 /* TCP checksum */
1373 th->th_sum = in_cksum(m, len);
1374
1375 /* Finish the IP header */
1376 h->ip_v = 4;
1377 h->ip_hl = sizeof(*h) >> 2;
1378 h->ip_tos = IPTOS_LOWDELAY;
1379 h->ip_len = len;
1380 h->ip_off = path_mtu_discovery ? IP_DF : 0;
1381 h->ip_ttl = ttl ? ttl : ip_defttl;
1382 h->ip_sum = 0;
1383 ip_output(m, (void *)NULL, (void *)NULL, 0, (void *)NULL,
1384 (void *)NULL);
1385 break;
1386#endif /* INET */
1387#ifdef INET6
1388 case AF_INET6:
1389 /* TCP checksum */
1390 th->th_sum = in6_cksum(m, IPPROTO_TCP,
1391 sizeof(struct ip6_hdr), tlen);
1392
1393 h6->ip6_vfc |= IPV6_VERSION;
1394 h6->ip6_hlim = IPV6_DEFHLIM;
1395
1396 ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
1397 break;
1398#endif /* INET6 */
1399 }
1400}
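/*
 * Annotation (not part of the original pf.c): pf_send_tcp() builds a
 * bare TCP segment from scratch (no payload, optionally a single MSS
 * option), computes the checksum with in_cksum()/in6_cksum() and hands
 * the mbuf straight to ip_output()/ip6_output().  In this file it is
 * used, for instance, to emit the RST|ACK that tears down a lingering
 * PF_TCPS_PROXY_DST state in pf_purge_expired_states_callback(), and by
 * the rule code for "return-rst" and the synproxy handshake.
 */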
1401
1402void
1403pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
1404 struct pf_rule *r)
1405{
1406 struct mbuf *m0;
1407
1408 m0 = m_copypacket(m, MB_DONTWAIT);
1409 if (m0 == NULL)
1410 return;
1411 m0->m_pkthdr.fw_flags |= PF_MBUF_GENERATED;
1412
1413#ifdef ALTQ
1414 if (r->qid) {
1415 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
1416 m->m_pkthdr.altq_qid = r->qid;
1417 m->m_pkthdr.ecn_af = af;
1418 m->m_pkthdr.header = mtod(m0, struct ip *);
1419 }
1420#endif
1421
1422 switch (af) {
1423#ifdef INET
1424 case AF_INET:
1425 icmp_error(m0, type, code, 0, 0);
1426 break;
1427#endif /* INET */
1428#ifdef INET6
1429 case AF_INET6:
1430 icmp6_error(m0, type, code, 0);
1431 break;
1432#endif /* INET6 */
1433 }
1434}
1435
1436/*
1437 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
1438 * If n is 0, they match if they are equal. If n is != 0, they match if they
1439 * are different.
1440 */
1441int
1442pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
1443 struct pf_addr *b, sa_family_t af)
1444{
1445 int match = 0;
1446
1447 switch (af) {
1448#ifdef INET
1449 case AF_INET:
1450 if ((a->addr32[0] & m->addr32[0]) ==
1451 (b->addr32[0] & m->addr32[0]))
1452 match++;
1453 break;
1454#endif /* INET */
1455#ifdef INET6
1456 case AF_INET6:
1457 if (((a->addr32[0] & m->addr32[0]) ==
1458 (b->addr32[0] & m->addr32[0])) &&
1459 ((a->addr32[1] & m->addr32[1]) ==
1460 (b->addr32[1] & m->addr32[1])) &&
1461 ((a->addr32[2] & m->addr32[2]) ==
1462 (b->addr32[2] & m->addr32[2])) &&
1463 ((a->addr32[3] & m->addr32[3]) ==
1464 (b->addr32[3] & m->addr32[3])))
1465 match++;
1466 break;
1467#endif /* INET6 */
1468 }
1469 if (match) {
1470 if (n)
1471 return (0);
1472 else
1473 return (1);
1474 } else {
1475 if (n)
1476 return (1);
1477 else
1478 return (0);
1479 }
1480}
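/*
 * Annotation (not part of the original pf.c): pf_match_addr() is the
 * "address within prefix" test.  Matching 10.1.2.3 against the rule
 * address 10.1.0.0 with mask 255.255.0.0 compares
 * (10.1.2.3 & 255.255.0.0) == (10.1.0.0 & 255.255.0.0), which holds, so
 * with n == 0 the function returns 1; with n != 0 (a negated "!" rule
 * operand) the result is inverted and 0 is returned.
 */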
1481
1482int
1483pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
1484{
1485 switch (op) {
1486 case PF_OP_IRG:
1487 return ((p > a1) && (p < a2));
1488 case PF_OP_XRG:
1489 return ((p < a1) || (p > a2));
1490 case PF_OP_RRG:
1491 return ((p >= a1) && (p <= a2));
1492 case PF_OP_EQ:
1493 return (p == a1);
1494 case PF_OP_NE:
1495 return (p != a1);
1496 case PF_OP_LT:
1497 return (p < a1);
1498 case PF_OP_LE:
1499 return (p <= a1);
1500 case PF_OP_GT:
1501 return (p > a1);
1502 case PF_OP_GE:
1503 return (p >= a1);
1504 }
1505 return (0); /* never reached */
1506}
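/*
 * Annotation (not part of the original pf.c): pf_match() implements the
 * pf.conf comparison operators on host-order values.  For ports,
 * "port 1000 >< 2000" is PF_OP_IRG (exclusive range), "1000 <> 2000" is
 * PF_OP_XRG (outside the range), "1000:2000" is PF_OP_RRG (inclusive
 * range), and =, !=, <, <=, >, >= map to the remaining cases.
 * pf_match_port() merely byte-swaps its arguments before delegating.
 */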
1507
1508int
1509pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
1510{
1511 a1 = ntohs(a1);
1512 a2 = ntohs(a2);
1513 p = ntohs(p);
1514 return (pf_match(op, a1, a2, p));
1515}
1516
1517int
1518pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
1519{
1520 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
1521 return (0);
1522 return (pf_match(op, a1, a2, u));
1523}
1524
1525int
1526pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
1527{
1528 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
1529 return (0);
1530 return (pf_match(op, a1, a2, g));
1531}
1532
1533static int
1534pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_rule *nat_rule,
1535 int *tag)
1536{
1537 if (*tag == -1) { /* find mbuf tag */
1538 if (nat_rule != NULL && nat_rule->tag)
1539 *tag = nat_rule->tag;
1540 else if (m->m_pkthdr.fw_flags & PF_MBUF_TAGGED)
1541 *tag = m->m_pkthdr.pf_tag;
1542 else
1543 *tag = 0;
1544 }
1545
1546 return ((!r->match_tag_not && r->match_tag == *tag) ||
1547 (r->match_tag_not && r->match_tag != *tag));
1548}
1549
1550void
1551pf_tag_packet(struct mbuf *m, int tag)
1552{
1553 if (tag <= 0)
1554 return;
1555
1556 m->m_pkthdr.fw_flags |= PF_MBUF_TAGGED;
1557 m->m_pkthdr.pf_tag = tag;
1558}
1559
1560#define PF_STEP_INTO_ANCHOR(r, a, s, n) \
1561 do { \
1562 if ((r) == NULL || (r)->anchor == NULL || \
1563 (s) != NULL || (a) != NULL) \
1564 panic("PF_STEP_INTO_ANCHOR"); \
1565 (a) = (r); \
1566 (s) = TAILQ_FIRST(&(r)->anchor->rulesets); \
1567 (r) = NULL; \
1568 while ((s) != NULL && ((r) = \
1569 TAILQ_FIRST((s)->rules[n].active.ptr)) == NULL) \
1570 (s) = TAILQ_NEXT((s), entries); \
1571 if ((r) == NULL) { \
1572 (r) = TAILQ_NEXT((a), entries); \
1573 (a) = NULL; \
1574 } \
1575 } while (0)
1576
1577#define PF_STEP_OUT_OF_ANCHOR(r, a, s, n) \
1578 do { \
1579 if ((r) != NULL || (a) == NULL || (s) == NULL) \
1580 panic("PF_STEP_OUT_OF_ANCHOR"); \
1581 (s) = TAILQ_NEXT((s), entries); \
1582 while ((s) != NULL && ((r) = \
1583 TAILQ_FIRST((s)->rules[n].active.ptr)) == NULL) \
1584 (s) = TAILQ_NEXT((s), entries); \
1585 if ((r) == NULL) { \
1586 (r) = TAILQ_NEXT((a), entries); \
1587 (a) = NULL; \
1588 } \
1589 } while (0)
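/*
 * Annotation (not part of the original pf.c): these two macros drive the
 * anchor traversal in the rule-matching loops below.
 * PF_STEP_INTO_ANCHOR remembers the current rule as the anchor rule and
 * restarts evaluation at the first rule of the anchor's first non-empty
 * ruleset; PF_STEP_OUT_OF_ANCHOR advances to the anchor's next ruleset
 * or, when none is left, resumes with the rule following the anchor in
 * the parent ruleset.
 */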
1590
1591#ifdef INET6
1592void
1593pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
1594 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
1595{
1596 switch (af) {
1597#ifdef INET
1598 case AF_INET:
1599 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
1600 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
1601 break;
1602#endif /* INET */
1603 case AF_INET6:
1604 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
1605 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
1606 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
1607 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
1608 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
1609 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
1610 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
1611 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
1612 break;
1613 }
1614}
1615
1616void
1617pf_addr_inc(struct pf_addr *addr, sa_family_t af)
1618{
1619 switch (af) {
1620#ifdef INET
1621 case AF_INET:
1622 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
1623 break;
1624#endif /* INET */
1625 case AF_INET6:
1626 if (addr->addr32[3] == 0xffffffff) {
1627 addr->addr32[3] = 0;
1628 if (addr->addr32[2] == 0xffffffff) {
1629 addr->addr32[2] = 0;
1630 if (addr->addr32[1] == 0xffffffff) {
1631 addr->addr32[1] = 0;
1632 addr->addr32[0] =
1633 htonl(ntohl(addr->addr32[0]) + 1);
1634 } else
1635 addr->addr32[1] =
1636 htonl(ntohl(addr->addr32[1]) + 1);
1637 } else
1638 addr->addr32[2] =
1639 htonl(ntohl(addr->addr32[2]) + 1);
1640 } else
1641 addr->addr32[3] =
1642 htonl(ntohl(addr->addr32[3]) + 1);
1643 break;
1644 }
1645}
1646#endif /* INET6 */
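/*
 * Annotation (not part of the original pf.c): pf_addr_inc() advances an
 * address by one, propagating the carry across the four 32-bit words of
 * an IPv6 address from the least significant (addr32[3]) to the most
 * significant (addr32[0]).  Incrementing ::ffff:ffff, for example,
 * yields ::1:0:0, because addr32[3] wraps to zero and the carry bumps
 * addr32[2].
 */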
1647
1648#define mix(a,b,c) \
1649 do { \
1650 a -= b; a -= c; a ^= (c >> 13); \
1651 b -= c; b -= a; b ^= (a << 8); \
1652 c -= a; c -= b; c ^= (b >> 13); \
1653 a -= b; a -= c; a ^= (c >> 12); \
1654 b -= c; b -= a; b ^= (a << 16); \
1655 c -= a; c -= b; c ^= (b >> 5); \
1656 a -= b; a -= c; a ^= (c >> 3); \
1657 b -= c; b -= a; b ^= (a << 10); \
1658 c -= a; c -= b; c ^= (b >> 15); \
1659 } while (0)
1660
1661/*
1662 * hash function based on bridge_hash in if_bridge.c
1663 */
1664void
1665pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
1666 struct pf_poolhashkey *key, sa_family_t af)
1667{
1668 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
1669
1670 switch (af) {
1671#ifdef INET
1672 case AF_INET:
1673 a += inaddr->addr32[0];
1674 b += key->key32[1];
1675 mix(a, b, c);
1676 hash->addr32[0] = c + key->key32[2];
1677 break;
1678#endif /* INET */
1679#ifdef INET6
1680 case AF_INET6:
1681 a += inaddr->addr32[0];
1682 b += inaddr->addr32[2];
1683 mix(a, b, c);
1684 hash->addr32[0] = c;
1685 a += inaddr->addr32[1];
1686 b += inaddr->addr32[3];
1687 c += key->key32[1];
1688 mix(a, b, c);
1689 hash->addr32[1] = c;
1690 a += inaddr->addr32[2];
1691 b += inaddr->addr32[1];
1692 c += key->key32[2];
1693 mix(a, b, c);
1694 hash->addr32[2] = c;
1695 a += inaddr->addr32[3];
1696 b += inaddr->addr32[0];
1697 c += key->key32[3];
1698 mix(a, b, c);
1699 hash->addr32[3] = c;
1700 break;
1701#endif /* INET6 */
1702 }
1703}
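/*
 * Annotation (not part of the original pf.c): pf_hash() is a
 * Jenkins-style mixing hash, the same construction as bridge_hash in
 * if_bridge.c.  pf_map_addr() feeds it the packet's source address and
 * the pool's 128-bit key for PF_POOL_SRCHASH, so a given source address
 * always maps to the same pool address while different sources spread
 * across the pool.
 */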
1704
1705int
1706pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
1707 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
1708{
1709 unsigned char hash[16];
1710 struct pf_pool *rpool = &r->rpool;
1711 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
1712 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
1713 struct pf_pooladdr *acur = rpool->cur;
1714 struct pf_src_node k;
1715
1716 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
1717 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
1718 k.af = af;
1719 PF_ACPY(&k.addr, saddr, af);
1720 if (r->rule_flag & PFRULE_RULESRCTRACK ||
1721 r->rpool.opts & PF_POOL_STICKYADDR)
1722 k.rule.ptr = r;
1723 else
1724 k.rule.ptr = NULL;
1725 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
1726 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
1727 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
1728 PF_ACPY(naddr, &(*sn)->raddr, af);
1729 if (pf_status.debug >= PF_DEBUG_MISC) {
1730 printf("pf_map_addr: src tracking maps ");
1731 pf_print_host(&k.addr, 0, af);
1732 printf(" to ");
1733 pf_print_host(naddr, 0, af);
1734 printf("\n");
1735 }
1736 return (0);
1737 }
1738 }
1739
1740 if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
1741 return (1);
1742 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
1743 if (af == AF_INET) {
1744 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
1745 (rpool->opts & PF_POOL_TYPEMASK) !=
1746 PF_POOL_ROUNDROBIN)
1747 return (1);
1748 raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
1749 rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
1750 } else {
1751 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
1752 (rpool->opts & PF_POOL_TYPEMASK) !=
1753 PF_POOL_ROUNDROBIN)
1754 return (1);
1755 raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
1756 rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
1757 }
1758 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
1759 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
1760 return (1); /* unsupported */
1761 } else {
1762 raddr = &rpool->cur->addr.v.a.addr;
1763 rmask = &rpool->cur->addr.v.a.mask;
1764 }
1765
1766 switch (rpool->opts & PF_POOL_TYPEMASK) {
1767 case PF_POOL_NONE:
1768 PF_ACPY(naddr, raddr, af);
1769 break;
1770 case PF_POOL_BITMASK:
1771 PF_POOLMASK(naddr, raddr, rmask, saddr, af);
1772 break;
1773 case PF_POOL_RANDOM:
1774 if (init_addr != NULL && PF_AZERO(init_addr, af)) {
1775 switch (af) {
1776#ifdef INET
1777 case AF_INET:
1778 rpool->counter.addr32[0] = karc4random();
1779 break;
1780#endif /* INET */
1781#ifdef INET6
1782 case AF_INET6:
1783 if (rmask->addr32[3] != 0xffffffff)
1784 rpool->counter.addr32[3] = karc4random();
1785 else
1786 break;
1787 if (rmask->addr32[2] != 0xffffffff)
1788 rpool->counter.addr32[2] = karc4random();
1789 else
1790 break;
1791 if (rmask->addr32[1] != 0xffffffff)
1792 rpool->counter.addr32[1] = karc4random();
1793 else
1794 break;
1795 if (rmask->addr32[0] != 0xffffffff)
1796 rpool->counter.addr32[0] = karc4random();
1797 break;
1798#endif /* INET6 */
1799 }
1800 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
1801 PF_ACPY(init_addr, naddr, af);
1802
1803 } else {
1804 PF_AINC(&rpool->counter, af);
1805 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
1806 }
1807 break;
1808 case PF_POOL_SRCHASH:
1809 pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
1810 PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
1811 break;
1812 case PF_POOL_ROUNDROBIN:
1813 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
1814 if (!pfr_pool_get(rpool->cur->addr.p.tbl,
1815 &rpool->tblidx, &rpool->counter,
1816 &raddr, &rmask, af))
1817 goto get_addr;
1818 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
1819 if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
1820 &rpool->tblidx, &rpool->counter,
1821 &raddr, &rmask, af))
1822 goto get_addr;
1823 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
1824 goto get_addr;
1825
1826 try_next:
1827 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
1828 rpool->cur = TAILQ_FIRST(&rpool->list);
1829 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
1830 rpool->tblidx = -1;
1831 if (pfr_pool_get(rpool->cur->addr.p.tbl,
1832 &rpool->tblidx, &rpool->counter,
1833 &raddr, &rmask, af)) {
1834 /* table contains no address of type 'af' */
1835 if (rpool->cur != acur)
1836 goto try_next;
1837 return (1);
1838 }
1839 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
1840 rpool->tblidx = -1;
1841 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
1842 &rpool->tblidx, &rpool->counter,
1843 &raddr, &rmask, af)) {
1844 /* table contains no address of type 'af' */
1845 if (rpool->cur != acur)
1846 goto try_next;
1847 return (1);
1848 }
1849 } else {
1850 raddr = &rpool->cur->addr.v.a.addr;
1851 rmask = &rpool->cur->addr.v.a.mask;
1852 PF_ACPY(&rpool->counter, raddr, af);
1853 }
1854
1855 get_addr:
1856 PF_ACPY(naddr, &rpool->counter, af);
1857 PF_AINC(&rpool->counter, af);
1858 break;
1859 }
1860 if (*sn != NULL)
1861 PF_ACPY(&(*sn)->raddr, naddr, af);
1862
1863 if (pf_status.debug >= PF_DEBUG_MISC &&
1864 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
1865 printf("pf_map_addr: selected address ");
1866 pf_print_host(naddr, 0, af);
1867 printf("\n");
1868 }
1869
1870 return (0);
1871}
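/*
 * Annotation (not part of the original pf.c): pf_map_addr() picks the
 * translation or route address according to the pool option:
 * PF_POOL_NONE/BITMASK use the (masked) pool address directly,
 * PF_POOL_RANDOM and PF_POOL_ROUNDROBIN walk the pool (round-robin is
 * the only type supported for tables), and PF_POOL_SRCHASH hashes the
 * source address.  With sticky-address the chosen address is cached in
 * the source node, so later connections from the same source reuse it.
 * A hypothetical pf.conf line exercising the round-robin path ($ext_if
 * and the addresses are placeholders, not taken from this file):
 *
 *	nat on $ext_if from 10.0.0.0/24 to any -> \
 *	    { 1.2.3.4, 1.2.3.5 } round-robin sticky-address
 */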
1872
1873int
1874pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
1875 struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
1876 struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
1877 struct pf_src_node **sn)
1878{
1879 struct pf_state key;
1880 struct pf_addr init_addr;
1881 u_int16_t cut;
1882
1883 bzero(&init_addr, sizeof(init_addr));
1884 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
1885 return (1);
1886
1887 do {
1888 key.af = af;
1889 key.proto = proto;
1890 PF_ACPY(&key.ext.addr, daddr, key.af);
1891 PF_ACPY(&key.gwy.addr, naddr, key.af);
1892 key.ext.port = dport;
1893
1894 /*
1895 * port search; start at a random port, then step;
1896 * similar to the port selection loop in in_pcbbind
1897 */
1898 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
1899 key.gwy.port = 0;
1900 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL)
1901 return (0);
1902 } else if (low == 0 && high == 0) {
1903 key.gwy.port = *nport;
1904 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL)
1905 return (0);
1906 } else if (low == high) {
1907 key.gwy.port = htons(low);
1908 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) == NULL) {
1909 *nport = htons(low);
1910 return (0);
1911 }
1912 } else {
1913 u_int16_t tmp;
1914
1915 if (low > high) {
1916 tmp = low;
1917 low = high;
1918 high = tmp;
1919 }
1920 /* low < high */
1921 cut = karc4random() % (1 + high - low) + low;
1922 /* low <= cut <= high */
1923 for (tmp = cut; tmp <= high; ++(tmp)) {
1924 key.gwy.port = htons(tmp);
1925 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) ==
1926 NULL) {
1927 *nport = htons(tmp);
1928 return (0);
1929 }
1930 }
1931 for (tmp = cut - 1; tmp >= low; --(tmp)) {
1932 key.gwy.port = htons(tmp);
1933 if (pf_find_state_all(&key, PF_EXT_GWY, NULL) ==
1934 NULL) {
1935 *nport = htons(tmp);
1936 return (0);
1937 }
1938 }
1939 }
1940
1941 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
1942 case PF_POOL_RANDOM:
1943 case PF_POOL_ROUNDROBIN:
1944 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
1945 return (1);
1946 break;
1947 case PF_POOL_NONE:
1948 case PF_POOL_SRCHASH:
1949 case PF_POOL_BITMASK:
1950 default:
1951 return (1);
1952 }
1953 } while (! PF_AEQ(&init_addr, naddr, af) );
1954
1955 return (1); /* none available */
1956}
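/*
 * Annotation (not part of the original pf.c): pf_get_sport() chooses the
 * translated source port for NAT.  It first maps an address with
 * pf_map_addr(), then probes for a free (address, port) pair: a random
 * cut point is drawn in [low, high], ports are tried upward from the cut
 * to 'high' and then downward from cut-1 to 'low', and a candidate is
 * rejected if pf_find_state_all() already knows a state keyed on it.  If
 * every port on the current address is taken, the pool is advanced
 * (random and round-robin pools only) and the search repeats until the
 * pool wraps back to its initial address, at which point the allocation
 * fails.
 */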
1957
1958struct pf_rule *
1959pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
1960 int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
1961 struct pf_addr *daddr, u_int16_t dport, int rs_num)
1962{
1963 struct pf_rule *r, *rm = NULL, *anchorrule = NULL;
1964 struct pf_ruleset *ruleset = NULL;
1965
1966 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
1967 while (r && rm == NULL) {
1968 struct pf_rule_addr *src = NULL, *dst = NULL;
1969 struct pf_addr_wrap *xdst = NULL;
1970
1971 if (r->action == PF_BINAT && direction == PF_IN) {
1972 src = &r->dst;
1973 if (r->rpool.cur != NULL)
1974 xdst = &r->rpool.cur->addr;
1975 } else {
1976 src = &r->src;
1977 dst = &r->dst;
1978 }
1979
1980 r->evaluations++;
1981 if (r->kif != NULL &&
1982 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
1983 r = r->skip[PF_SKIP_IFP].ptr;
1984 else if (r->direction && r->direction != direction)
1985 r = r->skip[PF_SKIP_DIR].ptr;
1986 else if (r->af && r->af != pd->af)
1987 r = r->skip[PF_SKIP_AF].ptr;
1988 else if (r->proto && r->proto != pd->proto)
1989 r = r->skip[PF_SKIP_PROTO].ptr;
1990 else if (PF_MISMATCHAW(&src->addr, saddr, pd->af, src->not))
1991 r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
1992 PF_SKIP_DST_ADDR].ptr;
1993 else if (src->port_op && !pf_match_port(src->port_op,
1994 src->port[0], src->port[1], sport))
1995 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
1996 PF_SKIP_DST_PORT].ptr;
1997 else if (dst != NULL &&
1998 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->not))
1999 r = r->skip[PF_SKIP_DST_ADDR].ptr;
2000 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, 0))
2001 r = TAILQ_NEXT(r, entries);
2002 else if (dst != NULL && dst->port_op &&
2003 !pf_match_port(dst->port_op, dst->port[0],
2004 dst->port[1], dport))
2005 r = r->skip[PF_SKIP_DST_PORT].ptr;
2006 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
2007 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
2008 off, pd->hdr.tcp), r->os_fingerprint)))
2009 r = TAILQ_NEXT(r, entries);
2010 else if (r->anchorname[0] && r->anchor == NULL)
2011 r = TAILQ_NEXT(r, entries);
2012 else if (r->anchor == NULL)
2013 rm = r;
2014 else
2015 PF_STEP_INTO_ANCHOR(r, anchorrule, ruleset, rs_num);
2016 if (r == NULL && anchorrule != NULL)
2017 PF_STEP_OUT_OF_ANCHOR(r, anchorrule, ruleset,
2018 rs_num);
2019 }
2020 if (rm != NULL && (rm->action == PF_NONAT ||
2021 rm->action == PF_NORDR || rm->action == PF_NOBINAT))
2022 return (NULL);
2023 return (rm);
2024}
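/*
 * Note on the r->skip[] pointers used above (and in the filter rule
 * loops further down): when a rule fails one of the ordered criteria
 * (interface, direction, address family, protocol, addresses, ports),
 * evaluation does not step to the next rule but jumps to the next rule
 * that actually differs in that criterion, so long runs of rules
 * sharing e.g. the same interface or protocol are skipped in one step.
 * The skip lists are precomputed when the ruleset is loaded.
 */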
2025
2026struct pf_rule *
2027pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
2028 struct pfi_kif *kif, struct pf_src_node **sn,
2029 struct pf_addr *saddr, u_int16_t sport,
2030 struct pf_addr *daddr, u_int16_t dport,
2031 struct pf_addr *naddr, u_int16_t *nport)
2032{
2033 struct pf_rule *r = NULL;
2034
2035 if (direction == PF_OUT) {
2036 r = pf_match_translation(pd, m, off, direction, kif, saddr,
2037 sport, daddr, dport, PF_RULESET_BINAT);
2038 if (r == NULL)
2039 r = pf_match_translation(pd, m, off, direction, kif,
2040 saddr, sport, daddr, dport, PF_RULESET_NAT);
2041 } else {
2042 r = pf_match_translation(pd, m, off, direction, kif, saddr,
2043 sport, daddr, dport, PF_RULESET_RDR);
2044 if (r == NULL)
2045 r = pf_match_translation(pd, m, off, direction, kif,
2046 saddr, sport, daddr, dport, PF_RULESET_BINAT);
2047 }
2048
2049 if (r != NULL) {
2050 switch (r->action) {
2051 case PF_NONAT:
2052 case PF_NOBINAT:
2053 case PF_NORDR:
2054 return (NULL);
2055 case PF_NAT:
2056 if (pf_get_sport(pd->af, pd->proto, r, saddr,
2057 daddr, dport, naddr, nport, r->rpool.proxy_port[0],
2058 r->rpool.proxy_port[1], sn)) {
2059 DPFPRINTF(PF_DEBUG_MISC,
2060 ("pf: NAT proxy port allocation "
2061 "(%u-%u) failed\n",
2062 r->rpool.proxy_port[0],
2063 r->rpool.proxy_port[1]));
2064 return (NULL);
2065 }
2066 break;
2067 case PF_BINAT:
2068 switch (direction) {
2069 case PF_OUT:
2070 if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL){
2071 if (pd->af == AF_INET) {
2072 if (r->rpool.cur->addr.p.dyn->
2073 pfid_acnt4 < 1)
2074 return (NULL);
2075 PF_POOLMASK(naddr,
2076 &r->rpool.cur->addr.p.dyn->
2077 pfid_addr4,
2078 &r->rpool.cur->addr.p.dyn->
2079 pfid_mask4,
2080 saddr, AF_INET);
2081 } else {
2082 if (r->rpool.cur->addr.p.dyn->
2083 pfid_acnt6 < 1)
2084 return (NULL);
2085 PF_POOLMASK(naddr,
2086 &r->rpool.cur->addr.p.dyn->
2087 pfid_addr6,
2088 &r->rpool.cur->addr.p.dyn->
2089 pfid_mask6,
2090 saddr, AF_INET6);
2091 }
2092 } else
2093 PF_POOLMASK(naddr,
2094 &r->rpool.cur->addr.v.a.addr,
2095 &r->rpool.cur->addr.v.a.mask,
2096 saddr, pd->af);
2097 break;
2098 case PF_IN:
2099 if (r->src.addr.type == PF_ADDR_DYNIFTL){
2100 if (pd->af == AF_INET) {
2101 if (r->src.addr.p.dyn->
2102 pfid_acnt4 < 1)
2103 return (NULL);
2104 PF_POOLMASK(naddr,
2105 &r->src.addr.p.dyn->
2106 pfid_addr4,
2107 &r->src.addr.p.dyn->
2108 pfid_mask4,
2109 daddr, AF_INET);
2110 } else {
2111 if (r->src.addr.p.dyn->
2112 pfid_acnt6 < 1)
2113 return (NULL);
2114 PF_POOLMASK(naddr,
2115 &r->src.addr.p.dyn->
2116 pfid_addr6,
2117 &r->src.addr.p.dyn->
2118 pfid_mask6,
2119 daddr, AF_INET6);
2120 }
2121 } else
2122 PF_POOLMASK(naddr,
2123 &r->src.addr.v.a.addr,
2124 &r->src.addr.v.a.mask, daddr,
2125 pd->af);
2126 break;
2127 }
2128 break;
2129 case PF_RDR: {
2130 if (pf_map_addr(r->af, r, saddr, naddr, NULL, sn))
2131 return (NULL);
2132
2133 if (r->rpool.proxy_port[1]) {
2134 u_int32_t tmp_nport;
2135
2136 tmp_nport = ((ntohs(dport) -
2137 ntohs(r->dst.port[0])) %
2138 (r->rpool.proxy_port[1] -
2139 r->rpool.proxy_port[0] + 1)) +
2140 r->rpool.proxy_port[0];
2141
2142 /* wrap around if necessary */
2143 if (tmp_nport > 65535)
2144 tmp_nport -= 65535;
2145 *nport = htons((u_int16_t)tmp_nport);
2146 } else if (r->rpool.proxy_port[0])
2147 *nport = htons(r->rpool.proxy_port[0]);
2148 break;
2149 }
2150 default:
2151 return (NULL);
2152 }
2153 }
2154
2155 return (r);
2156}
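/*
 * Worked example for the PF_RDR proxy port mapping above (numbers are
 * illustrative only): redirecting destination ports 80-89 onto proxy
 * ports 8000-8009, i.e. r->dst.port[0] = 80, r->rpool.proxy_port[0] =
 * 8000 and r->rpool.proxy_port[1] = 8009, an incoming dport of 83 maps
 * to
 *
 *	((83 - 80) % (8009 - 8000 + 1)) + 8000 = 3 + 8000 = 8003
 *
 * so the offset into the original port range is preserved modulo the
 * size of the proxy range; the wrap-around check only matters when the
 * proxy range sits near the top of the 16-bit port space.
 */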
2157
2158#ifdef SMP
2159struct netmsg_hashlookup {
2160 struct lwkt_msg nm_lmsg;
2161 struct inpcb **nm_pinp;
2162 struct inpcbinfo *nm_pcbinfo;
2163 struct pf_addr *nm_saddr;
2164 struct pf_addr *nm_daddr;
2165 uint16_t nm_sport;
2166 uint16_t nm_dport;
2167 sa_family_t nm_af;
2168};
2169
2170static int
2171in_pcblookup_hash_handler(struct lwkt_msg *msg0)
2172{
2173 struct netmsg_hashlookup *msg = (struct netmsg_hashlookup *)msg0;
2174
2175 if (msg->nm_af == AF_INET)
2176 *msg->nm_pinp = in_pcblookup_hash(msg->nm_pcbinfo,
2177 msg->nm_saddr->v4, msg->nm_sport, msg->nm_daddr->v4,
2178 msg->nm_dport, INPLOOKUP_WILDCARD, NULL);
2179#ifdef INET6
2180 else
2181 *msg->nm_pinp = in6_pcblookup_hash(msg->nm_pcbinfo,
2182 &msg->nm_saddr->v6, msg->nm_sport, &msg->nm_daddr->v6,
2183 msg->nm_dport, INPLOOKUP_WILDCARD, NULL);
2184#endif /* INET6 */
2185 lwkt_replymsg(&msg->nm_lmsg, 0);
2186 return (EASYNC);
2187}
2188#endif /* SMP */
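/*
 * in_pcblookup_hash_handler() runs on the CPU that owns the pcbinfo in
 * question: pf_socket_lookup() below fills in a struct
 * netmsg_hashlookup, hands it to that CPU with
 * lwkt_sendmsg(tcp_cport(pi_cpu), ...), and the handler performs the
 * PCB lookup there, stores the result through nm_pinp and replies.
 */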
2189
2190int
2191pf_socket_lookup(uid_t *uid, gid_t *gid, int direction, struct pf_pdesc *pd)
2192{
2193 struct pf_addr *saddr, *daddr;
2194 u_int16_t sport, dport;
2195 struct inpcbinfo *pi;
2196 struct inpcb *inp;
2197#ifdef SMP
2198 struct netmsg_hashlookup *msg = NULL;
2199#endif
2200 int pi_cpu = 0;
2201
2202 *uid = UID_MAX;
2203 *gid = GID_MAX;
2204 if (direction == PF_IN) {
2205 saddr = pd->src;
2206 daddr = pd->dst;
2207 } else {
2208 saddr = pd->dst;
2209 daddr = pd->src;
2210 }
2211 switch (pd->proto) {
2212 case IPPROTO_TCP:
2213 sport = pd->hdr.tcp->th_sport;
2214 dport = pd->hdr.tcp->th_dport;
2215
2216 pi_cpu = tcp_addrcpu(saddr->v4.s_addr, sport, daddr->v4.s_addr, dport);
2217 pi = &tcbinfo[pi_cpu];
2218#ifdef SMP
2219 /*
2220 * Our netstack runs lockless on MP systems
2221 * (only for TCP connections at the moment).
2222 *
2223 * As we are not allowed to read another CPU's tcbinfo,
2224 * we have to ask that CPU via remote call to search the
2225 * table for us.
2226 *
2227 * Prepare a msg iff data belongs to another CPU.
2228 */
2229 if (pi_cpu != mycpu->gd_cpuid) {
2230 msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_INTWAIT);
2231 lwkt_initmsg(&msg->nm_lmsg, &netisr_afree_rport, 0,
2232 lwkt_cmd_func(in_pcblookup_hash_handler),
2233 lwkt_cmd_op_none);
2234 msg->nm_pinp = &inp;
2235 msg->nm_pcbinfo = pi;
2236 msg->nm_saddr = saddr;
2237 msg->nm_sport = sport;
2238 msg->nm_daddr = daddr;
2239 msg->nm_dport = dport;
2240 msg->nm_af = pd->af;
2241 }
2242#endif /* SMP */
2243 break;
2244 case IPPROTO_UDP:
2245 sport = pd->hdr.udp->uh_sport;
2246 dport = pd->hdr.udp->uh_dport;
2247 pi = &udbinfo;
2248 break;
2249 default:
2250 return (0);
2251 }
2252 if (direction != PF_IN) {
2253 u_int16_t p;
2254
2255 p = sport;
2256 sport = dport;
2257 dport = p;
2258 }
2259 switch (pd->af) {
2260#ifdef INET6
2261 case AF_INET6:
2262#ifdef SMP
2263 /*
2264 * Query other CPU, second part
2265 *
2266 * msg only gets initialized when:
2267 * 1) packet is TCP
2268 * 2) the info belongs to another CPU
2269 *
2270 * Use some switch/case magic to avoid code duplication.
2271 */
2272 if (msg == NULL)
2273#endif /* SMP */
2274 {
2275 inp = in6_pcblookup_hash(pi, &saddr->v6, sport,
2276 &daddr->v6, dport, INPLOOKUP_WILDCARD, NULL);
2277
2278 if (inp == NULL)
2279 return (0);
2280 break;
2281 }
2282 /* FALLTHROUGH if SMP and on other CPU */
2283#endif /* INET6 */
2284 case AF_INET:
2285#ifdef SMP
2286 if (msg != NULL) {
2287 lwkt_sendmsg(tcp_cport(pi_cpu), &msg->nm_lmsg);
2288 } else
2289#endif /* SMP */
2290 {
2291 inp = in_pcblookup_hash(pi, saddr->v4, sport, daddr->v4,
2292 dport, INPLOOKUP_WILDCARD, NULL);
2293 }
2294 if (inp == NULL)
2295 return (0);
2296 break;
2297
2298 default:
2299 return (0);
2300 }
2301 *uid = inp->inp_socket->so_cred->cr_uid;
2302 *gid = inp->inp_socket->so_cred->cr_groups[0];
2303 return (1);
2304}
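/*
 * A return of 1 means a matching local socket was found and *uid/*gid
 * carry its credentials; a return of 0 (no socket, or a protocol other
 * than TCP/UDP) leaves them at UID_MAX/GID_MAX.  pf_test_tcp() and
 * pf_test_udp() below call this lazily, only when a rule actually
 * carries a uid or gid match (r->uid.op / r->gid.op), and feed the
 * result to pf_match_uid()/pf_match_gid().
 */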
2305
2306u_int8_t
2307pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2308{
2309 int hlen;
2310 u_int8_t hdr[60];
2311 u_int8_t *opt, optlen;
2312 u_int8_t wscale = 0;
2313
2314 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2315 if (hlen <= sizeof(struct tcphdr))
2316 return (0);
2317 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2318 return (0);
2319 opt = hdr + sizeof(struct tcphdr);
2320 hlen -= sizeof(struct tcphdr);
2321 while (hlen >= 3) {
2322 switch (*opt) {
2323 case TCPOPT_EOL:
2324 case TCPOPT_NOP:
2325 ++opt;
2326 --hlen;
2327 break;
2328 case TCPOPT_WINDOW:
2329 wscale = opt[2];
2330 if (wscale > TCP_MAX_WINSHIFT)
2331 wscale = TCP_MAX_WINSHIFT;
2332 wscale |= PF_WSCALE_FLAG;
2333 /* FALLTHROUGH */
2334 default:
2335 optlen = opt[1];
2336 if (optlen < 2)
2337 optlen = 2;
2338 hlen -= optlen;
2339 opt += optlen;
2340 break;
2341 }
2342 }
2343 return (wscale);
2344}
2345
2346u_int16_t
2347pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2348{
2349 int hlen;
2350 u_int8_t hdr[60];
2351 u_int8_t *opt, optlen;
2352 u_int16_t mss = tcp_mssdflt;
2353
2354 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
2355 if (hlen <= sizeof(struct tcphdr))
2356 return (0);
2357 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2358 return (0);
2359 opt = hdr + sizeof(struct tcphdr);
2360 hlen -= sizeof(struct tcphdr);
2361 while (hlen >= TCPOLEN_MAXSEG) {
2362 switch (*opt) {
2363 case TCPOPT_EOL:
2364 case TCPOPT_NOP:
2365 ++opt;
2366 --hlen;
2367 break;
2368 case TCPOPT_MAXSEG:
2369 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
2370 /* FALLTHROUGH */
2371 default:
2372 optlen = opt[1];
2373 if (optlen < 2)
2374 optlen = 2;
2375 hlen -= optlen;
2376 opt += optlen;
2377 break;
2378 }
2379 }
2380 return (mss);
2381}
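/*
 * Both pf_get_wscale() and pf_get_mss() above walk the TCP options
 * that follow the fixed header as (kind, length, value) records, with
 * the one-byte NOP/EOL kinds handled separately.  An illustrative
 * option block (made-up values) as it would appear after the header:
 *
 *	u_int8_t opts[] = {
 *		TCPOPT_MAXSEG, 4, 0x05, 0xb4,	(MSS option, value 1460)
 *		TCPOPT_NOP,			(padding)
 *		TCPOPT_WINDOW, 3, 7		(window scale, shift 7)
 *	};
 *
 * For a header carrying these options pf_get_wscale() returns
 * 7 | PF_WSCALE_FLAG and pf_get_mss() picks up the 1460 MSS value.
 */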
2382
2383u_int16_t
2384pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
2385{
2386#ifdef INET
2387 struct sockaddr_in *dst;
2388 struct route ro;
2389#endif /* INET */
2390#ifdef INET6
2391 struct sockaddr_in6 *dst6;
2392 struct route_in6 ro6;
2393#endif /* INET6 */
2394 struct rtentry *rt = NULL;
2395 int hlen = 0;
2396 u_int16_t mss = tcp_mssdflt;
2397
2398 switch (af) {
2399#ifdef INET
2400 case AF_INET:
2401 hlen = sizeof(struct ip);
2402 bzero(&ro, sizeof(ro));
2403 dst = (struct sockaddr_in *)&ro.ro_dst;
2404 dst->sin_family = AF_INET;
2405 dst->sin_len = sizeof(*dst);
2406 dst->sin_addr = addr->v4;
2407 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
2408 rt = ro.ro_rt;
2409 break;
2410#endif /* INET */
2411#ifdef INET6
2412 case AF_INET6:
2413 hlen = sizeof(struct ip6_hdr);
2414 bzero(&ro6, sizeof(ro6));
2415 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
2416 dst6->sin6_family = AF_INET6;
2417 dst6->sin6_len = sizeof(*dst6);
2418 dst6->sin6_addr = addr->v6;
2419 rtalloc_ign((struct route *)&ro6, (RTF_CLONING | RTF_PRCLONING));
2420 rt = ro6.ro_rt;
2421 break;
2422#endif /* INET6 */
2423 }
2424
2425 if (rt && rt->rt_ifp) {
2426 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
2427 mss = max(tcp_mssdflt, mss);
2428 RTFREE(rt);
2429 }
2430 mss = min(mss, offer);
2431 mss = max(mss, 64); /* sanity - at least max opt space */
2432 return (mss);
2433}
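/*
 * Worked example (illustrative numbers): for an IPv4 destination
 * reached over an interface with if_mtu = 1500,
 *
 *	mss = 1500 - sizeof(struct ip) - sizeof(struct tcphdr)
 *	    = 1500 - 20 - 20 = 1460
 *
 * which is then raised to at least tcp_mssdflt, clamped to the peer's
 * offer and kept at 64 or more.  For IPv6 the same lookup subtracts
 * sizeof(struct ip6_hdr) = 40, giving 1440 on a 1500-byte MTU.
 */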
2434
2435void
2436pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
2437{
2438 struct pf_rule *r = s->rule.ptr;
2439
2440 s->rt_kif = NULL;
2441 if (!r->rt || r->rt == PF_FASTROUTE)
2442 return;
2443 switch (s->af) {
2444#ifdef INET
2445 case AF_INET:
2446 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
2447 &s->nat_src_node);
2448 s->rt_kif = r->rpool.cur->kif;
2449 break;
2450#endif /* INET */
2451#ifdef INET6
2452 case AF_INET6:
2453 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
2454 &s->nat_src_node);
2455 s->rt_kif = r->rpool.cur->kif;
2456 break;
2457#endif /* INET6 */
2458 }
2459}
2460
2461int
2462pf_test_tcp(struct pf_rule **rm, struct pf_state **sm, int direction,
2463 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
2464 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm)
2465{
2466 struct pf_rule *nr = NULL;
2467 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
2468 struct tcphdr *th = pd->hdr.tcp;
2469 u_int16_t bport, nport = 0;
2470 sa_family_t af = pd->af;
2471 int lookup = -1;
2472 uid_t uid;
2473 gid_t gid;
2474 struct pf_rule *r, *a = NULL;
2475 struct pf_ruleset *ruleset = NULL;
2476 struct pf_src_node *nsn = NULL;
2477 u_short reason;
2478 int rewrite = 0;
2479 int tag = -1;
2480 u_int16_t mss = tcp_mssdflt;
2481
2482 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
2483
2484 if (direction == PF_OUT) {
2485 bport = nport = th->th_sport;
2486 /* check outgoing packet for BINAT/NAT */
2487 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
2488 saddr, th->th_sport, daddr, th->th_dport,
2489 &pd->naddr, &nport)) != NULL) {
2490 PF_ACPY(&pd->baddr, saddr, af);
2491 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
2492 &th->th_sum, &pd->naddr, nport, 0, af);
2493 rewrite++;
2494 if (nr->natpass)
2495 r = NULL;
2496 pd->nat_rule = nr;
2497 }
2498 } else {
2499 bport = nport = th->th_dport;
2500 /* check incoming packet for BINAT/RDR */
2501 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
2502 saddr, th->th_sport, daddr, th->th_dport,
2503 &pd->naddr, &nport)) != NULL) {
2504 PF_ACPY(&pd->baddr, daddr, af);
2505 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
2506 &th->th_sum, &pd->naddr, nport, 0, af);
2507 rewrite++;
2508 if (nr->natpass)
2509 r = NULL;
2510 pd->nat_rule = nr;
2511 }
2512 }
2513
2514 while (r != NULL) {
2515 r->evaluations++;
2516 if (r->kif != NULL &&
2517 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
2518 r = r->skip[PF_SKIP_IFP].ptr;
2519 else if (r->direction && r->direction != direction)
2520 r = r->skip[PF_SKIP_DIR].ptr;
2521 else if (r->af && r->af != af)
2522 r = r->skip[PF_SKIP_AF].ptr;
2523 else if (r->proto && r->proto != IPPROTO_TCP)
2524 r = r->skip[PF_SKIP_PROTO].ptr;
2525 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, r->src.not))
2526 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
2527 else if (r->src.port_op && !pf_match_port(r->src.port_op,
2528 r->src.port[0], r->src.port[1], th->th_sport))
2529 r = r->skip[PF_SKIP_SRC_PORT].ptr;
2530 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, r->dst.not))
2531 r = r->skip[PF_SKIP_DST_ADDR].ptr;
2532 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2533 r->dst.port[0], r->dst.port[1], th->th_dport))
2534 r = r->skip[PF_SKIP_DST_PORT].ptr;
2535 else if (r->tos && !(r->tos & pd->tos))
2536 r = TAILQ_NEXT(r, entries);
2537 else if (r->rule_flag & PFRULE_FRAGMENT)
2538 r = TAILQ_NEXT(r, entries);
2539 else if ((r->flagset & th->th_flags) != r->flags)
2540 r = TAILQ_NEXT(r, entries);
2541 else if (r->uid.op && (lookup != -1 || (lookup =
2542 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2543 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
2544 uid))
2545 r = TAILQ_NEXT(r, entries);
2546 else if (r->gid.op && (lookup != -1 || (lookup =
2547 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2548 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
2549 gid))
2550 r = TAILQ_NEXT(r, entries);
2551 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
2552 r = TAILQ_NEXT(r, entries);
2553 else if (r->anchorname[0] && r->anchor == NULL)
2554 r = TAILQ_NEXT(r, entries);
2555 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
2556 pf_osfp_fingerprint(pd, m, off, th), r->os_fingerprint))
2557 r = TAILQ_NEXT(r, entries);
2558 else {
2559 if (r->tag)
2560 tag = r->tag;
2561 if (r->anchor == NULL) {
2562 *rm = r;
2563 *am = a;
2564 *rsm = ruleset;
2565 if ((*rm)->quick)
2566 break;
2567 r = TAILQ_NEXT(r, entries);
2568 } else
2569 PF_STEP_INTO_ANCHOR(r, a, ruleset,
2570 PF_RULESET_FILTER);
2571 }
2572 if (r == NULL && a != NULL)
2573 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
2574 PF_RULESET_FILTER);
2575 }
2576 r = *rm;
2577 a = *am;
2578 ruleset = *rsm;
2579
2580 REASON_SET(&reason, PFRES_MATCH);
2581
2582 if (r->log) {
2583 if (rewrite)
2584 m_copyback(m, off, sizeof(*th), (caddr_t)th);
2585 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
2586 }
2587
2588 if ((r->action == PF_DROP) &&
2589 ((r->rule_flag & PFRULE_RETURNRST) ||
2590 (r->rule_flag & PFRULE_RETURNICMP) ||
2591 (r->rule_flag & PFRULE_RETURN))) {
2592 /* undo NAT changes, if they have taken place */
2593 if (nr != NULL) {
2594 if (direction == PF_OUT) {
2595 pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
2596 &th->th_sum, &pd->baddr, bport, 0, af);
2597 rewrite++;
2598 } else {
2599 pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
2600 &th->th_sum, &pd->baddr, bport, 0, af);
2601 rewrite++;
2602 }
2603 }
2604 if (((r->rule_flag & PFRULE_RETURNRST) ||
2605 (r->rule_flag & PFRULE_RETURN)) &&
2606 !(th->th_flags & TH_RST)) {
2607 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2608
2609 if (th->th_flags & TH_SYN)
2610 ack++;
2611 if (th->th_flags & TH_FIN)
2612 ack++;
2613 pf_send_tcp(r, af, pd->dst,
2614 pd->src, th->th_dport, th->th_sport,
2615 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
2616 r->return_ttl);
2617 } else if ((af == AF_INET) && r->return_icmp)
2618 pf_send_icmp(m, r->return_icmp >> 8,
2619 r->return_icmp & 255, af, r);
2620 else if ((af == AF_INET6) && r->return_icmp6)
2621 pf_send_icmp(m, r->return_icmp6 >> 8,
2622 r->return_icmp6 & 255, af, r);
2623 }
2624
2625 if (r->action == PF_DROP)
2626 return (PF_DROP);
2627
2628 pf_tag_packet(m, tag);
2629
2630 if (r->keep_state || nr != NULL ||
2631 (pd->flags & PFDESC_TCP_NORM)) {
2632 /* create new state */
2633 u_int16_t len;
2634 struct pf_state *s = NULL;
2635 struct pf_src_node *sn = NULL;
2636
2637 len = pd->tot_len - off - (th->th_off << 2);
2638
2639 /* check maximums */
2640 if (r->max_states && (r->states >= r->max_states))
2641 goto cleanup;
2642		/* src node for filter rule */
2643 if ((r->rule_flag & PFRULE_SRCTRACK ||
2644 r->rpool.opts & PF_POOL_STICKYADDR) &&
2645 pf_insert_src_node(&sn, r, saddr, af) != 0)
2646 goto cleanup;
2647 /* src node for translation rule */
2648 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
2649 ((direction == PF_OUT &&
2650 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
2651 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
2652 goto cleanup;
2653 s = pool_get(&pf_state_pl, PR_NOWAIT);
2654 if (s == NULL) {
2655cleanup:
2656 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
2657 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
2658 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2659 pf_status.src_nodes--;
2660 pool_put(&pf_src_tree_pl, sn);
2661 }
2662 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
2663 nsn->expire == 0) {
2664 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
2665 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2666 pf_status.src_nodes--;
2667 pool_put(&pf_src_tree_pl, nsn);
2668 }
2669 REASON_SET(&reason, PFRES_MEMORY);
2670 return (PF_DROP);
2671 }
2672 bzero(s, sizeof(*s));
2673 r->states++;
2674 if (a != NULL)
2675 a->states++;
2676 s->rule.ptr = r;
2677 s->nat_rule.ptr = nr;
2678 if (s->nat_rule.ptr != NULL)
2679 s->nat_rule.ptr->states++;
2680 s->anchor.ptr = a;
2681 s->allow_opts = r->allow_opts;
2682 s->log = r->log & 2;
2683 s->proto = IPPROTO_TCP;
2684 s->direction = direction;
2685 s->af = af;
2686 if (direction == PF_OUT) {
2687 PF_ACPY(&s->gwy.addr, saddr, af);
2688 s->gwy.port = th->th_sport; /* sport */
2689 PF_ACPY(&s->ext.addr, daddr, af);
2690 s->ext.port = th->th_dport;
2691 if (nr != NULL) {
2692 PF_ACPY(&s->lan.addr, &pd->baddr, af);
2693 s->lan.port = bport;
2694 } else {
2695 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
2696 s->lan.port = s->gwy.port;
2697 }
2698 } else {
2699 PF_ACPY(&s->lan.addr, daddr, af);
2700 s->lan.port = th->th_dport;
2701 PF_ACPY(&s->ext.addr, saddr, af);
2702 s->ext.port = th->th_sport;
2703 if (nr != NULL) {
2704 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
2705 s->gwy.port = bport;
2706 } else {
2707 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
2708 s->gwy.port = s->lan.port;
2709 }
2710 }
2711
2712 s->src.seqlo = ntohl(th->th_seq);
2713 s->src.seqhi = s->src.seqlo + len + 1;
2714 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
2715 r->keep_state == PF_STATE_MODULATE) {
2716 /* Generate sequence number modulator */
2717 while ((s->src.seqdiff = karc4random()) == 0)
2718 ;
2719 pf_change_a(&th->th_seq, &th->th_sum,
2720 htonl(s->src.seqlo + s->src.seqdiff), 0);
2721 rewrite = 1;
2722 } else
2723 s->src.seqdiff = 0;
2724 if (th->th_flags & TH_SYN) {
2725 s->src.seqhi++;
2726 s->src.wscale = pf_get_wscale(m, off, th->th_off, af);
2727 }
2728 s->src.max_win = MAX(ntohs(th->th_win), 1);
2729 if (s->src.wscale & PF_WSCALE_MASK) {
2730 /* Remove scale factor from initial window */
2731 int win = s->src.max_win;
2732 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
2733 s->src.max_win = (win - 1) >>
2734 (s->src.wscale & PF_WSCALE_MASK);
2735 }
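		/*
		 * Worked example (illustrative): a SYN advertising
		 * th_win = 65535 with a window scale of 7 is stored
		 * above as (65535 + 128 - 1) >> 7 = 512, i.e. the
		 * ceiling of max_win / 2^wscale.
		 */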
2736 if (th->th_flags & TH_FIN)
2737 s->src.seqhi++;
2738 s->dst.seqhi = 1;
2739 s->dst.max_win = 1;
2740 s->src.state = TCPS_SYN_SENT;
2741 s->dst.state = TCPS_CLOSED;
2742 s->creation = time_second;
2743 s->expire = time_second;
2744 s->timeout = PFTM_TCP_FIRST_PACKET;
2745 pf_set_rt_ifp(s, saddr);
2746 if (sn != NULL) {
2747 s->src_node = sn;
2748 s->src_node->states++;
2749 }
2750 if (nsn != NULL) {
2751 PF_ACPY(&nsn->raddr, &pd->naddr, af);
2752 s->nat_src_node = nsn;
2753 s->nat_src_node->states++;
2754 }
2755 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
2756 off, pd, th, &s->src, &s->dst)) {
2757 REASON_SET(&reason, PFRES_MEMORY);
2758 pf_src_tree_remove_state(s);
2759 pool_put(&pf_state_pl, s);
2760 return (PF_DROP);
2761 }
2762 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
2763 pf_normalize_tcp_stateful(m, off, pd, &reason, th, &s->src,
2764 &s->dst, &rewrite)) {
2765 pf_normalize_tcp_cleanup(s);
2766 pf_src_tree_remove_state(s);
2767 pool_put(&pf_state_pl, s);
2768 return (PF_DROP);
2769 }
2770 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
2771 pf_normalize_tcp_cleanup(s);
2772 REASON_SET(&reason, PFRES_MEMORY);
2773 pf_src_tree_remove_state(s);
2774 pool_put(&pf_state_pl, s);
2775 return (PF_DROP);
2776 } else
2777 *sm = s;
2778 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
2779 r->keep_state == PF_STATE_SYNPROXY) {
2780 s->src.state = PF_TCPS_PROXY_SRC;
2781 if (nr != NULL) {
2782 if (direction == PF_OUT) {
2783 pf_change_ap(saddr, &th->th_sport,
2784 pd->ip_sum, &th->th_sum, &pd->baddr,
2785 bport, 0, af);
2786 } else {
2787 pf_change_ap(daddr, &th->th_dport,
2788 pd->ip_sum, &th->th_sum, &pd->baddr,
2789 bport, 0, af);
2790 }
2791 }
2792 s->src.seqhi = karc4random();
2793 /* Find mss option */
2794 mss = pf_get_mss(m, off, th->th_off, af);
2795 mss = pf_calc_mss(saddr, af, mss);
2796 mss = pf_calc_mss(daddr, af, mss);
2797 s->src.mss = mss;
2798 pf_send_tcp(r, af, daddr, saddr, th->th_dport,
2799 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
2800 TH_SYN|TH_ACK, 0, s->src.mss, 0);
2801 return (PF_SYNPROXY_DROP);
2802 }
2803 }
2804
2805 /* copy back packet headers if we performed NAT operations */
2806 if (rewrite)
2807 m_copyback(m, off, sizeof(*th), (caddr_t)th);
2808
2809 return (PF_PASS);
2810}
2811
2812int
2813pf_test_udp(struct pf_rule **rm, struct pf_state **sm, int direction,
2814 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
2815 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm)
2816{
2817 struct pf_rule *nr = NULL;
2818 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
2819 struct udphdr *uh = pd->hdr.udp;
2820 u_int16_t bport, nport = 0;
2821 sa_family_t af = pd->af;
2822 int lookup = -1;
2823 uid_t uid;
2824 gid_t gid;
2825 struct pf_rule *r, *a = NULL;
2826 struct pf_ruleset *ruleset = NULL;
2827 struct pf_src_node *nsn = NULL;
2828 u_short reason;
2829 int rewrite = 0;
2830 int tag = -1;
2831
2832 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
2833
2834 if (direction == PF_OUT) {
2835 bport = nport = uh->uh_sport;
2836 /* check outgoing packet for BINAT/NAT */
2837 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
2838 saddr, uh->uh_sport, daddr, uh->uh_dport,
2839 &pd->naddr, &nport)) != NULL) {
2840 PF_ACPY(&pd->baddr, saddr, af);
2841 pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
2842 &uh->uh_sum, &pd->naddr, nport, 1, af);
2843 rewrite++;
2844 if (nr->natpass)
2845 r = NULL;
2846 pd->nat_rule = nr;
2847 }
2848 } else {
2849 bport = nport = uh->uh_dport;
2850 /* check incoming packet for BINAT/RDR */
2851 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
2852 saddr, uh->uh_sport, daddr, uh->uh_dport, &pd->naddr,
2853 &nport)) != NULL) {
2854 PF_ACPY(&pd->baddr, daddr, af);
2855 pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
2856 &uh->uh_sum, &pd->naddr, nport, 1, af);
2857 rewrite++;
2858 if (nr->natpass)
2859 r = NULL;
2860 pd->nat_rule = nr;
2861 }
2862 }
2863
2864 while (r != NULL) {
2865 r->evaluations++;
2866 if (r->kif != NULL &&
2867 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
2868 r = r->skip[PF_SKIP_IFP].ptr;
2869 else if (r->direction && r->direction != direction)
2870 r = r->skip[PF_SKIP_DIR].ptr;
2871 else if (r->af && r->af != af)
2872 r = r->skip[PF_SKIP_AF].ptr;
2873 else if (r->proto && r->proto != IPPROTO_UDP)
2874 r = r->skip[PF_SKIP_PROTO].ptr;
2875 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, r->src.not))
2876 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
2877 else if (r->src.port_op && !pf_match_port(r->src.port_op,
2878 r->src.port[0], r->src.port[1], uh->uh_sport))
2879 r = r->skip[PF_SKIP_SRC_PORT].ptr;
2880 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, r->dst.not))
2881 r = r->skip[PF_SKIP_DST_ADDR].ptr;
2882 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2883 r->dst.port[0], r->dst.port[1], uh->uh_dport))
2884 r = r->skip[PF_SKIP_DST_PORT].ptr;
2885 else if (r->tos && !(r->tos & pd->tos))
2886 r = TAILQ_NEXT(r, entries);
2887 else if (r->rule_flag & PFRULE_FRAGMENT)
2888 r = TAILQ_NEXT(r, entries);
2889 else if (r->uid.op && (lookup != -1 || (lookup =
2890 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2891 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
2892 uid))
2893 r = TAILQ_NEXT(r, entries);
2894 else if (r->gid.op && (lookup != -1 || (lookup =
2895 pf_socket_lookup(&uid, &gid, direction, pd), 1)) &&
2896 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
2897 gid))
2898 r = TAILQ_NEXT(r, entries);
2899 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
2900 r = TAILQ_NEXT(r, entries);
2901 else if (r->anchorname[0] && r->anchor == NULL)
2902 r = TAILQ_NEXT(r, entries);
2903 else if (r->os_fingerprint != PF_OSFP_ANY)
2904 r = TAILQ_NEXT(r, entries);
2905 else {
2906 if (r->tag)
2907 tag = r->tag;
2908 if (r->anchor == NULL) {
2909 *rm = r;
2910 *am = a;
2911 *rsm = ruleset;
2912 if ((*rm)->quick)
2913 break;
2914 r = TAILQ_NEXT(r, entries);
2915 } else
2916 PF_STEP_INTO_ANCHOR(r, a, ruleset,
2917 PF_RULESET_FILTER);
2918 }
2919 if (r == NULL && a != NULL)
2920 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
2921 PF_RULESET_FILTER);
2922 }
2923 r = *rm;
2924 a = *am;
2925 ruleset = *rsm;
2926
2927 REASON_SET(&reason, PFRES_MATCH);
2928
2929 if (r->log) {
2930 if (rewrite)
2931 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
2932 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
2933 }
2934
2935 if ((r->action == PF_DROP) &&
2936 ((r->rule_flag & PFRULE_RETURNICMP) ||
2937 (r->rule_flag & PFRULE_RETURN))) {
2938 /* undo NAT changes, if they have taken place */
2939 if (nr != NULL) {
2940 if (direction == PF_OUT) {
2941 pf_change_ap(saddr, &uh->uh_sport, pd->ip_sum,
2942 &uh->uh_sum, &pd->baddr, bport, 1, af);
2943 rewrite++;
2944 } else {
2945 pf_change_ap(daddr, &uh->uh_dport, pd->ip_sum,
2946 &uh->uh_sum, &pd->baddr, bport, 1, af);
2947 rewrite++;
2948 }
2949 }
2950 if ((af == AF_INET) && r->return_icmp)
2951 pf_send_icmp(m, r->return_icmp >> 8,
2952 r->return_icmp & 255, af, r);
2953 else if ((af == AF_INET6) && r->return_icmp6)
2954 pf_send_icmp(m, r->return_icmp6 >> 8,
2955 r->return_icmp6 & 255, af, r);
2956 }
2957
2958 if (r->action == PF_DROP)
2959 return (PF_DROP);
2960
2961 pf_tag_packet(m, tag);
2962
2963 if (r->keep_state || nr != NULL) {
2964 /* create new state */
2965 struct pf_state *s = NULL;
2966 struct pf_src_node *sn = NULL;
2967
2968 /* check maximums */
2969 if (r->max_states && (r->states >= r->max_states))
2970 goto cleanup;
2971		/* src node for filter rule */
2972 if ((r->rule_flag & PFRULE_SRCTRACK ||
2973 r->rpool.opts & PF_POOL_STICKYADDR) &&
2974 pf_insert_src_node(&sn, r, saddr, af) != 0)
2975 goto cleanup;
2976 /* src node for translation rule */
2977 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
2978 ((direction == PF_OUT &&
2979 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
2980 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
2981 goto cleanup;
2982 s = pool_get(&pf_state_pl, PR_NOWAIT);
2983 if (s == NULL) {
2984cleanup:
2985 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
2986 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
2987 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2988 pf_status.src_nodes--;
2989 pool_put(&pf_src_tree_pl, sn);
2990 }
2991 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
2992 nsn->expire == 0) {
2993 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
2994 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
2995 pf_status.src_nodes--;
2996 pool_put(&pf_src_tree_pl, nsn);
2997 }
2998 REASON_SET(&reason, PFRES_MEMORY);
2999 return (PF_DROP);
3000 }
3001 bzero(s, sizeof(*s));
3002 r->states++;
3003 if (a != NULL)
3004 a->states++;
3005 s->rule.ptr = r;
3006 s->nat_rule.ptr = nr;
3007 if (s->nat_rule.ptr != NULL)
3008 s->nat_rule.ptr->states++;
3009 s->anchor.ptr = a;
3010 s->allow_opts = r->allow_opts;
3011 s->log = r->log & 2;
3012 s->proto = IPPROTO_UDP;
3013 s->direction = direction;
3014 s->af = af;
3015 if (direction == PF_OUT) {
3016 PF_ACPY(&s->gwy.addr, saddr, af);
3017 s->gwy.port = uh->uh_sport;
3018 PF_ACPY(&s->ext.addr, daddr, af);
3019 s->ext.port = uh->uh_dport;
3020 if (nr != NULL) {
3021 PF_ACPY(&s->lan.addr, &pd->baddr, af);
3022 s->lan.port = bport;
3023 } else {
3024 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
3025 s->lan.port = s->gwy.port;
3026 }
3027 } else {
3028 PF_ACPY(&s->lan.addr, daddr, af);
3029 s->lan.port = uh->uh_dport;
3030 PF_ACPY(&s->ext.addr, saddr, af);
3031 s->ext.port = uh->uh_sport;
3032 if (nr != NULL) {
3033 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
3034 s->gwy.port = bport;
3035 } else {
3036 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
3037 s->gwy.port = s->lan.port;
3038 }
3039 }
3040 s->src.state = PFUDPS_SINGLE;
3041 s->dst.state = PFUDPS_NO_TRAFFIC;
3042 s->creation = time_second;
3043 s->expire = time_second;
3044 s->timeout = PFTM_UDP_FIRST_PACKET;
3045 pf_set_rt_ifp(s, saddr);
3046 if (sn != NULL) {
3047 s->src_node = sn;
3048 s->src_node->states++;
3049 }
3050 if (nsn != NULL) {
3051 PF_ACPY(&nsn->raddr, &pd->naddr, af);
3052 s->nat_src_node = nsn;
3053 s->nat_src_node->states++;
3054 }
3055 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
3056 REASON_SET(&reason, PFRES_MEMORY);
3057 pf_src_tree_remove_state(s);
3058 pool_put(&pf_state_pl, s);
3059 return (PF_DROP);
3060 } else
3061 *sm = s;
3062 }
3063
3064 /* copy back packet headers if we performed NAT operations */
3065 if (rewrite)
3066 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
3067
3068 return (PF_PASS);
3069}
3070
3071int
3072pf_test_icmp(struct pf_rule **rm, struct pf_state **sm, int direction,
3073 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
3074 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm)
3075{
3076 struct pf_rule *nr = NULL;
3077 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3078 struct pf_rule *r, *a = NULL;
3079 struct pf_ruleset *ruleset = NULL;
3080 struct pf_src_node *nsn = NULL;
3081 u_short reason;
3082 u_int16_t icmpid = 0;
3083 sa_family_t af = pd->af;
3084 u_int8_t icmptype = 0, icmpcode = 0;
3085 int state_icmp = 0;
3086 int tag = -1;
3087#ifdef INET6
3088 int rewrite = 0;
3089#endif /* INET6 */
3090
3091 switch (pd->proto) {
3092#ifdef INET
3093 case IPPROTO_ICMP:
3094 icmptype = pd->hdr.icmp->icmp_type;
3095 icmpcode = pd->hdr.icmp->icmp_code;
3096 icmpid = pd->hdr.icmp->icmp_id;
3097
3098 if (icmptype == ICMP_UNREACH ||
3099 icmptype == ICMP_SOURCEQUENCH ||
3100 icmptype == ICMP_REDIRECT ||
3101 icmptype == ICMP_TIMXCEED ||
3102 icmptype == ICMP_PARAMPROB)
3103 state_icmp++;
3104 break;
3105#endif /* INET */
3106#ifdef INET6
3107 case IPPROTO_ICMPV6:
3108 icmptype = pd->hdr.icmp6->icmp6_type;
3109 icmpcode = pd->hdr.icmp6->icmp6_code;
3110 icmpid = pd->hdr.icmp6->icmp6_id;
3111
3112 if (icmptype == ICMP6_DST_UNREACH ||
3113 icmptype == ICMP6_PACKET_TOO_BIG ||
3114 icmptype == ICMP6_TIME_EXCEEDED ||
3115 icmptype == ICMP6_PARAM_PROB)
3116 state_icmp++;
3117 break;
3118#endif /* INET6 */
3119 }
3120
3121 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3122
3123 if (direction == PF_OUT) {
3124 /* check outgoing packet for BINAT/NAT */
3125 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
3126 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3127 PF_ACPY(&pd->baddr, saddr, af);
3128 switch (af) {
3129#ifdef INET
3130 case AF_INET:
3131 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3132 pd->naddr.v4.s_addr, 0);
3133 break;
3134#endif /* INET */
3135#ifdef INET6
3136 case AF_INET6:
3137 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3138 &pd->naddr, 0);
3139 rewrite++;
3140 break;
3141#endif /* INET6 */
3142 }
3143 if (nr->natpass)
3144 r = NULL;
3145 pd->nat_rule = nr;
3146 }
3147 } else {
3148 /* check incoming packet for BINAT/RDR */
3149 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
3150 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3151 PF_ACPY(&pd->baddr, daddr, af);
3152 switch (af) {
3153#ifdef INET
3154 case AF_INET:
3155 pf_change_a(&daddr->v4.s_addr,
3156 pd->ip_sum, pd->naddr.v4.s_addr, 0);
3157 break;
3158#endif /* INET */
3159#ifdef INET6
3160 case AF_INET6:
3161 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3162 &pd->naddr, 0);
3163 rewrite++;
3164 break;
3165#endif /* INET6 */
3166 }
3167 if (nr->natpass)
3168 r = NULL;
3169 pd->nat_rule = nr;
3170 }
3171 }
3172
3173 while (r != NULL) {
3174 r->evaluations++;
3175 if (r->kif != NULL &&
3176 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
3177 r = r->skip[PF_SKIP_IFP].ptr;
3178 else if (r->direction && r->direction != direction)
3179 r = r->skip[PF_SKIP_DIR].ptr;
3180 else if (r->af && r->af != af)
3181 r = r->skip[PF_SKIP_AF].ptr;
3182 else if (r->proto && r->proto != pd->proto)
3183 r = r->skip[PF_SKIP_PROTO].ptr;
3184 else if (PF_MISMATCHAW(&r->src.addr, saddr, af, r->src.not))
3185 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3186 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, r->dst.not))
3187 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3188 else if (r->type && r->type != icmptype + 1)
3189 r = TAILQ_NEXT(r, entries);
3190 else if (r->code && r->code != icmpcode + 1)
3191 r = TAILQ_NEXT(r, entries);
3192 else if (r->tos && !(r->tos & pd->tos))
3193 r = TAILQ_NEXT(r, entries);
3194 else if (r->rule_flag & PFRULE_FRAGMENT)
3195 r = TAILQ_NEXT(r, entries);
3196 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
3197 r = TAILQ_NEXT(r, entries);
3198 else if (r->anchorname[0] && r->anchor == NULL)
3199 r = TAILQ_NEXT(r, entries);
3200 else if (r->os_fingerprint != PF_OSFP_ANY)
3201 r = TAILQ_NEXT(r, entries);
3202 else {
3203 if (r->tag)
3204 tag = r->tag;
3205 if (r->anchor == NULL) {
3206 *rm = r;
3207 *am = a;
3208 *rsm = ruleset;
3209 if ((*rm)->quick)
3210 break;
3211 r = TAILQ_NEXT(r, entries);
3212 } else
3213 PF_STEP_INTO_ANCHOR(r, a, ruleset,
3214 PF_RULESET_FILTER);
3215 }
3216 if (r == NULL && a != NULL)
3217 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
3218 PF_RULESET_FILTER);
3219 }
3220 r = *rm;
3221 a = *am;
3222 ruleset = *rsm;
3223
3224 REASON_SET(&reason, PFRES_MATCH);
3225
3226 if (r->log) {
3227#ifdef INET6
3228 if (rewrite)
3229 m_copyback(m, off, sizeof(struct icmp6_hdr),
3230 (caddr_t)pd->hdr.icmp6);
3231#endif /* INET6 */
3232 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
3233 }
3234
3235 if (r->action != PF_PASS)
3236 return (PF_DROP);
3237
3238 pf_tag_packet(m, tag);
3239
3240 if (!state_icmp && (r->keep_state || nr != NULL)) {
3241 /* create new state */
3242 struct pf_state *s = NULL;
3243 struct pf_src_node *sn = NULL;
3244
3245 /* check maximums */
3246 if (r->max_states && (r->states >= r->max_states))
3247 goto cleanup;
3248		/* src node for filter rule */
3249 if ((r->rule_flag & PFRULE_SRCTRACK ||
3250 r->rpool.opts & PF_POOL_STICKYADDR) &&
3251 pf_insert_src_node(&sn, r, saddr, af) != 0)
3252 goto cleanup;
3253 /* src node for translation rule */
3254 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3255 ((direction == PF_OUT &&
3256 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
3257 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
3258 goto cleanup;
3259 s = pool_get(&pf_state_pl, PR_NOWAIT);
3260 if (s == NULL) {
3261cleanup:
3262 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3263 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
3264 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3265 pf_status.src_nodes--;
3266 pool_put(&pf_src_tree_pl, sn);
3267 }
3268 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
3269 nsn->expire == 0) {
3270 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
3271 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3272 pf_status.src_nodes--;
3273 pool_put(&pf_src_tree_pl, nsn);
3274 }
3275 REASON_SET(&reason, PFRES_MEMORY);
3276 return (PF_DROP);
3277 }
3278 bzero(s, sizeof(*s));
3279 r->states++;
3280 if (a != NULL)
3281 a->states++;
3282 s->rule.ptr = r;
3283 s->nat_rule.ptr = nr;
3284 if (s->nat_rule.ptr != NULL)
3285 s->nat_rule.ptr->states++;
3286 s->anchor.ptr = a;
3287 s->allow_opts = r->allow_opts;
3288 s->log = r->log & 2;
3289 s->proto = pd->proto;
3290 s->direction = direction;
3291 s->af = af;
3292 if (direction == PF_OUT) {
3293 PF_ACPY(&s->gwy.addr, saddr, af);
3294 s->gwy.port = icmpid;
3295 PF_ACPY(&s->ext.addr, daddr, af);
3296 s->ext.port = icmpid;
3297 if (nr != NULL)
3298 PF_ACPY(&s->lan.addr, &pd->baddr, af);
3299 else
3300 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
3301 s->lan.port = icmpid;
3302 } else {
3303 PF_ACPY(&s->lan.addr, daddr, af);
3304 s->lan.port = icmpid;
3305 PF_ACPY(&s->ext.addr, saddr, af);
3306 s->ext.port = icmpid;
3307 if (nr != NULL)
3308 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
3309 else
3310 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
3311 s->gwy.port = icmpid;
3312 }
3313 s->creation = time_second;
3314 s->expire = time_second;
3315 s->timeout = PFTM_ICMP_FIRST_PACKET;
3316 pf_set_rt_ifp(s, saddr);
3317 if (sn != NULL) {
3318 s->src_node = sn;
3319 s->src_node->states++;
3320 }
3321 if (nsn != NULL) {
3322 PF_ACPY(&nsn->raddr, &pd->naddr, af);
3323 s->nat_src_node = nsn;
3324 s->nat_src_node->states++;
3325 }
3326 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
3327 REASON_SET(&reason, PFRES_MEMORY);
3328 pf_src_tree_remove_state(s);
3329 pool_put(&pf_state_pl, s);
3330 return (PF_DROP);
3331 } else
3332 *sm = s;
3333 }
3334
3335#ifdef INET6
3336 /* copy back packet headers if we performed IPv6 NAT operations */
3337 if (rewrite)
3338 m_copyback(m, off, sizeof(struct icmp6_hdr),
3339 (caddr_t)pd->hdr.icmp6);
3340#endif /* INET6 */
3341
3342 return (PF_PASS);
3343}
3344
3345int
3346pf_test_other(struct pf_rule **rm, struct pf_state **sm, int direction,
3347 struct pfi_kif *kif, struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
3348 struct pf_rule **am, struct pf_ruleset **rsm)
3349{
3350 struct pf_rule *nr = NULL;
3351 struct pf_rule *r, *a = NULL;
3352 struct pf_ruleset *ruleset = NULL;
3353 struct pf_src_node *nsn = NULL;
3354 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
3355 sa_family_t af = pd->af;
3356 u_short reason;
3357 int tag = -1;
3358
3359 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3360
3361 if (direction == PF_OUT) {
3362 /* check outgoing packet for BINAT/NAT */
3363 if ((nr = pf_get_translation(pd, m, off, PF_OUT, kif, &nsn,
3364 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3365 PF_ACPY(&pd->baddr, saddr, af);
3366 switch (af) {
3367#ifdef INET
3368 case AF_INET:
3369 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3370 pd->naddr.v4.s_addr, 0);
3371 break;
3372#endif /* INET */
3373#ifdef INET6
3374 case AF_INET6:
3375 PF_ACPY(saddr, &pd->naddr, af);
3376 break;
3377#endif /* INET6 */
3378 }
3379 if (nr->natpass)
3380 r = NULL;
3381 pd->nat_rule = nr;
3382 }
3383 } else {
3384 /* check incoming packet for BINAT/RDR */
3385 if ((nr = pf_get_translation(pd, m, off, PF_IN, kif, &nsn,
3386 saddr, 0, daddr, 0, &pd->naddr, NULL)) != NULL) {
3387 PF_ACPY(&pd->baddr, daddr, af);
3388 switch (af) {
3389#ifdef INET
3390 case AF_INET:
3391 pf_change_a(&daddr->v4.s_addr,
3392 pd->ip_sum, pd->naddr.v4.s_addr, 0);
3393 break;
3394#endif /* INET */
3395#ifdef INET6
3396 case AF_INET6:
3397 PF_ACPY(daddr, &pd->naddr, af);
3398 break;
3399#endif /* INET6 */
3400 }
3401 if (nr->natpass)
3402 r = NULL;
3403 pd->nat_rule = nr;
3404 }
3405 }
3406
3407 while (r != NULL) {
3408 r->evaluations++;
3409 if (r->kif != NULL &&
3410 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
3411 r = r->skip[PF_SKIP_IFP].ptr;
3412 else if (r->direction && r->direction != direction)
3413 r = r->skip[PF_SKIP_DIR].ptr;
3414 else if (r->af && r->af != af)
3415 r = r->skip[PF_SKIP_AF].ptr;
3416 else if (r->proto && r->proto != pd->proto)
3417 r = r->skip[PF_SKIP_PROTO].ptr;
3418 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
3419 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3420 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
3421 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3422 else if (r->tos && !(r->tos & pd->tos))
3423 r = TAILQ_NEXT(r, entries);
3424 else if (r->rule_flag & PFRULE_FRAGMENT)
3425 r = TAILQ_NEXT(r, entries);
3426 else if (r->match_tag && !pf_match_tag(m, r, nr, &tag))
3427 r = TAILQ_NEXT(r, entries);
3428 else if (r->anchorname[0] && r->anchor == NULL)
3429 r = TAILQ_NEXT(r, entries);
3430 else if (r->os_fingerprint != PF_OSFP_ANY)
3431 r = TAILQ_NEXT(r, entries);
3432 else {
3433 if (r->tag)
3434 tag = r->tag;
3435 if (r->anchor == NULL) {
3436 *rm = r;
3437 *am = a;
3438 *rsm = ruleset;
3439 if ((*rm)->quick)
3440 break;
3441 r = TAILQ_NEXT(r, entries);
3442 } else
3443 PF_STEP_INTO_ANCHOR(r, a, ruleset,
3444 PF_RULESET_FILTER);
3445 }
3446 if (r == NULL && a != NULL)
3447 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
3448 PF_RULESET_FILTER);
3449 }
3450 r = *rm;
3451 a = *am;
3452 ruleset = *rsm;
3453
3454 REASON_SET(&reason, PFRES_MATCH);
3455
3456 if (r->log)
3457 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
3458
3459 if ((r->action == PF_DROP) &&
3460 ((r->rule_flag & PFRULE_RETURNICMP) ||
3461 (r->rule_flag & PFRULE_RETURN))) {
3462 struct pf_addr *a = NULL;
3463
3464 if (nr != NULL) {
3465 if (direction == PF_OUT)
3466 a = saddr;
3467 else
3468 a = daddr;
3469 }
3470 if (a != NULL) {
3471 switch (af) {
3472#ifdef INET
3473 case AF_INET:
3474 pf_change_a(&a->v4.s_addr, pd->ip_sum,
3475 pd->baddr.v4.s_addr, 0);
3476 break;
3477#endif /* INET */
3478#ifdef INET6
3479 case AF_INET6:
3480 PF_ACPY(a, &pd->baddr, af);
3481 break;
3482#endif /* INET6 */
3483 }
3484 }
3485 if ((af == AF_INET) && r->return_icmp)
3486 pf_send_icmp(m, r->return_icmp >> 8,
3487 r->return_icmp & 255, af, r);
3488 else if ((af == AF_INET6) && r->return_icmp6)
3489 pf_send_icmp(m, r->return_icmp6 >> 8,
3490 r->return_icmp6 & 255, af, r);
3491 }
3492
3493 if (r->action != PF_PASS)
3494 return (PF_DROP);
3495
3496 pf_tag_packet(m, tag);
3497
3498 if (r->keep_state || nr != NULL) {
3499 /* create new state */
3500 struct pf_state *s = NULL;
3501 struct pf_src_node *sn = NULL;
3502
3503 /* check maximums */
3504 if (r->max_states && (r->states >= r->max_states))
3505 goto cleanup;
3506		/* src node for filter rule */
3507 if ((r->rule_flag & PFRULE_SRCTRACK ||
3508 r->rpool.opts & PF_POOL_STICKYADDR) &&
3509 pf_insert_src_node(&sn, r, saddr, af) != 0)
3510 goto cleanup;
3511 /* src node for translation rule */
3512 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3513 ((direction == PF_OUT &&
3514 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
3515 (pf_insert_src_node(&nsn, nr, saddr, af) != 0)))
3516 goto cleanup;
3517 s = pool_get(&pf_state_pl, PR_NOWAIT);
3518 if (s == NULL) {
3519cleanup:
3520 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
3521 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
3522 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3523 pf_status.src_nodes--;
3524 pool_put(&pf_src_tree_pl, sn);
3525 }
3526 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
3527 nsn->expire == 0) {
3528 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
3529 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
3530 pf_status.src_nodes--;
3531 pool_put(&pf_src_tree_pl, nsn);
3532 }
3533 REASON_SET(&reason, PFRES_MEMORY);
3534 return (PF_DROP);
3535 }
3536 bzero(s, sizeof(*s));
3537 r->states++;
3538 if (a != NULL)
3539 a->states++;
3540 s->rule.ptr = r;
3541 s->nat_rule.ptr = nr;
3542 if (s->nat_rule.ptr != NULL)
3543 s->nat_rule.ptr->states++;
3544 s->anchor.ptr = a;
3545 s->allow_opts = r->allow_opts;
3546 s->log = r->log & 2;
3547 s->proto = pd->proto;
3548 s->direction = direction;
3549 s->af = af;
3550 if (direction == PF_OUT) {
3551 PF_ACPY(&s->gwy.addr, saddr, af);
3552 PF_ACPY(&s->ext.addr, daddr, af);
3553 if (nr != NULL)
3554 PF_ACPY(&s->lan.addr, &pd->baddr, af);
3555 else
3556 PF_ACPY(&s->lan.addr, &s->gwy.addr, af);
3557 } else {
3558 PF_ACPY(&s->lan.addr, daddr, af);
3559 PF_ACPY(&s->ext.addr, saddr, af);
3560 if (nr != NULL)
3561 PF_ACPY(&s->gwy.addr, &pd->baddr, af);
3562 else
3563 PF_ACPY(&s->gwy.addr, &s->lan.addr, af);
3564 }
3565 s->src.state = PFOTHERS_SINGLE;
3566 s->dst.state = PFOTHERS_NO_TRAFFIC;
3567 s->creation = time_second;
3568 s->expire = time_second;
3569 s->timeout = PFTM_OTHER_FIRST_PACKET;
3570 pf_set_rt_ifp(s, saddr);
3571 if (sn != NULL) {
3572 s->src_node = sn;
3573 s->src_node->states++;
3574 }
3575 if (nsn != NULL) {
3576 PF_ACPY(&nsn->raddr, &pd->naddr, af);
3577 s->nat_src_node = nsn;
3578 s->nat_src_node->states++;
3579 }
3580 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
3581 REASON_SET(&reason, PFRES_MEMORY);
3582 pf_src_tree_remove_state(s);
3583 pool_put(&pf_state_pl, s);
3584 return (PF_DROP);
3585 } else
3586 *sm = s;
3587 }
3588
3589 return (PF_PASS);
3590}
3591
3592int
3593pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3594 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3595 struct pf_ruleset **rsm)
3596{
3597 struct pf_rule *r, *a = NULL;
3598 struct pf_ruleset *ruleset = NULL;
3599 sa_family_t af = pd->af;
3600 u_short reason;
3601 int tag = -1;
3602
3603 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3604 while (r != NULL) {
3605 r->evaluations++;
3606 if (r->kif != NULL &&
3607 (r->kif != kif && r->kif != kif->pfik_parent) == !r->ifnot)
3608 r = r->skip[PF_SKIP_IFP].ptr;
3609 else if (r->direction && r->direction != direction)
3610 r = r->skip[PF_SKIP_DIR].ptr;
3611 else if (r->af && r->af != af)
3612 r = r->skip[PF_SKIP_AF].ptr;
3613 else if (r->proto && r->proto != pd->proto)
3614 r = r->skip[PF_SKIP_PROTO].ptr;
3615 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
3616 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3617 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
3618 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3619 else if (r->tos && !(r->tos & pd->tos))
3620 r = TAILQ_NEXT(r, entries);
3621 else if (r->src.port_op || r->dst.port_op ||
3622 r->flagset || r->type || r->code ||
3623 r->os_fingerprint != PF_OSFP_ANY)
3624 r = TAILQ_NEXT(r, entries);
3625 else if (r->match_tag && !pf_match_tag(m, r, NULL, &tag))
3626 r = TAILQ_NEXT(r, entries);
3627 else if (r->anchorname[0] && r->anchor == NULL)
3628 r = TAILQ_NEXT(r, entries);
3629 else {
3630 if (r->anchor == NULL) {
3631 *rm = r;
3632 *am = a;
3633 *rsm = ruleset;
3634 if ((*rm)->quick)
3635 break;
3636 r = TAILQ_NEXT(r, entries);
3637 } else
3638 PF_STEP_INTO_ANCHOR(r, a, ruleset,
3639 PF_RULESET_FILTER);
3640 }
3641 if (r == NULL && a != NULL)
3642 PF_STEP_OUT_OF_ANCHOR(r, a, ruleset,
3643 PF_RULESET_FILTER);
3644 }
3645 r = *rm;
3646 a = *am;
3647 ruleset = *rsm;
3648
3649 REASON_SET(&reason, PFRES_MATCH);
3650
3651 if (r->log)
3652 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset);
3653
3654 if (r->action != PF_PASS)
3655 return (PF_DROP);
3656
3657 pf_tag_packet(m, tag);
3658
3659 return (PF_PASS);
3660}
3661
3662int
3663pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
3664 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
3665 u_short *reason)
3666{
3667 struct pf_state key;
3668 struct tcphdr *th = pd->hdr.tcp;
3669 u_int16_t win = ntohs(th->th_win);
3670 u_int32_t ack, end, seq;
3671 u_int8_t sws, dws;
3672 int ackskew;
3673 int copyback = 0;
3674 struct pf_state_peer *src, *dst;
3675
3676 key.af = pd->af;
3677 key.proto = IPPROTO_TCP;
3678 if (direction == PF_IN) {
3679 PF_ACPY(&key.ext.addr, pd->src, key.af);
3680 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
3681 key.ext.port = th->th_sport;
3682 key.gwy.port = th->th_dport;
3683 } else {
3684 PF_ACPY(&key.lan.addr, pd->src, key.af);
3685 PF_ACPY(&key.ext.addr, pd->dst, key.af);
3686 key.lan.port = th->th_sport;
3687 key.ext.port = th->th_dport;
3688 }
3689
3690 STATE_LOOKUP();
3691
3692 if (direction == (*state)->direction) {
3693 src = &(*state)->src;
3694 dst = &(*state)->dst;
3695 } else {
3696 src = &(*state)->dst;
3697 dst = &(*state)->src;
3698 }
3699
3700 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
3701 if (direction != (*state)->direction)
3702 return (PF_SYNPROXY_DROP);
3703 if (th->th_flags & TH_SYN) {
3704 if (ntohl(th->th_seq) != (*state)->src.seqlo)
3705 return (PF_DROP);
3706 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
3707 pd->src, th->th_dport, th->th_sport,
3708 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
3709 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0);
3710 return (PF_SYNPROXY_DROP);
3711 } else if (!(th->th_flags & TH_ACK) ||
3712 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
3713 (ntohl(th->th_seq) != (*state)->src.seqlo + 1))
3714 return (PF_DROP);
3715 else
3716 (*state)->src.state = PF_TCPS_PROXY_DST;
3717 }
3718 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
3719 struct pf_state_host *src, *dst;
3720
3721 if (direction == PF_OUT) {
3722 src = &(*state)->gwy;
3723 dst = &(*state)->ext;
3724 } else {
3725 src = &(*state)->ext;
3726 dst = &(*state)->lan;
3727 }
3728 if (direction == (*state)->direction) {
3729 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
3730 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
3731 (ntohl(th->th_seq) != (*state)->src.seqlo + 1))
3732 return (PF_DROP);
3733 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
3734 if ((*state)->dst.seqhi == 1)
3735 (*state)->dst.seqhi = karc4random();
3736 pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
3737 &dst->addr, src->port, dst->port,
3738 (*state)->dst.seqhi, 0, TH_SYN, 0,
3739 (*state)->src.mss, 0);
3740 return (PF_SYNPROXY_DROP);
3741 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
3742 (TH_SYN|TH_ACK)) ||
3743 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1))
3744 return (PF_DROP);
3745 else {
3746 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
3747 (*state)->dst.seqlo = ntohl(th->th_seq);
3748 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
3749 pd->src, th->th_dport, th->th_sport,
3750 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
3751 TH_ACK, (*state)->src.max_win, 0, 0);
3752 pf_send_tcp((*state)->rule.ptr, pd->af, &src->addr,
3753 &dst->addr, src->port, dst->port,
3754 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
3755 TH_ACK, (*state)->dst.max_win, 0, 0);
3756 (*state)->src.seqdiff = (*state)->dst.seqhi -
3757 (*state)->src.seqlo;
3758 (*state)->dst.seqdiff = (*state)->src.seqhi -
3759 (*state)->dst.seqlo;
3760 (*state)->src.seqhi = (*state)->src.seqlo +
3761 (*state)->src.max_win;
3762 (*state)->dst.seqhi = (*state)->dst.seqlo +
3763 (*state)->dst.max_win;
3764 (*state)->src.wscale = (*state)->dst.wscale = 0;
3765 (*state)->src.state = (*state)->dst.state =
3766 TCPS_ESTABLISHED;
3767 return (PF_SYNPROXY_DROP);
3768 }
3769 }
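	/*
	 * Summary of the synproxy exchange handled by the two blocks
	 * above: in PF_TCPS_PROXY_SRC pf answers the client's SYN
	 * itself with a SYN|ACK (ISN src.seqhi) and drops the original
	 * SYN; once the client's ACK comes back the state moves to
	 * PF_TCPS_PROXY_DST, where pf opens the real connection to the
	 * destination, completes both three-way handshakes and records
	 * src/dst seqdiff so the two independently chosen sequence
	 * spaces can be translated into each other from here on.
	 */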
3770
3771 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3772 sws = src->wscale & PF_WSCALE_MASK;
3773 dws = dst->wscale & PF_WSCALE_MASK;
3774 } else
3775 sws = dws = 0;
3776
3777 /*
3778 * Sequence tracking algorithm from Guido van Rooij's paper:
3779 * http://www.madison-gurkha.com/publications/tcp_filtering/
3780 * tcp_filtering.ps
3781 */
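	/*
	 * In short: for each peer the state keeps seqlo (the highest
	 * sequence number it has sent), seqhi (the upper edge of its
	 * send window as advertised by the other side), max_win and
	 * the window scale.  The checks below accept a segment only if
	 * its data lies within
	 * [src->seqlo - (dst->max_win << dws), src->seqhi] and its ACK
	 * is no more than MAXACKWINDOW ahead of dst->seqlo and no more
	 * than MAXACKWINDOW << sws behind it.
	 */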
3782
3783 seq = ntohl(th->th_seq);
3784 if (src->seqlo == 0) {
3785 /* First packet from this end. Set its state */
3786
3787 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3788 src->scrub == NULL) {
3789 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3790 REASON_SET(reason, PFRES_MEMORY);
3791 return (PF_DROP);
3792 }
3793 }
3794
3795 /* Deferred generation of sequence number modulator */
3796 if (dst->seqdiff && !src->seqdiff) {
3797 while ((src->seqdiff = karc4random()) == 0)
3798 ;
3799 ack = ntohl(th->th_ack) - dst->seqdiff;
3800 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3801 src->seqdiff), 0);
3802 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3803 copyback = 1;
3804 } else {
3805 ack = ntohl(th->th_ack);
3806 }
3807
3808 end = seq + pd->p_len;
3809 if (th->th_flags & TH_SYN) {
3810 end++;
3811 if (dst->wscale & PF_WSCALE_FLAG) {
3812 src->wscale = pf_get_wscale(m, off, th->th_off,
3813 pd->af);
3814 if (src->wscale & PF_WSCALE_FLAG) {
3815 /* Remove scale factor from initial
3816 * window */
3817 sws = src->wscale & PF_WSCALE_MASK;
3818 win = ((u_int32_t)win + (1 << sws) - 1)
3819 >> sws;
3820 dws = dst->wscale & PF_WSCALE_MASK;
3821 } else {
3822 /* fixup other window */
3823 dst->max_win <<= dst->wscale &
3824 PF_WSCALE_MASK;
3825 /* in case of a retrans SYN|ACK */
3826 dst->wscale = 0;
3827 }
3828 }
3829 }
3830 if (th->th_flags & TH_FIN)
3831 end++;
3832
3833 src->seqlo = seq;
3834 if (src->state < TCPS_SYN_SENT)
3835 src->state = TCPS_SYN_SENT;
3836
3837 /*
3838 * May need to slide the window (seqhi may have been set by
3839 * the crappy stack check or if we picked up the connection
3840 * after establishment)
3841 */
3842 if (src->seqhi == 1 ||
3843 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3844 src->seqhi = end + MAX(1, dst->max_win << dws);
3845 if (win > src->max_win)
3846 src->max_win = win;
3847
3848 } else {
3849 ack = ntohl(th->th_ack) - dst->seqdiff;
3850 if (src->seqdiff) {
3851 /* Modulate sequence numbers */
3852 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3853 src->seqdiff), 0);
3854 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3855 copyback = 1;
3856 }
3857 end = seq + pd->p_len;
3858 if (th->th_flags & TH_SYN)
3859 end++;
3860 if (th->th_flags & TH_FIN)
3861 end++;
3862 }
3863
3864 if ((th->th_flags & TH_ACK) == 0) {
3865 /* Let it pass through the ack skew check */
3866 ack = dst->seqlo;
3867 } else if ((ack == 0 &&
3868 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3869 /* broken tcp stacks do not set ack */
3870 (dst->state < TCPS_SYN_SENT)) {
3871 /*
3872 * Many stacks (ours included) will set the ACK number in an
3873 * FIN|ACK if the SYN times out -- no sequence to ACK.
3874 */
3875 ack = dst->seqlo;
3876 }
3877
3878 if (seq == end) {
3879		/* Ease sequencing restrictions on segments carrying no data */
3880 seq = src->seqlo;
3881 end = seq;
3882 }
3883
3884 ackskew = dst->seqlo - ack;
3885
3886#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
3887 if (SEQ_GEQ(src->seqhi, end) &&
3888 /* Last octet inside other's window space */
3889 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3890 /* Retrans: not more than one window back */
3891 (ackskew >= -MAXACKWINDOW) &&
3892 /* Acking not more than one reassembled fragment backwards */
3893 (ackskew <= (MAXACKWINDOW << sws))) {
3894 /* Acking not more than one window forward */
3895
3896 /* update max window */
3897 if (src->max_win < win)
3898 src->max_win = win;
3899 /* synchronize sequencing */
3900 if (SEQ_GT(end, src->seqlo))
3901 src->seqlo = end;
3902 /* slide the window of what the other end can send */
3903 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3904 dst->seqhi = ack + MAX((win << sws), 1);
3905
3906
3907 /* update states */
3908 if (th->th_flags & TH_SYN)
3909 if (src->state < TCPS_SYN_SENT)
3910 src->state = TCPS_SYN_SENT;
3911 if (th->th_flags & TH_FIN)
3912 if (src->state < TCPS_CLOSING)
3913 src->state = TCPS_CLOSING;
3914 if (th->th_flags & TH_ACK) {
3915 if (dst->state == TCPS_SYN_SENT)
3916 dst->state = TCPS_ESTABLISHED;
3917 else if (dst->state == TCPS_CLOSING)
3918 dst->state = TCPS_FIN_WAIT_2;
3919 }
3920 if (th->th_flags & TH_RST)
3921 src->state = dst->state = TCPS_TIME_WAIT;
3922
3923 /* update expire time */
3924 (*state)->expire = time_second;
3925 if (src->state >= TCPS_FIN_WAIT_2 &&
3926 dst->state >= TCPS_FIN_WAIT_2)
3927 (*state)->timeout = PFTM_TCP_CLOSED;
3928 else if (src->state >= TCPS_FIN_WAIT_2 ||
3929 dst->state >= TCPS_FIN_WAIT_2)
3930 (*state)->timeout = PFTM_TCP_FIN_WAIT;
3931 else if (src->state < TCPS_ESTABLISHED ||
3932 dst->state < TCPS_ESTABLISHED)
3933 (*state)->timeout = PFTM_TCP_OPENING;
3934 else if (src->state >= TCPS_CLOSING ||
3935 dst->state >= TCPS_CLOSING)
3936 (*state)->timeout = PFTM_TCP_CLOSING;
3937 else
3938 (*state)->timeout = PFTM_TCP_ESTABLISHED;
3939
3940 /* Fall through to PASS packet */
3941
3942 } else if ((dst->state < TCPS_SYN_SENT ||
3943 dst->state >= TCPS_FIN_WAIT_2 ||
3944 src->state >= TCPS_FIN_WAIT_2) &&
3945 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
3946 /* Within a window forward of the originating packet */
3947 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
3948 /* Within a window backward of the originating packet */
3949
3950 /*
3951 * This currently handles three situations:
3952 * 1) Stupid stacks will shotgun SYNs before their peer
3953 * replies.
3954 * 2) When PF catches an already established stream (the
3955 * firewall rebooted, the state table was flushed, routes
3956 * changed...)
3957 * 3) Packets get funky immediately after the connection
3958 * closes (this should catch Solaris spurious ACK|FINs
3959 * that web servers like to spew after a close)
3960 *
3961 * This must be a little more careful than the above code
3962 * since packet floods will also be caught here. We don't
3963 * update the TTL here to mitigate the damage of a packet
3964 * flood and so the same code can handle awkward establishment
3965 * and a loosened connection close.
3966 * In the establishment case, a correct peer response will
3967 * validate the connection, go through the normal state code
3968 * and keep updating the state TTL.
3969 */
3970
3971 if (pf_status.debug >= PF_DEBUG_MISC) {
3972 printf("pf: loose state match: ");
3973 pf_print_state(*state);
3974 pf_print_flags(th->th_flags);
3975 printf(" seq=%u ack=%u len=%u ackskew=%d pkts=%d:%d\n",
3976 seq, ack, pd->p_len, ackskew,
3977 (*state)->packets[0], (*state)->packets[1]);
3978 }
3979
3980 /* update max window */
3981 if (src->max_win < win)
3982 src->max_win = win;
3983 /* synchronize sequencing */
3984 if (SEQ_GT(end, src->seqlo))
3985 src->seqlo = end;
3986 /* slide the window of what the other end can send */
3987 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3988 dst->seqhi = ack + MAX((win << sws), 1);
3989
3990 /*
3991 * Cannot set dst->seqhi here since this could be a shotgunned
3992 * SYN and not an already established connection.
3993 */
3994
3995 if (th->th_flags & TH_FIN)
3996 if (src->state < TCPS_CLOSING)
3997 src->state = TCPS_CLOSING;
3998 if (th->th_flags & TH_RST)
3999 src->state = dst->state = TCPS_TIME_WAIT;
4000
4001 /* Fall through to PASS packet */
4002
4003 } else {
4004 if ((*state)->dst.state == TCPS_SYN_SENT &&
4005 (*state)->src.state == TCPS_SYN_SENT) {
4006 /* Send RST for state mismatches during handshake */
4007 if (!(th->th_flags & TH_RST)) {
4008 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
4009
4010 if (th->th_flags & TH_SYN)
4011 ack++;
4012 if (th->th_flags & TH_FIN)
4013 ack++;
4014 pf_send_tcp((*state)->rule.ptr, pd->af,
4015 pd->dst, pd->src, th->th_dport,
4016 th->th_sport, ntohl(th->th_ack), ack,
4017 TH_RST|TH_ACK, 0, 0,
4018 (*state)->rule.ptr->return_ttl);
4019 }
4020 src->seqlo = 0;
4021 src->seqhi = 1;
4022 src->max_win = 1;
4023 } else if (pf_status.debug >= PF_DEBUG_MISC) {
4024 printf("pf: BAD state: ");
4025 pf_print_state(*state);
4026 pf_print_flags(th->th_flags);
4027 printf(" seq=%u ack=%u len=%u ackskew=%d pkts=%d:%d "
4028 "dir=%s,%s\n", seq, ack, pd->p_len, ackskew,
4029 (*state)->packets[0], (*state)->packets[1],
4030 direction == PF_IN ? "in" : "out",
4031 direction == (*state)->direction ? "fwd" : "rev");
4032 printf("pf: State failure on: %c %c %c %c | %c %c\n",
4033 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
4034 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
4035 ' ': '2',
4036 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
4037 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
4038 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
4039 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
4040 }
4041 return (PF_DROP);
4042 }
4043
4044 if (dst->scrub || src->scrub) {
4045 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
4046 src, dst, &copyback))
4047 return (PF_DROP);
4048 }
4049
4050 /* Any packets which have gotten here are to be passed */
4051
4052 /* translate source/destination address, if necessary */
4053 if (STATE_TRANSLATE(*state)) {
4054 if (direction == PF_OUT)
4055 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4056 &th->th_sum, &(*state)->gwy.addr,
4057 (*state)->gwy.port, 0, pd->af);
4058 else
4059 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4060 &th->th_sum, &(*state)->lan.addr,
4061 (*state)->lan.port, 0, pd->af);
4062 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4063 } else if (copyback) {
4064 /* Copyback sequence modulation or stateful scrub changes */
4065 m_copyback(m, off, sizeof(*th), (caddr_t)th);
4066 }
4067
4068 return (PF_PASS);
4069}
4070
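/*
 * pf_test_state_udp: look up the state entry matching this UDP packet
 * (keyed on address family, addresses and ports), advance the
 * SINGLE/MULTIPLE pseudo-states, refresh the expire timer and, when the
 * state is translated, rewrite the address/port and checksum before
 * copying the header back into the mbuf.
 */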
4071int
4072pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4073 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4074{
4075 struct pf_state_peer *src, *dst;
4076 struct pf_state key;
4077 struct udphdr *uh = pd->hdr.udp;
4078
4079 key.af = pd->af;
4080 key.proto = IPPROTO_UDP;
4081 if (direction == PF_IN) {
4082 PF_ACPY(&key.ext.addr, pd->src, key.af);
4083 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
4084 key.ext.port = uh->uh_sport;
4085 key.gwy.port = uh->uh_dport;
4086 } else {
4087 PF_ACPY(&key.lan.addr, pd->src, key.af);
4088 PF_ACPY(&key.ext.addr, pd->dst, key.af);
4089 key.lan.port = uh->uh_sport;
4090 key.ext.port = uh->uh_dport;
4091 }
4092
4093 STATE_LOOKUP();
4094
4095 if (direction == (*state)->direction) {
4096 src = &(*state)->src;
4097 dst = &(*state)->dst;
4098 } else {
4099 src = &(*state)->dst;
4100 dst = &(*state)->src;
4101 }
4102
4103 /* update states */
4104 if (src->state < PFUDPS_SINGLE)
4105 src->state = PFUDPS_SINGLE;
4106 if (dst->state == PFUDPS_SINGLE)
4107 dst->state = PFUDPS_MULTIPLE;
4108
4109 /* update expire time */
4110 (*state)->expire = time_second;
4111 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4112 (*state)->timeout = PFTM_UDP_MULTIPLE;
4113 else
4114 (*state)->timeout = PFTM_UDP_SINGLE;
4115
4116 /* translate source/destination address, if necessary */
4117 if (STATE_TRANSLATE(*state)) {
4118 if (direction == PF_OUT)
4119 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4120 &uh->uh_sum, &(*state)->gwy.addr,
4121 (*state)->gwy.port, 1, pd->af);
4122 else
4123 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4124 &uh->uh_sum, &(*state)->lan.addr,
4125 (*state)->lan.port, 1, pd->af);
4126 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4127 }
4128
4129 return (PF_PASS);
4130}
4131
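/*
 * pf_test_state_icmp: handle two cases.  ICMP queries/replies are
 * matched against their own state keyed on the ICMP id.  ICMP error
 * messages instead carry the packet that triggered them; the embedded
 * IP/IPv6 and transport headers are pulled out and the state of that
 * original connection is looked up, with the embedded headers rewritten
 * as well when the state is translated.
 */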
4132int
4133pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4134 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4135{
4136 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
4137 u_int16_t icmpid = 0;
4138 u_int16_t *icmpsum = NULL;
4139 u_int8_t icmptype = 0;
4140 int state_icmp = 0;
4141
4142 switch (pd->proto) {
4143#ifdef INET
4144 case IPPROTO_ICMP:
4145 icmptype = pd->hdr.icmp->icmp_type;
4146 icmpid = pd->hdr.icmp->icmp_id;
4147 icmpsum = &pd->hdr.icmp->icmp_cksum;
4148
4149 if (icmptype == ICMP_UNREACH ||
4150 icmptype == ICMP_SOURCEQUENCH ||
4151 icmptype == ICMP_REDIRECT ||
4152 icmptype == ICMP_TIMXCEED ||
4153 icmptype == ICMP_PARAMPROB)
4154 state_icmp++;
4155 break;
4156#endif /* INET */
4157#ifdef INET6
4158 case IPPROTO_ICMPV6:
4159 icmptype = pd->hdr.icmp6->icmp6_type;
4160 icmpid = pd->hdr.icmp6->icmp6_id;
4161 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4162
4163 if (icmptype == ICMP6_DST_UNREACH ||
4164 icmptype == ICMP6_PACKET_TOO_BIG ||
4165 icmptype == ICMP6_TIME_EXCEEDED ||
4166 icmptype == ICMP6_PARAM_PROB)
4167 state_icmp++;
4168 break;
4169#endif /* INET6 */
4170 }
4171
4172 if (!state_icmp) {
4173
4174 /*
4175 * ICMP query/reply message not related to a TCP/UDP packet.
4176 * Search for an ICMP state.
4177 */
4178 struct pf_state key;
4179
4180 key.af = pd->af;
4181 key.proto = pd->proto;
4182 if (direction == PF_IN) {
4183 PF_ACPY(&key.ext.addr, pd->src, key.af);
4184 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
4185 key.ext.port = icmpid;
4186 key.gwy.port = icmpid;
4187 } else {
4188 PF_ACPY(&key.lan.addr, pd->src, key.af);
4189 PF_ACPY(&key.ext.addr, pd->dst, key.af);
4190 key.lan.port = icmpid;
4191 key.ext.port = icmpid;
4192 }
4193
4194 STATE_LOOKUP();
4195
4196 (*state)->expire = time_second;
4197 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4198
4199 /* translate source/destination address, if necessary */
4200 if (PF_ANEQ(&(*state)->lan.addr, &(*state)->gwy.addr, pd->af)) {
4201 if (direction == PF_OUT) {
4202 switch (pd->af) {
4203#ifdef INET
4204 case AF_INET:
4205 pf_change_a(&saddr->v4.s_addr,
4206 pd->ip_sum,
4207 (*state)->gwy.addr.v4.s_addr, 0);
4208 break;
4209#endif /* INET */
4210#ifdef INET6
4211 case AF_INET6:
4212 pf_change_a6(saddr,
4213 &pd->hdr.icmp6->icmp6_cksum,
4214 &(*state)->gwy.addr, 0);
4215 m_copyback(m, off,
4216 sizeof(struct icmp6_hdr),
4217 (caddr_t)pd->hdr.icmp6);
4218 break;
4219#endif /* INET6 */
4220 }
4221 } else {
4222 switch (pd->af) {
4223#ifdef INET
4224 case AF_INET:
4225 pf_change_a(&daddr->v4.s_addr,
4226 pd->ip_sum,
4227 (*state)->lan.addr.v4.s_addr, 0);
4228 break;
4229#endif /* INET */
4230#ifdef INET6
4231 case AF_INET6:
4232 pf_change_a6(daddr,
4233 &pd->hdr.icmp6->icmp6_cksum,
4234 &(*state)->lan.addr, 0);
4235 m_copyback(m, off,
4236 sizeof(struct icmp6_hdr),
4237 (caddr_t)pd->hdr.icmp6);
4238 break;
4239#endif /* INET6 */
4240 }
4241 }
4242 }
4243
4244 return (PF_PASS);
4245
4246 } else {
4247 /*
4248 * ICMP error message in response to a TCP/UDP packet.
4249 * Extract the inner TCP/UDP header and search for that state.
4250 */
4251
4252 struct pf_pdesc pd2;
4253#ifdef INET
4254 struct ip h2;
4255#endif /* INET */
4256#ifdef INET6
4257 struct ip6_hdr h2_6;
4258 int terminal = 0;
4259#endif /* INET6 */
4260 int ipoff2 = 0;
4261 int off2 = 0;
4262
4263 pd2.af = pd->af;
4264 switch (pd->af) {
4265#ifdef INET
4266 case AF_INET:
4267 /* offset of h2 in mbuf chain */
4268 ipoff2 = off + ICMP_MINLEN;
4269
4270 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4271 NULL, NULL, pd2.af)) {
4272 DPFPRINTF(PF_DEBUG_MISC,
4273 ("pf: ICMP error message too short "
4274 "(ip)\n"));
4275 return (PF_DROP);
4276 }
4277 /*
4278 * ICMP error messages don't refer to non-first
4279 * fragments
4280 */
4281 /*
4282 * Note: We are dealing with an encapsulated
4283 * header. This means ip_off/ip_len are not
4284 * in host byte order!
4285 */
4286 if (h2.ip_off & htons(IP_OFFMASK))
4287 return (PF_DROP);
4288
4289 /* offset of protocol header that follows h2 */
4290 off2 = ipoff2 + (h2.ip_hl << 2);
4291
4292 pd2.proto = h2.ip_p;
4293 pd2.src = (struct pf_addr *)&h2.ip_src;
4294 pd2.dst = (struct pf_addr *)&h2.ip_dst;
4295 pd2.ip_sum = &h2.ip_sum;
4296 break;
4297#endif /* INET */
4298#ifdef INET6
4299 case AF_INET6:
4300 ipoff2 = off + sizeof(struct icmp6_hdr);
4301
4302 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4303 NULL, NULL, pd2.af)) {
4304 DPFPRINTF(PF_DEBUG_MISC,
4305 ("pf: ICMP error message too short "
4306 "(ip6)\n"));
4307 return (PF_DROP);
4308 }
4309 pd2.proto = h2_6.ip6_nxt;
4310 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4311 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4312 pd2.ip_sum = NULL;
4313 off2 = ipoff2 + sizeof(h2_6);
4314 do {
4315 switch (pd2.proto) {
4316 case IPPROTO_FRAGMENT:
4317 /*
4318 * ICMPv6 error messages for
4319 * non-first fragments
4320 */
4321 return (PF_DROP);
4322 case IPPROTO_AH:
4323 case IPPROTO_HOPOPTS:
4324 case IPPROTO_ROUTING:
4325 case IPPROTO_DSTOPTS: {
4326 /* get next header and header length */
4327 struct ip6_ext opt6;
4328
4329 if (!pf_pull_hdr(m, off2, &opt6,
4330 sizeof(opt6), NULL, NULL, pd2.af)) {
4331 DPFPRINTF(PF_DEBUG_MISC,
4332 ("pf: ICMPv6 short opt\n"));
4333 return (PF_DROP);
4334 }
4335 if (pd2.proto == IPPROTO_AH)
4336 off2 += (opt6.ip6e_len + 2) * 4;
4337 else
4338 off2 += (opt6.ip6e_len + 1) * 8;
4339 pd2.proto = opt6.ip6e_nxt;
4340 /* go to the next header */
4341 break;
4342 }
4343 default:
4344 terminal++;
4345 break;
4346 }
4347 } while (!terminal);
4348 break;
4349#endif /* INET6 */
4350 }
4351
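		/*
		 * pd2 now describes the embedded packet that triggered the
		 * ICMP error; match it against the state of the original
		 * connection.
		 */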
4352 switch (pd2.proto) {
4353 case IPPROTO_TCP: {
4354 struct tcphdr th;
4355 u_int32_t seq;
4356 struct pf_state key;
4357 struct pf_state_peer *src, *dst;
4358 u_int8_t dws;
4359 int copyback = 0;
4360
4361 /*
4362 * Only the first 8 bytes of the embedded TCP header are
4363 * guaranteed to be present. Don't access any TCP header
4364 * fields after th_seq; an ackskew test is not possible.
4365 */
4366 if (!pf_pull_hdr(m, off2, &th, 8, NULL, NULL, pd2.af)) {
4367 DPFPRINTF(PF_DEBUG_MISC,
4368 ("pf: ICMP error message too short "
4369 "(tcp)\n"));
4370 return (PF_DROP);
4371 }
4372
4373 key.af = pd2.af;
4374 key.proto = IPPROTO_TCP;
4375 if (direction == PF_IN) {
4376 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4377 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4378 key.ext.port = th.th_dport;
4379 key.gwy.port = th.th_sport;
4380 } else {
4381 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4382 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4383 key.lan.port = th.th_dport;
4384 key.ext.port = th.th_sport;
4385 }
4386
4387 STATE_LOOKUP();
4388
4389 if (direction == (*state)->direction) {
4390 src = &(*state)->dst;
4391 dst = &(*state)->src;
4392 } else {
4393 src = &(*state)->src;
4394 dst = &(*state)->dst;
4395 }
4396
4397 if (src->wscale && dst->wscale &&
4398 !(th.th_flags & TH_SYN))
4399 dws = dst->wscale & PF_WSCALE_MASK;
4400 else
4401 dws = 0;
4402
4403 /* Demodulate sequence number */
4404 seq = ntohl(th.th_seq) - src->seqdiff;
4405 if (src->seqdiff) {
4406 pf_change_a(&th.th_seq, icmpsum,
4407 htonl(seq), 0);
4408 copyback = 1;
4409 }
4410
4411 if (!SEQ_GEQ(src->seqhi, seq) ||
4412 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))) {
4413 if (pf_status.debug >= PF_DEBUG_MISC) {
4414 printf("pf: BAD ICMP %d:%d ",
4415 icmptype, pd->hdr.icmp->icmp_code);
4416 pf_print_host(pd->src, 0, pd->af);
4417 printf(" -> ");
4418 pf_print_host(pd->dst, 0, pd->af);
4419 printf(" state: ");
4420 pf_print_state(*state);
4421 printf(" seq=%u\n", seq);
4422 }
4423 return (PF_DROP);
4424 }
4425
4426 if (STATE_TRANSLATE(*state)) {
4427 if (direction == PF_IN) {
4428 pf_change_icmp(pd2.src, &th.th_sport,
4429 daddr, &(*state)->lan.addr,
4430 (*state)->lan.port, NULL,
4431 pd2.ip_sum, icmpsum,
4432 pd->ip_sum, 0, pd2.af);
4433 } else {
4434 pf_change_icmp(pd2.dst, &th.th_dport,
4435 saddr, &(*state)->gwy.addr,
4436 (*state)->gwy.port, NULL,
4437 pd2.ip_sum, icmpsum,
4438 pd->ip_sum, 0, pd2.af);
4439 }
4440 copyback = 1;
4441 }
4442
4443 if (copyback) {
4444 switch (pd2.af) {
4445#ifdef INET
4446 case AF_INET:
4447 m_copyback(m, off, ICMP_MINLEN,
4448 (caddr_t)pd->hdr.icmp);
4449 m_copyback(m, ipoff2, sizeof(h2),
4450 (caddr_t)&h2);
4451 break;
4452#endif /* INET */
4453#ifdef INET6
4454 case AF_INET6:
4455 m_copyback(m, off,
4456 sizeof(struct icmp6_hdr),
4457 (caddr_t)pd->hdr.icmp6);
4458 m_copyback(m, ipoff2, sizeof(h2_6),
4459 (caddr_t)&h2_6);
4460 break;
4461#endif /* INET6 */
4462 }
4463 m_copyback(m, off2, 8, (caddr_t)&th);
4464 }
4465
4466 return (PF_PASS);
4467 break;
4468 }
4469 case IPPROTO_UDP: {
4470 struct udphdr uh;
4471 struct pf_state key;
4472
4473 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4474 NULL, NULL, pd2.af)) {
4475 DPFPRINTF(PF_DEBUG_MISC,
4476 ("pf: ICMP error message too short "
4477 "(udp)\n"));
4478 return (PF_DROP);
4479 }
4480
4481 key.af = pd2.af;
4482 key.proto = IPPROTO_UDP;
4483 if (direction == PF_IN) {
4484 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4485 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4486 key.ext.port = uh.uh_dport;
4487 key.gwy.port = uh.uh_sport;
4488 } else {
4489 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4490 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4491 key.lan.port = uh.uh_dport;
4492 key.ext.port = uh.uh_sport;
4493 }
4494
4495 STATE_LOOKUP();
4496
4497 if (STATE_TRANSLATE(*state)) {
4498 if (direction == PF_IN) {
4499 pf_change_icmp(pd2.src, &uh.uh_sport,
4500 daddr, &(*state)->lan.addr,
4501 (*state)->lan.port, &uh.uh_sum,
4502 pd2.ip_sum, icmpsum,
4503 pd->ip_sum, 1, pd2.af);
4504 } else {
4505 pf_change_icmp(pd2.dst, &uh.uh_dport,
4506 saddr, &(*state)->gwy.addr,
4507 (*state)->gwy.port, &uh.uh_sum,
4508 pd2.ip_sum, icmpsum,
4509 pd->ip_sum, 1, pd2.af);
4510 }
4511 switch (pd2.af) {
4512#ifdef INET
4513 case AF_INET:
4514 m_copyback(m, off, ICMP_MINLEN,
4515 (caddr_t)pd->hdr.icmp);
4516 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4517 break;
4518#endif /* INET */
4519#ifdef INET6
4520 case AF_INET6:
4521 m_copyback(m, off,
4522 sizeof(struct icmp6_hdr),
4523 (caddr_t)pd->hdr.icmp6);
4524 m_copyback(m, ipoff2, sizeof(h2_6),
4525 (caddr_t)&h2_6);
4526 break;
4527#endif /* INET6 */
4528 }
4529 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4530 }
4531
4532 return (PF_PASS);
4533 break;
4534 }
4535#ifdef INET
4536 case IPPROTO_ICMP: {
4537 struct icmp iih;
4538 struct pf_state key;
4539
4540 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4541 NULL, NULL, pd2.af)) {
4542 DPFPRINTF(PF_DEBUG_MISC,
4543 ("pf: ICMP error message too short i"
4544 "(icmp)\n"));
4545 return (PF_DROP);
4546 }
4547
4548 key.af = pd2.af;
4549 key.proto = IPPROTO_ICMP;
4550 if (direction == PF_IN) {
4551 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4552 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4553 key.ext.port = iih.icmp_id;
4554 key.gwy.port = iih.icmp_id;
4555 } else {
4556 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4557 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4558 key.lan.port = iih.icmp_id;
4559 key.ext.port = iih.icmp_id;
4560 }
4561
4562 STATE_LOOKUP();
4563
4564 if (STATE_TRANSLATE(*state)) {
4565 if (direction == PF_IN) {
4566 pf_change_icmp(pd2.src, &iih.icmp_id,
4567 daddr, &(*state)->lan.addr,
4568 (*state)->lan.port, NULL,
4569 pd2.ip_sum, icmpsum,
4570 pd->ip_sum, 0, AF_INET);
4571 } else {
4572 pf_change_icmp(pd2.dst, &iih.icmp_id,
4573 saddr, &(*state)->gwy.addr,
4574 (*state)->gwy.port, NULL,
4575 pd2.ip_sum, icmpsum,
4576 pd->ip_sum, 0, AF_INET);
4577 }
4578 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4579 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4580 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4581 }
4582
4583 return (PF_PASS);
4584 break;
4585 }
4586#endif /* INET */
4587#ifdef INET6
4588 case IPPROTO_ICMPV6: {
4589 struct icmp6_hdr iih;
4590 struct pf_state key;
4591
4592 if (!pf_pull_hdr(m, off2, &iih,
4593 sizeof(struct icmp6_hdr), NULL, NULL, pd2.af)) {
4594 DPFPRINTF(PF_DEBUG_MISC,
4595 ("pf: ICMP error message too short "
4596 "(icmp6)\n"));
4597 return (PF_DROP);
4598 }
4599
4600 key.af = pd2.af;
4601 key.proto = IPPROTO_ICMPV6;
4602 if (direction == PF_IN) {
4603 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4604 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4605 key.ext.port = iih.icmp6_id;
4606 key.gwy.port = iih.icmp6_id;
4607 } else {
4608 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4609 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4610 key.lan.port = iih.icmp6_id;
4611 key.ext.port = iih.icmp6_id;
4612 }
4613
4614 STATE_LOOKUP();
4615
4616 if (STATE_TRANSLATE(*state)) {
4617 if (direction == PF_IN) {
4618 pf_change_icmp(pd2.src, &iih.icmp6_id,
4619 daddr, &(*state)->lan.addr,
4620 (*state)->lan.port, NULL,
4621 pd2.ip_sum, icmpsum,
4622 pd->ip_sum, 0, AF_INET6);
4623 } else {
4624 pf_change_icmp(pd2.dst, &iih.icmp6_id,
4625 saddr, &(*state)->gwy.addr,
4626 (*state)->gwy.port, NULL,
4627 pd2.ip_sum, icmpsum,
4628 pd->ip_sum, 0, AF_INET6);
4629 }
4630 m_copyback(m, off, sizeof(struct icmp6_hdr),
4631 (caddr_t)pd->hdr.icmp6);
4632 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4633 m_copyback(m, off2, sizeof(struct icmp6_hdr),
4634 (caddr_t)&iih);
4635 }
4636
4637 return (PF_PASS);
4638 break;
4639 }
4640#endif /* INET6 */
4641 default: {
4642 struct pf_state key;
4643
4644 key.af = pd2.af;
4645 key.proto = pd2.proto;
4646 if (direction == PF_IN) {
4647 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
4648 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
4649 key.ext.port = 0;
4650 key.gwy.port = 0;
4651 } else {
4652 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
4653 PF_ACPY(&key.ext.addr, pd2.src, key.af);
4654 key.lan.port = 0;
4655 key.ext.port = 0;
4656 }
4657
4658 STATE_LOOKUP();
4659
4660 if (STATE_TRANSLATE(*state)) {
4661 if (direction == PF_IN) {
4662 pf_change_icmp(pd2.src, NULL,
4663 daddr, &(*state)->lan.addr,
4664 0, NULL,
4665 pd2.ip_sum, icmpsum,
4666 pd->ip_sum, 0, pd2.af);
4667 } else {
4668 pf_change_icmp(pd2.dst, NULL,
4669 saddr, &(*state)->gwy.addr,
4670 0, NULL,
4671 pd2.ip_sum, icmpsum,
4672 pd->ip_sum, 0, pd2.af);
4673 }
4674 switch (pd2.af) {
4675#ifdef INET
4676 case AF_INET:
4677 m_copyback(m, off, ICMP_MINLEN,
4678 (caddr_t)pd->hdr.icmp);
4679 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4680 break;
4681#endif /* INET */
4682#ifdef INET6
4683 case AF_INET6:
4684 m_copyback(m, off,
4685 sizeof(struct icmp6_hdr),
4686 (caddr_t)pd->hdr.icmp6);
4687 m_copyback(m, ipoff2, sizeof(h2_6),
4688 (caddr_t)&h2_6);
4689 break;
4690#endif /* INET6 */
4691 }
4692 }
4693
4694 return (PF_PASS);
4695 break;
4696 }
4697 }
4698 }
4699}
4700
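/*
 * pf_test_state_other: state matching for protocols other than
 * TCP/UDP/ICMP.  The key uses addresses only (ports are zero), the
 * pseudo-state machine is the same SINGLE/MULTIPLE scheme used for UDP,
 * and translation rewrites addresses only.
 */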
4701int
4702pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
4703 struct pf_pdesc *pd)
4704{
4705 struct pf_state_peer *src, *dst;
4706 struct pf_state key;
4707
4708 key.af = pd->af;
4709 key.proto = pd->proto;
4710 if (direction == PF_IN) {
4711 PF_ACPY(&key.ext.addr, pd->src, key.af);
4712 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
4713 key.ext.port = 0;
4714 key.gwy.port = 0;
4715 } else {
4716 PF_ACPY(&key.lan.addr, pd->src, key.af);
4717 PF_ACPY(&key.ext.addr, pd->dst, key.af);
4718 key.lan.port = 0;
4719 key.ext.port = 0;
4720 }
4721
4722 STATE_LOOKUP();
4723
4724 if (direction == (*state)->direction) {
4725 src = &(*state)->src;
4726 dst = &(*state)->dst;
4727 } else {
4728 src = &(*state)->dst;
4729 dst = &(*state)->src;
4730 }
4731
4732 /* update states */
4733 if (src->state < PFOTHERS_SINGLE)
4734 src->state = PFOTHERS_SINGLE;
4735 if (dst->state == PFOTHERS_SINGLE)
4736 dst->state = PFOTHERS_MULTIPLE;
4737
4738 /* update expire time */
4739 (*state)->expire = time_second;
4740 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
4741 (*state)->timeout = PFTM_OTHER_MULTIPLE;
4742 else
4743 (*state)->timeout = PFTM_OTHER_SINGLE;
4744
4745 /* translate source/destination address, if necessary */
4746 if (STATE_TRANSLATE(*state)) {
4747 if (direction == PF_OUT)
4748 switch (pd->af) {
4749#ifdef INET
4750 case AF_INET:
4751 pf_change_a(&pd->src->v4.s_addr,
4752 pd->ip_sum, (*state)->gwy.addr.v4.s_addr,
4753 0);
4754 break;
4755#endif /* INET */
4756#ifdef INET6
4757 case AF_INET6:
4758 PF_ACPY(pd->src, &(*state)->gwy.addr, pd->af);
4759 break;
4760#endif /* INET6 */
4761 }
4762 else
4763 switch (pd->af) {
4764#ifdef INET
4765 case AF_INET:
4766 pf_change_a(&pd->dst->v4.s_addr,
4767 pd->ip_sum, (*state)->lan.addr.v4.s_addr,
4768 0);
4769 break;
4770#endif /* INET */
4771#ifdef INET6
4772 case AF_INET6:
4773 PF_ACPY(pd->dst, &(*state)->lan.addr, pd->af);
4774 break;
4775#endif /* INET6 */
4776 }
4777 }
4778
4779 return (PF_PASS);
4780}
4781
4782/*
4783 * ipoff and off are measured from the start of the mbuf chain.
4784 * h must be at "ipoff" on the mbuf chain.
4785 */
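/*
 * The requested bytes are copied into the caller-supplied buffer with
 * m_copydata(), so callers always see a contiguous copy of the header
 * even when it spans several mbufs in the chain.
 */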
4786void *
4787pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
4788 u_short *actionp, u_short *reasonp, sa_family_t af)
4789{
4790 switch (af) {
4791#ifdef INET
4792 case AF_INET: {
4793 struct ip *h = mtod(m, struct ip *);
4794 u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3;
4795
4796 if (fragoff) {
4797 if (fragoff >= len)
4798 ACTION_SET(actionp, PF_PASS);
4799 else {
4800 ACTION_SET(actionp, PF_DROP);
4801 REASON_SET(reasonp, PFRES_FRAG);
4802 }
4803 return (NULL);
4804 }
4805 if (m->m_pkthdr.len < off + len ||
4806 h->ip_len < off + len) {
4807 ACTION_SET(actionp, PF_DROP);
4808 REASON_SET(reasonp, PFRES_SHORT);
4809 return (NULL);
4810 }
4811 break;
4812 }
4813#endif /* INET */
4814#ifdef INET6
4815 case AF_INET6: {
4816 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
4817
4818 if (m->m_pkthdr.len < off + len ||
4819 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
4820 (unsigned)(off + len)) {
4821 ACTION_SET(actionp, PF_DROP);
4822 REASON_SET(reasonp, PFRES_SHORT);
4823 return (NULL);
4824 }
4825 break;
4826 }
4827#endif /* INET6 */
4828 }
4829 m_copydata(m, off, len, p);
4830 return (p);
4831}
4832
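/*
 * pf_routable: return 1 if the routing table has a route to the given
 * address, 0 otherwise.  Note that, as written, only an IPv4 sockaddr
 * is built, regardless of the af argument.
 */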
4833int
4834pf_routable(struct pf_addr *addr, sa_family_t af)
4835{
4836 struct sockaddr_in *dst;
4837 struct route ro;
4838 int ret = 0;
4839
4840 bzero(&ro, sizeof(ro));
4841 dst = satosin(&ro.ro_dst);
4842 dst->sin_family = af;
4843 dst->sin_len = sizeof(*dst);
4844 dst->sin_addr = addr->v4;
4845 rtalloc_ign(&ro, (RTF_CLONING | RTF_PRCLONING));
4846
4847 if (ro.ro_rt != NULL) {
4848 ret = 1;
4849 RTFREE(ro.ro_rt);
4850 }
4851
4852 return (ret);
4853}
4854
4855#ifdef INET
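/*
 * pf_route: implement route-to, reply-to and dup-to for IPv4.  dup-to
 * works on a copy of the mbuf; fastroute does a plain routing lookup;
 * otherwise the next hop comes from the rule's address pool (or the
 * state's rt_addr).  If the packet leaves through a different interface
 * than it arrived on, pf_test() is re-run for that interface.  The
 * packet is then sent directly or fragmented to the outgoing MTU;
 * pf_routed limits routing loops to a few passes.
 */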
4856void
4857pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
4858 struct pf_state *s)
4859{
4860 struct mbuf *m0, *m1;
4861 struct route iproute;
4862 struct route *ro = NULL;
4863 struct sockaddr_in *dst;
4864 struct ip *ip;
4865 struct ifnet *ifp = NULL;
4866 struct pf_addr naddr;
4867 struct pf_src_node *sn = NULL;
4868 int error = 0;
4869 int sw_csum;
4870
4871 if (m == NULL || *m == NULL || r == NULL ||
4872 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
4873 panic("pf_route: invalid parameters");
4874
4875 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
4876 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
4877 (*m)->m_pkthdr.pf_routed = 1;
4878 } else {
4879 if ((*m)->m_pkthdr.pf_routed > 3) {
4880 m0 = *m;
4881 *m = NULL;
4882 goto bad;
4883 }
4884 (*m)->m_pkthdr.pf_routed++;
4885 }
4886
4887 if (r->rt == PF_DUPTO) {
4888 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
4889 return;
4890 } else {
4891 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
4892 return;
4893 m0 = *m;
4894 }
4895
4896 if (m0->m_len < sizeof(struct ip))
4897 panic("pf_route: m0->m_len < sizeof(struct ip)");
4898 ip = mtod(m0, struct ip *);
4899
4900 ro = &iproute;
4901 bzero((caddr_t)ro, sizeof(*ro));
4902 dst = satosin(&ro->ro_dst);
4903 dst->sin_family = AF_INET;
4904 dst->sin_len = sizeof(*dst);
4905 dst->sin_addr = ip->ip_dst;
4906
4907 if (r->rt == PF_FASTROUTE) {
4908 rtalloc(ro);
4909 if (ro->ro_rt == 0) {
4910 ipstat.ips_noroute++;
4911 goto bad;
4912 }
4913
4914 ifp = ro->ro_rt->rt_ifp;
4915 ro->ro_rt->rt_use++;
4916
4917 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
4918 dst = satosin(ro->ro_rt->rt_gateway);
4919 } else {
4920 if (TAILQ_EMPTY(&r->rpool.list))
4921 panic("pf_route: TAILQ_EMPTY(&r->rpool.list)");
4922 if (s == NULL) {
4923 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
4924 &naddr, NULL, &sn);
4925 if (!PF_AZERO(&naddr, AF_INET))
4926 dst->sin_addr.s_addr = naddr.v4.s_addr;
4927 ifp = r->rpool.cur->kif ?
4928 r->rpool.cur->kif->pfik_ifp : NULL;
4929 } else {
4930 if (!PF_AZERO(&s->rt_addr, AF_INET))
4931 dst->sin_addr.s_addr =
4932 s->rt_addr.v4.s_addr;
4933 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
4934 }
4935 }
4936 if (ifp == NULL)
4937 goto bad;
4938
4939 if (oifp != ifp) {
4940 if (pf_test(PF_OUT, ifp, &m0) != PF_PASS)
4941 goto bad;
4942 else if (m0 == NULL)
4943 goto done;
4944 if (m0->m_len < sizeof(struct ip))
4945 panic("pf_route: m0->m_len < sizeof(struct ip)");
4946 ip = mtod(m0, struct ip *);
4947 }
4948
4949 /* Copied from ip_output. */
4950 m0->m_pkthdr.csum_flags |= CSUM_IP;
4951 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist;
4952 if (sw_csum & CSUM_DELAY_DATA) {
4953 in_delayed_cksum(m0);
4954 sw_csum &= ~CSUM_DELAY_DATA;
4955 }
4956 m0->m_pkthdr.csum_flags &= ifp->if_hwassist;
4957
4958 /*
4959 * If the packet is small enough for the interface, or the interface
4960 * will take care of the fragmentation for us, we can just send it directly.
4961 */
4962 if (ip->ip_len <= ifp->if_mtu || ((ifp->if_hwassist & CSUM_FRAGMENT) &&
4963 (ip->ip_off & IP_DF) == 0)) {
4964 ip->ip_len = htons(ip->ip_len);
4965 ip->ip_off = htons(ip->ip_off);
4966 ip->ip_sum = 0;
4967 if (sw_csum & CSUM_DELAY_IP) {
4968 /* From KAME */
4969 if (ip->ip_v == IPVERSION &&
4970 (ip->ip_hl << 2) == sizeof(*ip)) {
4971 ip->ip_sum = in_cksum_hdr(ip);
4972 } else {
4973 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
4974 }
4975 }
4976
4977 lwkt_serialize_enter(ifp->if_serializer);
4978 error = (*ifp->if_output)(ifp, m0, sintosa(dst), ro->ro_rt);
4979 lwkt_serialize_exit(ifp->if_serializer);
4980 goto done;
4981 }
4982
4983 /*
4984 * Too large for interface; fragment if possible.
4985 * Must be able to put at least 8 bytes per fragment.
4986 */
4987 if (ip->ip_off & IP_DF) {
4988 ipstat.ips_cantfrag++;
4989 if (r->rt != PF_DUPTO) {
4990 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
4991 ifp->if_mtu);
4992 goto done;
4993 } else
4994 goto bad;
4995 }
4996
4997 m1 = m0;
4998 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum);
4999 if (error)
5000 goto bad;
5001
5002 for (m0 = m1; m0; m0 = m1) {
5003 m1 = m0->m_nextpkt;
5004 m0->m_nextpkt = 0;
5005 if (error == 0) {
5006 lwkt_serialize_enter(ifp->if_serializer);
5007 error = (*ifp->if_output)(ifp, m0, sintosa(dst),
5008 NULL);
5009 lwkt_serialize_exit(ifp->if_serializer);
5010 } else {
5011 m_freem(m0);
5012 }
5013 }
5014
5015 if (error == 0)
5016 ipstat.ips_fragmented++;
5017
5018done:
5019 if (r->rt != PF_DUPTO)
5020 *m = NULL;
5021 if (ro == &iproute && ro->ro_rt)
5022 RTFREE(ro->ro_rt);
5023 return;
5024
5025bad:
5026 m_freem(m0);
5027 goto done;
5028}
5029#endif /* INET */
5030
5031#ifdef INET6
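/*
 * pf_route6: IPv6 counterpart of pf_route().  PF_FASTROUTE simply hands
 * the packet to ip6_output(); otherwise the next hop is taken from the
 * rule's pool or the state, pf_test6() is re-run when the outgoing
 * interface differs from the incoming one, and packets larger than the
 * MTU are answered with ICMP6_PACKET_TOO_BIG instead of being
 * fragmented.
 */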
5032void
5033pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5034 struct pf_state *s)
5035{
5036 struct mbuf *m0;
5037 struct route_in6 ip6route;
5038 struct route_in6 *ro;
5039 struct sockaddr_in6 *dst;
5040 struct ip6_hdr *ip6;
5041 struct ifnet *ifp = NULL;
5042 struct pf_addr naddr;
5043 struct pf_src_node *sn = NULL;
5044 int error = 0;
5045
5046 if (m == NULL || *m == NULL || r == NULL ||
5047 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
5048 panic("pf_route6: invalid parameters");
5049
5050 if (((*m)->m_pkthdr.fw_flags & PF_MBUF_ROUTED) == 0) {
5051 (*m)->m_pkthdr.fw_flags |= PF_MBUF_ROUTED;
5052 (*m)->m_pkthdr.pf_routed = 1;
5053 } else {
5054 if ((*m)->m_pkthdr.pf_routed > 3) {
5055 m0 = *m;
5056 *m = NULL;
5057 goto bad;
5058 }
5059 (*m)->m_pkthdr.pf_routed++;
5060 }
5061
5062 if (r->rt == PF_DUPTO) {
5063 if ((m0 = m_dup(*m, MB_DONTWAIT)) == NULL)
5064 return;
5065 } else {
5066 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
5067 return;
5068 m0 = *m;
5069 }
5070
5071 if (m0->m_len < sizeof(struct ip6_hdr))
5072 panic("pf_route6: m0->m_len < sizeof(struct ip6_hdr)");
5073 ip6 = mtod(m0, struct ip6_hdr *);
5074
5075 ro = &ip6route;
5076 bzero((caddr_t)ro, sizeof(*ro));
5077 dst = (struct sockaddr_in6 *)&ro->ro_dst;
5078 dst->sin6_family = AF_INET6;
5079 dst->sin6_len = sizeof(*dst);
5080 dst->sin6_addr = ip6->ip6_dst;
5081
5082 /* Cheat. */
5083 if (r->rt == PF_FASTROUTE) {
5084 m0->m_pkthdr.fw_flags |= PF_MBUF_GENERATED;
5085 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5086 return;
5087 }
5088
5089 if (TAILQ_EMPTY(&r->rpool.list))
5090 panic("pf_route6: TAILQ_EMPTY(&r->rpool.list)");
5091 if (s == NULL) {
5092 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5093 &naddr, NULL, &sn);
5094 if (!PF_AZERO(&naddr, AF_INET6))
5095 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
5096 &naddr, AF_INET6);
5097 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5098 } else {
5099 if (!PF_AZERO(&s->rt_addr, AF_INET6))
5100 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
5101 &s->rt_addr, AF_INET6);
5102 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5103 }
5104 if (ifp == NULL)
5105 goto bad;
5106
5107 if (oifp != ifp) {
5108 if (pf_test6(PF_OUT, ifp, &m0) != PF_PASS)
5109 goto bad;
5110 else if (m0 == NULL)
5111 goto done;
5112 if (m0->m_len < sizeof(struct ip6_hdr))
5113 panic("pf_route6: m0->m_len < sizeof(struct ip6_hdr)");
5114 ip6 = mtod(m0, struct ip6_hdr *);
5115 }
5116
5117 /*
5118 * If the packet is too large for the outgoing interface,
5119 * send back an icmp6 error.
5120 */
5121 if (IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr))
5122 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5123 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
5124 error = nd6_output(ifp, ifp, m0, dst, NULL);
5125 } else {
5126 in6_ifstat_inc(ifp, ifs6_in_toobig);
5127 if (r->rt != PF_DUPTO)
5128 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5129 else
5130 goto bad;
5131 }
5132
5133done:
5134 if (r->rt != PF_DUPTO)
5135 *m = NULL;
5136 return;
5137
5138bad:
5139 m_freem(m0);
5140 goto done;
5141}
5142#endif /* INET6 */
5143
5144
5145/*
5146 * Check the protocol (TCP/UDP/ICMP/ICMP6) checksum and set the mbuf flag.
5147 * 'off' is the offset where the protocol header starts; 'len' is the
5148 * total length of the protocol header plus payload.
5149 * Returns 0 when the checksum is valid, otherwise returns 1.
5150 */
5151/*
5152 * XXX
5153 * FreeBSD supports cksum offload for the following drivers:
5154 * em(4), gx(4), lge(4), nge(4), ti(4), xl(4)
5155 * If we could make full use of it we would outperform ipfw/ipfilter in
5156 * very heavy traffic.
5157 * I have not tested it because I don't have NICs that support cksum offload.
5158 * (There might be problems. Typical phenomena would be
5159 * 1. No route message for UDP packet.
5160 * 2. No connection acceptance from external hosts regardless of rule set.)
5161 */
5162int
5163pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
5164 sa_family_t af)
5165{
5166 u_int16_t sum = 0;
5167 int hw_assist = 0;
5168 struct ip *ip;
5169
5170 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5171 return (1);
5172 if (m->m_pkthdr.len < off + len)
5173 return (1);
5174
5175 switch (p) {
5176 case IPPROTO_TCP:
5177 case IPPROTO_UDP:
5178 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5179 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5180 sum = m->m_pkthdr.csum_data;
5181 } else {
5182 ip = mtod(m, struct ip *);
5183 sum = in_pseudo(ip->ip_src.s_addr,
5184 ip->ip_dst.s_addr, htonl((u_short)len +
5185 m->m_pkthdr.csum_data + p));
5186 }
5187 sum ^= 0xffff;
5188 ++hw_assist;
5189 }
5190 break;
5191 case IPPROTO_ICMP:
5192#ifdef INET6
5193 case IPPROTO_ICMPV6:
5194#endif /* INET6 */
5195 break;
5196 default:
5197 return (1);
5198 }
5199
5200 if (!hw_assist) {
5201 switch (af) {
5202 case AF_INET:
5203 if (p == IPPROTO_ICMP) {
5204 if (m->m_len < off)
5205 return (1);
5206 m->m_data += off;
5207 m->m_len -= off;
5208 sum = in_cksum(m, len);
5209 m->m_data -= off;
5210 m->m_len += off;
5211 } else {
5212 if (m->m_len < sizeof(struct ip))
5213 return (1);
5214 sum = in_cksum_range(m, p, off, len);
5215 if (sum == 0) {
5216 m->m_pkthdr.csum_flags |=
5217 (CSUM_DATA_VALID |
5218 CSUM_PSEUDO_HDR);
5219 m->m_pkthdr.csum_data = 0xffff;
5220 }
5221 }
5222 break;
5223#ifdef INET6
5224 case AF_INET6:
5225 if (m->m_len < sizeof(struct ip6_hdr))
5226 return (1);
5227 sum = in6_cksum(m, p, off, len);
5228 /*
5229 * XXX
5230 * IPv6 H/W cksum off-load not supported yet!
5231 *
5232 * if (sum == 0) {
5233 * m->m_pkthdr.csum_flags |=
5234 * (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
5235 * m->m_pkthdr.csum_data = 0xffff;
5236 *}
5237 */
5238 break;
5239#endif /* INET6 */
5240 default:
5241 return (1);
5242 }
5243 }
5244 if (sum) {
5245 switch (p) {
5246 case IPPROTO_TCP:
5247 tcpstat.tcps_rcvbadsum++;
5248 break;
5249 case IPPROTO_UDP:
5250 udpstat.udps_badsum++;
5251 break;
5252 case IPPROTO_ICMP:
5253 icmpstat.icps_checksum++;
5254 break;
5255#ifdef INET6
5256 case IPPROTO_ICMPV6:
5257 icmp6stat.icp6s_checksum++;
5258 break;
5259#endif /* INET6 */
5260 }
5261 return (1);
5262 }
5263 return (0);
5264}
5265
5266#ifdef INET
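/*
 * pf_test: main IPv4 entry point.  Normalizes/reassembles the packet,
 * pulls the transport header, verifies the protocol checksum on input,
 * then tries to match an existing state before evaluating the rules.
 * Afterwards it applies the IP-options check, ALTQ tagging, statistics
 * and table accounting, and finally pf_route() when the matching rule
 * has a route target.
 */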
5267int
5268pf_test(int dir, struct ifnet *ifp, struct mbuf **m0)
5269{
5270 struct pfi_kif *kif;
5271 u_short action, reason = 0, log = 0;
5272 struct mbuf *m = *m0;
5273 struct ip *h = NULL;
5274 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
5275 struct pf_state *s = NULL;
5276 struct pf_ruleset *ruleset = NULL;
5277 struct pf_pdesc pd;
5278 int off, dirndx, pqid = 0;
5279
5280 if (!pf_status.running || (m->m_pkthdr.fw_flags & PF_MBUF_GENERATED))
5281 return (PF_PASS);
5282
5283 kif = pfi_index2kif[ifp->if_index];
5284 if (kif == NULL)
5285 return (PF_DROP);
5286
5287#ifdef DIAGNOSTIC
5288 if ((m->m_flags & M_PKTHDR) == 0)
5289 panic("non-M_PKTHDR is passed to pf_test");
5290#endif
5291
5292 memset(&pd, 0, sizeof(pd));
5293 if (m->m_pkthdr.len < (int)sizeof(*h)) {
5294 action = PF_DROP;
5295 REASON_SET(&reason, PFRES_SHORT);
5296 log = 1;
5297 goto done;
5298 }
5299
5300 /* We do IP header normalization and packet reassembly here */
5301 if (pf_normalize_ip(m0, dir, kif, &reason) != PF_PASS) {
5302 action = PF_DROP;
5303 goto done;
5304 }
5305 m = *m0;
5306 h = mtod(m, struct ip *);
5307
5308 off = h->ip_hl << 2;
5309 if (off < (int)sizeof(*h)) {
5310 action = PF_DROP;
5311 REASON_SET(&reason, PFRES_SHORT);
5312 log = 1;
5313 goto done;
5314 }
5315
5316 pd.src = (struct pf_addr *)&h->ip_src;
5317 pd.dst = (struct pf_addr *)&h->ip_dst;
5318 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
5319 pd.ip_sum = &h->ip_sum;
5320 pd.proto = h->ip_p;
5321 pd.af = AF_INET;
5322 pd.tos = h->ip_tos;
5323 pd.tot_len = h->ip_len;
5324
5325 /* handle fragments that didn't get reassembled by normalization */
5326 if (h->ip_off & (IP_MF | IP_OFFMASK)) {
5327 action = pf_test_fragment(&r, dir, kif, m, h,
5328 &pd, &a, &ruleset);
5329 goto done;
5330 }
5331
5332 switch (h->ip_p) {
5333
5334 case IPPROTO_TCP: {
5335 struct tcphdr th;
5336
5337 pd.hdr.tcp = &th;
5338 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5339 &action, &reason, AF_INET)) {
5340 log = action != PF_PASS;
5341 goto done;
5342 }
5343 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5344 h->ip_len - off, IPPROTO_TCP, AF_INET)) {
5345 action = PF_DROP;
5346 goto done;
5347 }
5348 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5349 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5350 pqid = 1;
5351 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5352 if (action == PF_DROP)
5353 goto done;
5354 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5355 &reason);
5356 if (action == PF_PASS) {
5357#if NPFSYNC
5358 pfsync_update_state(s);
5359#endif
5360 r = s->rule.ptr;
5361 a = s->anchor.ptr;
5362 log = s->log;
5363 } else if (s == NULL)
5364 action = pf_test_tcp(&r, &s, dir, kif,
5365 m, off, h, &pd, &a, &ruleset);
5366 break;
5367 }
5368
5369 case IPPROTO_UDP: {
5370 struct udphdr uh;
5371
5372 pd.hdr.udp = &uh;
5373 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5374 &action, &reason, AF_INET)) {
5375 log = action != PF_PASS;
5376 goto done;
5377 }
5378 if (dir == PF_IN && uh.uh_sum && pf_check_proto_cksum(m,
5379 off, h->ip_len - off, IPPROTO_UDP, AF_INET)) {
5380 action = PF_DROP;
5381 goto done;
5382 }
5383 if (uh.uh_dport == 0 ||
5384 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5385 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5386 action = PF_DROP;
5387 goto done;
5388 }
5389 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5390 if (action == PF_PASS) {
5391#if NPFSYNC
5392 pfsync_update_state(s);
5393#endif
5394 r = s->rule.ptr;
5395 a = s->anchor.ptr;
5396 log = s->log;
5397 } else if (s == NULL)
5398 action = pf_test_udp(&r, &s, dir, kif,
5399 m, off, h, &pd, &a, &ruleset);
5400 break;
5401 }
5402
5403 case IPPROTO_ICMP: {
5404 struct icmp ih;
5405
5406 pd.hdr.icmp = &ih;
5407 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5408 &action, &reason, AF_INET)) {
5409 log = action != PF_PASS;
5410 goto done;
5411 }
5412 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5413 h->ip_len - off, IPPROTO_ICMP, AF_INET)) {
5414 action = PF_DROP;
5415 goto done;
5416 }
5417 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd);
5418 if (action == PF_PASS) {
5419#if NPFSYNC
5420 pfsync_update_state(s);
5421#endif
5422 r = s->rule.ptr;
5423 a = s->anchor.ptr;
5424 log = s->log;
5425 } else if (s == NULL)
5426 action = pf_test_icmp(&r, &s, dir, kif,
5427 m, off, h, &pd, &a, &ruleset);
5428 break;
5429 }
5430
5431 default:
5432 action = pf_test_state_other(&s, dir, kif, &pd);
5433 if (action == PF_PASS) {
5434#if NPFSYNC
5435 pfsync_update_state(s);
5436#endif
5437 r = s->rule.ptr;
5438 a = s->anchor.ptr;
5439 log = s->log;
5440 } else if (s == NULL)
5441 action = pf_test_other(&r, &s, dir, kif, m, off, h,
5442 &pd, &a, &ruleset);
5443 break;
5444 }
5445
5446done:
5447 if (action == PF_PASS && h->ip_hl > 5 &&
5448 !((s && s->allow_opts) || r->allow_opts)) {
5449 action = PF_DROP;
5450 REASON_SET(&reason, PFRES_SHORT);
5451 log = 1;
5452 DPFPRINTF(PF_DEBUG_MISC,
5453 ("pf: dropping packet with ip options\n"));
5454 }
5455
5456#ifdef ALTQ
5457 if (action == PF_PASS && r->qid) {
5458 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
5459 if (pd.tos == IPTOS_LOWDELAY)
5460 m->m_pkthdr.altq_qid = r->pqid;
5461 else
5462 m->m_pkthdr.altq_qid = r->qid;
5463 m->m_pkthdr.ecn_af = AF_INET;
5464 m->m_pkthdr.header = h;
5465 }
5466#endif
5467
5468 /*
5469 * connections redirected to loopback should not match sockets
5470 * bound specifically to loopback due to security implications,
5471 * see tcp_input() and in_pcblookup_listen().
5472 */
5473 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5474 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5475 (s->nat_rule.ptr->action == PF_RDR ||
5476 s->nat_rule.ptr->action == PF_BINAT) &&
5477 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
5478 action = PF_DROP;
5479 REASON_SET(&reason, PFRES_MEMORY);
5480 }
5481
5482 m->m_pkthdr.fw_flags |= PF_MBUF_TRANSLATE_LOCALHOST;
5483
5484 if (log)
5485 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, a, ruleset);
5486
5487 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
5488 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
5489
5490 if (action == PF_PASS || r->action == PF_DROP) {
5491 r->packets++;
5492 r->bytes += pd.tot_len;
5493 if (a != NULL) {
5494 a->packets++;
5495 a->bytes += pd.tot_len;
5496 }
5497 if (s != NULL) {
5498 dirndx = (dir == s->direction) ? 0 : 1;
5499 s->packets[dirndx]++;
5500 s->bytes[dirndx] += pd.tot_len;
5501 if (s->nat_rule.ptr != NULL) {
5502 s->nat_rule.ptr->packets++;
5503 s->nat_rule.ptr->bytes += pd.tot_len;
5504 }
5505 if (s->src_node != NULL) {
5506 s->src_node->packets++;
5507 s->src_node->bytes += pd.tot_len;
5508 }
5509 if (s->nat_src_node != NULL) {
5510 s->nat_src_node->packets++;
5511 s->nat_src_node->bytes += pd.tot_len;
5512 }
5513 }
5514 tr = r;
5515 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
5516 if (nr != NULL) {
5517 struct pf_addr *x;
5518 /*
5519 * XXX: we need to make sure that the addresses
5520 * passed to pfr_update_stats() are the same as
5521 * the addresses used during matching (pfr_match)
5522 */
5523 if (r == &pf_default_rule) {
5524 tr = nr;
5525 x = (s == NULL || s->direction == dir) ?
5526 &pd.baddr : &pd.naddr;
5527 } else
5528 x = (s == NULL || s->direction == dir) ?
5529 &pd.naddr : &pd.baddr;
5530 if (x == &pd.baddr || s == NULL) {
5531 /* we need to change the address */
5532 if (dir == PF_OUT)
5533 pd.src = x;
5534 else
5535 pd.dst = x;
5536 }
5537 }
5538 if (tr->src.addr.type == PF_ADDR_TABLE)
5539 pfr_update_stats(tr->src.addr.p.tbl, (s == NULL ||
5540 s->direction == dir) ? pd.src : pd.dst, pd.af,
5541 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5542 tr->src.not);
5543 if (tr->dst.addr.type == PF_ADDR_TABLE)
5544 pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL ||
5545 s->direction == dir) ? pd.dst : pd.src, pd.af,
5546 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5547 tr->dst.not);
5548 }
5549
5550
5551 if (action == PF_SYNPROXY_DROP) {
5552 m_freem(*m0);
5553 *m0 = NULL;
5554 action = PF_PASS;
5555 } else if (r->rt)
5556 /* pf_route can free the mbuf causing *m0 to become NULL */
5557 pf_route(m0, r, dir, ifp, s);
5558
5559 return (action);
5560}
5561#endif /* INET */
5562
5563#ifdef INET6
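/*
 * pf_test6: main IPv6 entry point.  Works like pf_test(), but walks the
 * extension-header chain to find the terminal protocol and leaves IPv6
 * option handling unimplemented (see the XXX below).
 */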
5564int
5565pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0)
5566{
5567 struct pfi_kif *kif;
5568 u_short action, reason = 0, log = 0;
5569 struct mbuf *m = *m0;
5570 struct ip6_hdr *h = NULL;
5571 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
5572 struct pf_state *s = NULL;
5573 struct pf_ruleset *ruleset = NULL;
5574 struct pf_pdesc pd;
5575 int off, terminal = 0, dirndx;
5576
5577 if (!pf_status.running || (m->m_pkthdr.fw_flags & PF_MBUF_GENERATED))
5578 return (PF_PASS);
5579
5580 kif = pfi_index2kif[ifp->if_index];
5581 if (kif == NULL)
5582 return (PF_DROP);
5583
5584#ifdef DIAGNOSTIC
5585 if ((m->m_flags & M_PKTHDR) == 0)
5586 panic("non-M_PKTHDR is passed to pf_test");
5587#endif
5588
5589 memset(&pd, 0, sizeof(pd));
5590 if (m->m_pkthdr.len < (int)sizeof(*h)) {
5591 action = PF_DROP;
5592 REASON_SET(&reason, PFRES_SHORT);
5593 log = 1;
5594 goto done;
5595 }
5596
5597 /* We do IP header normalization and packet reassembly here */
5598 if (pf_normalize_ip6(m0, dir, kif, &reason) != PF_PASS) {
5599 action = PF_DROP;
5600 goto done;
5601 }
5602 m = *m0;
5603 h = mtod(m, struct ip6_hdr *);
5604
5605 pd.src = (struct pf_addr *)&h->ip6_src;
5606 pd.dst = (struct pf_addr *)&h->ip6_dst;
5607 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
5608 pd.ip_sum = NULL;
5609 pd.af = AF_INET6;
5610 pd.tos = 0;
5611 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
5612
5613 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
5614 pd.proto = h->ip6_nxt;
5615 do {
5616 switch (pd.proto) {
5617 case IPPROTO_FRAGMENT:
5618 action = pf_test_fragment(&r, dir, kif, m, h,
5619 &pd, &a, &ruleset);
5620 if (action == PF_DROP)
5621 REASON_SET(&reason, PFRES_FRAG);
5622 goto done;
5623 case IPPROTO_AH:
5624 case IPPROTO_HOPOPTS:
5625 case IPPROTO_ROUTING:
5626 case IPPROTO_DSTOPTS: {
5627 /* get next header and header length */
5628 struct ip6_ext opt6;
5629
5630 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
5631 NULL, NULL, pd.af)) {
5632 DPFPRINTF(PF_DEBUG_MISC,
5633 ("pf: IPv6 short opt\n"));
5634 action = PF_DROP;
5635 REASON_SET(&reason, PFRES_SHORT);
5636 log = 1;
5637 goto done;
5638 }
5639 if (pd.proto == IPPROTO_AH)
5640 off += (opt6.ip6e_len + 2) * 4;
5641 else
5642 off += (opt6.ip6e_len + 1) * 8;
5643 pd.proto = opt6.ip6e_nxt;
5644 /* go to the next header */
5645 break;
5646 }
5647 default:
5648 terminal++;
5649 break;
5650 }
5651 } while (!terminal);
5652
5653 switch (pd.proto) {
5654
5655 case IPPROTO_TCP: {
5656 struct tcphdr th;
5657
5658 pd.hdr.tcp = &th;
5659 if (!pf_pull_hdr(m, off, &th, sizeof(th),
5660 &action, &reason, AF_INET6)) {
5661 log = action != PF_PASS;
5662 goto done;
5663 }
5664 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5665 ntohs(h->ip6_plen), IPPROTO_TCP, AF_INET6)) {
5666 action = PF_DROP;
5667 goto done;
5668 }
5669 pd.p_len = pd.tot_len - off - (th.th_off << 2);
5670 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5671 if (action == PF_DROP)
5672 goto done;
5673 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5674 &reason);
5675 if (action == PF_PASS) {
5676#if NPFSYNC
5677 pfsync_update_state(s);
5678#endif
5679 r = s->rule.ptr;
5680 a = s->anchor.ptr;
5681 log = s->log;
5682 } else if (s == NULL)
5683 action = pf_test_tcp(&r, &s, dir, kif,
5684 m, off, h, &pd, &a, &ruleset);
5685 break;
5686 }
5687
5688 case IPPROTO_UDP: {
5689 struct udphdr uh;
5690
5691 pd.hdr.udp = &uh;
5692 if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5693 &action, &reason, AF_INET6)) {
5694 log = action != PF_PASS;
5695 goto done;
5696 }
5697 if (dir == PF_IN && uh.uh_sum && pf_check_proto_cksum(m,
5698 off, ntohs(h->ip6_plen), IPPROTO_UDP, AF_INET6)) {
5699 action = PF_DROP;
5700 goto done;
5701 }
5702 if (uh.uh_dport == 0 ||
5703 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5704 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5705 action = PF_DROP;
5706 goto done;
5707 }
5708 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5709 if (action == PF_PASS) {
5710#if NPFSYNC
5711 pfsync_update_state(s);
5712#endif
5713 r = s->rule.ptr;
5714 a = s->anchor.ptr;
5715 log = s->log;
5716 } else if (s == NULL)
5717 action = pf_test_udp(&r, &s, dir, kif,
5718 m, off, h, &pd, &a, &ruleset);
5719 break;
5720 }
5721
5722 case IPPROTO_ICMPV6: {
5723 struct icmp6_hdr ih;
5724
5725 pd.hdr.icmp6 = &ih;
5726 if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
5727 &action, &reason, AF_INET6)) {
5728 log = action != PF_PASS;
5729 goto done;
5730 }
5731 if (dir == PF_IN && pf_check_proto_cksum(m, off,
5732 ntohs(h->ip6_plen), IPPROTO_ICMPV6, AF_INET6)) {
5733 action = PF_DROP;
5734 goto done;
5735 }
5736 action = pf_test_state_icmp(&s, dir, kif,
5737 m, off, h, &pd);
5738 if (action == PF_PASS) {
5739#if NPFSYNC
5740 pfsync_update_state(s);
5741#endif
5742 r = s->rule.ptr;
5743 a = s->anchor.ptr;
5744 log = s->log;
5745 } else if (s == NULL)
5746 action = pf_test_icmp(&r, &s, dir, kif,
5747 m, off, h, &pd, &a, &ruleset);
5748 break;
5749 }
5750
5751 default:
5752 action = pf_test_state_other(&s, dir, kif, &pd);
5753 if (action == PF_PASS) {
5754 r = s->rule.ptr;
5755 a = s->anchor.ptr;
5756 log = s->log;
5757 } else if (s == NULL)
5758 action = pf_test_other(&r, &s, dir, kif, m, off, h,
5759 &pd, &a, &ruleset);
5760 break;
5761 }
5762
5763done:
5764 /* XXX handle IPv6 options if not allowed; not implemented. */
5765
5766#ifdef ALTQ
5767 if (action == PF_PASS && r->qid) {
5768 m->m_pkthdr.fw_flags |= ALTQ_MBUF_TAGGED;
5769 if (pd.tos == IPTOS_LOWDELAY)
5770 m->m_pkthdr.altq_qid = r->pqid;
5771 else
5772 m->m_pkthdr.altq_qid = r->qid;
5773 m->m_pkthdr.ecn_af = AF_INET6;
5774 m->m_pkthdr.header = h;
5775 }
5776#endif
5777
5778 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5779 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5780 (s->nat_rule.ptr->action == PF_RDR ||
5781 s->nat_rule.ptr->action == PF_BINAT) &&
5782 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)) {
5783 action = PF_DROP;
5784 REASON_SET(&reason, PFRES_MEMORY);
5785 }
5786
5787 m->m_pkthdr.fw_flags |= PF_MBUF_TRANSLATE_LOCALHOST;
5788
5789 if (log)
5790 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, r, a, ruleset);
5791
5792 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
5793 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
5794
5795 if (action == PF_PASS || r->action == PF_DROP) {
5796 r->packets++;
5797 r->bytes += pd.tot_len;
5798 if (a != NULL) {
5799 a->packets++;
5800 a->bytes += pd.tot_len;
5801 }
5802 if (s != NULL) {
5803 dirndx = (dir == s->direction) ? 0 : 1;
5804 s->packets[dirndx]++;
5805 s->bytes[dirndx] += pd.tot_len;
5806 if (s->nat_rule.ptr != NULL) {
5807 s->nat_rule.ptr->packets++;
5808 s->nat_rule.ptr->bytes += pd.tot_len;
5809 }
5810 if (s->src_node != NULL) {
5811 s->src_node->packets++;
5812 s->src_node->bytes += pd.tot_len;
5813 }
5814 if (s->nat_src_node != NULL) {
5815 s->nat_src_node->packets++;
5816 s->nat_src_node->bytes += pd.tot_len;
5817 }
5818 }
5819 tr = r;
5820 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
5821 if (nr != NULL) {
5822 struct pf_addr *x;
5823 /*
5824 * XXX: we need to make sure that the addresses
5825 * passed to pfr_update_stats() are the same as
5826 * the addresses used during matching (pfr_match)
5827 */
5828 if (r == &pf_default_rule) {
5829 tr = nr;
5830 x = (s == NULL || s->direction == dir) ?
5831 &pd.baddr : &pd.naddr;
5832 } else {
5833 x = (s == NULL || s->direction == dir) ?
5834 &pd.naddr : &pd.baddr;
5835 }
5836 if (x == &pd.baddr || s == NULL) {
5837 if (dir == PF_OUT)
5838 pd.src = x;
5839 else
5840 pd.dst = x;
5841 }
5842 }
5843 if (tr->src.addr.type == PF_ADDR_TABLE)
5844 pfr_update_stats(tr->src.addr.p.tbl, (s == NULL ||
5845 s->direction == dir) ? pd.src : pd.dst, pd.af,
5846 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5847 tr->src.not);
5848 if (tr->dst.addr.type == PF_ADDR_TABLE)
5849 pfr_update_stats(tr->dst.addr.p.tbl, (s == NULL ||
5850 s->direction == dir) ? pd.dst : pd.src, pd.af,
5851 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
5852 tr->dst.not);
5853 }
5854
5855
5856 if (action == PF_SYNPROXY_DROP) {
5857 m_freem(*m0);
5858 *m0 = NULL;
5859 action = PF_PASS;
5860 } else if (r->rt)
5861 /* pf_route6 can free the mbuf causing *m0 to become NULL */
5862 pf_route6(m0, r, dir, ifp, s);
5863
5864 return (action);
5865}
5866#endif /* INET6 */