From: Jan Lentfer Date: Thu, 6 Jan 2011 10:03:55 +0000 (+0100) Subject: pf: convert to use kmalloc instead of zalloc X-Git-Tag: v3.0.0~762 X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/1186cbc0243ce65d2462a9af6fb08f7599064d76 pf: convert to use kmalloc instead of zalloc --- diff --git a/sys/net/pf/if_pflog.c b/sys/net/pf/if_pflog.c index 9a87fbf26d..c83b91d605 100644 --- a/sys/net/pf/if_pflog.c +++ b/sys/net/pf/if_pflog.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include diff --git a/sys/net/pf/if_pfsync.c b/sys/net/pf/if_pfsync.c index 3f260bd1b1..255a534d8e 100644 --- a/sys/net/pf/if_pfsync.c +++ b/sys/net/pf/if_pfsync.c @@ -43,7 +43,6 @@ #include #include #include -#include #include @@ -224,7 +223,8 @@ pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, struct pf_state_peer *d) { if (s->scrub.scrub_flag && d->scrub == NULL) { - d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO); + d->scrub = kmalloc(sizeof(struct pf_state_scrub), M_PFSYNC, M_NOWAIT|M_ZERO); + if (d->scrub == NULL) return (ENOMEM); } @@ -334,11 +334,11 @@ pfsync_state_import(struct pfsync_state *sp, u_int8_t flags) goto cleanup; if (flags & PFSYNC_SI_IOCTL) - pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO; + pool_flags = M_WAITOK | M_NULLOK | M_ZERO; else - pool_flags = PR_LIMITFAIL | PR_ZERO; + pool_flags = M_WAITOK | M_ZERO; - if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL) + if ((st = kmalloc(sizeof(struct pf_state), M_PFSYNC, pool_flags)) == NULL) goto cleanup; if ((skw = pf_alloc_state_key(pool_flags)) == NULL) @@ -423,17 +423,17 @@ pfsync_state_import(struct pfsync_state *sp, u_int8_t flags) if (skw == sks) sks = NULL; if (skw != NULL) - pool_put(&pf_state_key_pl, skw); + kfree(skw, M_PFSYNC); if (sks != NULL) - pool_put(&pf_state_key_pl, sks); + kfree(sks, M_PFSYNC); cleanup_state: /* pf_state_insert frees the state keys */ if (st) { if (st->dst.scrub) - pool_put(&pf_state_scrub_pl, st->dst.scrub); + 
kfree(st->dst.scrub, M_PFSYNC); if (st->src.scrub) - pool_put(&pf_state_scrub_pl, st->src.scrub); - pool_put(&pf_state_pl, st); + kfree(st->src.scrub, M_PFSYNC); + kfree(st, M_PFSYNC); } return (error); } diff --git a/sys/net/pf/pf.c b/sys/net/pf/pf.c index 1d68b958f0..a2cb967f4d 100644 --- a/sys/net/pf/pf.c +++ b/sys/net/pf/pf.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include @@ -141,9 +140,9 @@ struct pf_anchor_stackframe { struct pf_anchor *child; } pf_anchor_stack[64]; -vm_zone_t pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl; -vm_zone_t pf_state_pl, pf_state_key_pl, pf_state_item_pl; -vm_zone_t pf_altq_pl; +struct malloc_type *pf_src_tree_pl, *pf_rule_pl, *pf_pooladdr_pl; +struct malloc_type *pf_state_pl, *pf_state_key_pl, *pf_state_item_pl; +struct malloc_type *pf_altq_pl; void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t); @@ -315,6 +314,11 @@ struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = { s->rule.ptr->states_cur--; \ } while (0) +static MALLOC_DEFINE(M_PFSTATEPL, "pfstatepl", "pf state pool list"); +static MALLOC_DEFINE(M_PFSRCTREEPL, "pfsrctpl", "pf source tree pool list"); +static MALLOC_DEFINE(M_PFSTATEKEYPL, "pfstatekeypl", "pf state key pool list"); +static MALLOC_DEFINE(M_PFSTATEITEMPL, "pfstateitempl", "pf state item pool list"); + static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *); static __inline int pf_state_compare_key(struct pf_state_key *, struct pf_state_key *); @@ -553,7 +557,7 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, if (*sn == NULL) { if (!rule->max_src_nodes || rule->src_nodes < rule->max_src_nodes) - (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO); + (*sn) = kmalloc(sizeof(struct pf_src_node), M_PFSRCTREEPL, M_NOWAIT|M_ZERO); else pf_status.lcounters[LCNT_SRCNODES]++; if ((*sn) == NULL) @@ -577,7 +581,7 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, pf_print_host(&(*sn)->addr, 0, af); kprintf("\n"); } - 
pool_put(&pf_src_tree_pl, *sn); + kfree(*sn, M_PFSRCTREEPL); return (-1); } (*sn)->creation = time_second; @@ -705,15 +709,16 @@ pf_state_key_attach(struct pf_state_key *sk, struct pf_state *s, int idx) (idx == PF_SK_STACK) ? sk : NULL); kprintf("\n"); } - pool_put(&pf_state_key_pl, sk); + kfree(sk, M_PFSTATEKEYPL); return (-1); /* collision! */ } - pool_put(&pf_state_key_pl, sk); + kfree(sk, M_PFSTATEKEYPL); + s->key[idx] = cur; } else s->key[idx] = sk; - if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) { + if ((si = kmalloc(sizeof(struct pf_state_item), M_PFSTATEITEMPL, M_NOWAIT)) == NULL) { pf_state_key_detach(s, idx); return (-1); } @@ -744,14 +749,13 @@ void pf_state_key_detach(struct pf_state *s, int idx) { struct pf_state_item *si; - si = TAILQ_FIRST(&s->key[idx]->states); while (si && si->s != s) si = TAILQ_NEXT(si, entry); if (si) { TAILQ_REMOVE(&s->key[idx]->states, si, entry); - pool_put(&pf_state_item_pl, si); + kfree(si, M_PFSTATEITEMPL); } if (TAILQ_EMPTY(&s->key[idx]->states)) { @@ -760,7 +764,7 @@ pf_state_key_detach(struct pf_state *s, int idx) s->key[idx]->reverse->reverse = NULL; if (s->key[idx]->inp) s->key[idx]->inp->inp_pf_sk = NULL; - pool_put(&pf_state_key_pl, s->key[idx]); + kfree(s->key[idx], M_PFSTATEKEYPL); } s->key[idx] = NULL; } @@ -770,8 +774,8 @@ pf_alloc_state_key(int pool_flags) { struct pf_state_key *sk; - if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL) - return (NULL); + if ((sk = kmalloc(sizeof(struct pf_state_key), M_PFSTATEKEYPL, pool_flags)) == NULL) + return (NULL); TAILQ_INIT(&sk->states); return (sk); @@ -786,7 +790,7 @@ pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr, { KKASSERT((*skp == NULL && *nkp == NULL)); - if ((*skp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL) + if ((*skp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL) return (ENOMEM); PF_ACPY(&(*skp)->addr[pd->sidx], saddr, pd->af); @@ -797,7 +801,7 @@ pf_state_key_setup(struct pf_pdesc *pd, struct pf_rule *nr, (*skp)->af = 
pd->af; if (nr != NULL) { - if ((*nkp = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL) + if ((*nkp = pf_alloc_state_key(M_NOWAIT | M_ZERO)) == NULL) return (ENOMEM); /* caller must handle cleanup */ /* XXX maybe just bcopy and TAILQ_INIT(&(*nkp)->states) */ @@ -833,7 +837,7 @@ pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw, s->key[PF_SK_STACK] = s->key[PF_SK_WIRE]; } else { if (pf_state_key_attach(skw, s, PF_SK_WIRE)) { - pool_put(&pf_state_key_pl, sks); + kfree(sks, M_PFSTATEKEYPL); return (-1); } if (pf_state_key_attach(sks, s, PF_SK_STACK)) { @@ -1054,7 +1058,7 @@ pf_purge_expired_src_nodes(int waslocked) RB_REMOVE(pf_src_tree, &tree_src_tracking, cur); pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; pf_status.src_nodes--; - pool_put(&pf_src_tree_pl, cur); + kfree(cur, M_PFSRCTREEPL); } } @@ -1149,7 +1153,7 @@ pf_free_state(struct pf_state *cur) TAILQ_REMOVE(&state_list, cur, entry_list); if (cur->tag) pf_tag_unref(cur->tag); - pool_put(&pf_state_pl, cur); + kfree(cur, M_PFSTATEPL); pf_status.fcounters[FCNT_STATE_REMOVALS]++; pf_status.states--; } @@ -3579,9 +3583,9 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, cleanup: if (sk != NULL) - pool_put(&pf_state_key_pl, sk); + kfree(sk, M_PFSTATEKEYPL); if (nk != NULL) - pool_put(&pf_state_key_pl, nk); + kfree(nk, M_PFSTATEKEYPL); return (PF_DROP); } @@ -3618,7 +3622,7 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, REASON_SET(&reason, PFRES_SRCLIMIT); goto csfailed; } - s = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO); + s = kmalloc(sizeof(struct pf_state), M_PFSTATEPL, M_NOWAIT|M_ZERO); if (s == NULL) { REASON_SET(&reason, PFRES_MEMORY); goto csfailed; @@ -3708,7 +3712,7 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, REASON_SET(&reason, PFRES_MEMORY); pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); - pool_put(&pf_state_pl, s); + kfree(s, M_PFSTATEPL); return (PF_DROP); } if ((pd->flags & PFDESC_TCP_NORM) && 
s->src.scrub && @@ -3720,7 +3724,7 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, pf_normalize_tcp_cleanup(s); pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); - pool_put(&pf_state_pl, s); + kfree(s, M_PFSTATEPL); return (PF_DROP); } } @@ -3736,7 +3740,7 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, REASON_SET(&reason, PFRES_STATEINS); pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); - pool_put(&pf_state_pl, s); + kfree(s, M_PFSTATEPL); return (PF_DROP); } else *sm = s; @@ -3783,21 +3787,21 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, csfailed: if (sk != NULL) - pool_put(&pf_state_key_pl, sk); + kfree(sk, M_PFSTATEKEYPL); if (nk != NULL) - pool_put(&pf_state_key_pl, nk); + kfree(nk, M_PFSTATEKEYPL); if (sn != NULL && sn->states == 0 && sn->expire == 0) { RB_REMOVE(pf_src_tree, &tree_src_tracking, sn); pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; pf_status.src_nodes--; - pool_put(&pf_src_tree_pl, sn); + kfree(sn, M_PFSRCTREEPL); } if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) { RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn); pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; pf_status.src_nodes--; - pool_put(&pf_src_tree_pl, nsn); + kfree(nsn, M_PFSRCTREEPL); } return (PF_DROP); } diff --git a/sys/net/pf/pf_if.c b/sys/net/pf/pf_if.c index 2b93d38229..955a1b50ed 100644 --- a/sys/net/pf/pf_if.c +++ b/sys/net/pf/pf_if.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include @@ -65,7 +64,6 @@ #endif /* INET6 */ struct pfi_kif *pfi_all = NULL; -vm_zone_t pfi_addr_pl; struct pfi_ifhead pfi_ifs; long pfi_update = 1; struct pfr_addr *pfi_buffer; @@ -97,6 +95,8 @@ RB_GENERATE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare); #define PFI_BUFFER_MAX 0x10000 MALLOC_DEFINE(PFI_MTYPE, "pf_if", "pf interface table"); +static MALLOC_DEFINE(M_PFIADDRPL, "pfiaddrpl", "pf interface address pool list"); + void pfi_initialize(void) @@ -417,7 +417,7 
@@ pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af) if (aw->type != PF_ADDR_DYNIFTL) return (0); - if ((dyn = pool_get(&pfi_addr_pl, PR_WAITOK | PR_LIMITFAIL | PR_ZERO)) + if ((dyn = kmalloc(sizeof(struct pfi_dynaddr), M_PFIADDRPL, M_WAITOK|M_NULLOK|M_ZERO)) == NULL) return (1); @@ -474,7 +474,7 @@ _bad: pf_remove_if_empty_ruleset(ruleset); if (dyn->pfid_kif != NULL) pfi_kif_unref(dyn->pfid_kif, PFI_KIF_REF_RULE); - pool_put(&pfi_addr_pl, dyn); + kfree(dyn, M_PFIADDRPL); crit_exit(); return (rv); } @@ -664,7 +664,7 @@ pfi_dynaddr_remove(struct pf_addr_wrap *aw) aw->p.dyn->pfid_kif = NULL; pfr_detach_table(aw->p.dyn->pfid_kt); aw->p.dyn->pfid_kt = NULL; - pool_put(&pfi_addr_pl, aw->p.dyn); + kfree(aw->p.dyn, M_PFIADDRPL); aw->p.dyn = NULL; crit_exit(); } diff --git a/sys/net/pf/pf_ioctl.c b/sys/net/pf/pf_ioctl.c index fc99414c76..7232408424 100644 --- a/sys/net/pf/pf_ioctl.c +++ b/sys/net/pf/pf_ioctl.c @@ -56,7 +56,6 @@ #include #include #include -#include #include #include @@ -97,9 +96,7 @@ u_int rt_numfibs = RT_NUMFIBS; -void init_zone_var(void); -void cleanup_pf_zone(void); -int pfattach(void); +void pfattach(void); struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t, u_int8_t, u_int8_t, u_int8_t); @@ -146,6 +143,12 @@ void pf_rtlabel_copyout(struct pf_addr_wrap *); static cdev_t pf_dev; +static MALLOC_DEFINE(M_PFRULEPL, "pfrulepl", "pf rule pool list"); +static MALLOC_DEFINE(M_PFALTQPL, "pfaltqpl", "pf altq pool list"); +static MALLOC_DEFINE(M_PFPOOLADDRPL, "pfpooladdrpl", "pf pool address pool list"); +static MALLOC_DEFINE(M_PFFRENTPL, "pffrent", "pf frent pool list"); + + /* * XXX - These are new and need to be checked when moveing to a new version */ @@ -196,85 +199,30 @@ SYSCTL_INT(_debug, OID_AUTO, pfugidhack, CTLFLAG_RW, &debug_pfugidhack, 0, "Enable/disable pf user/group rules mpsafe hack"); void -init_zone_var(void) -{ - pf_src_tree_pl = pf_rule_pl = NULL; - pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL; - pf_frent_pl = 
pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL; - pf_state_scrub_pl = NULL; - pfr_ktable_pl = pfr_kentry_pl = NULL; -} - -void -cleanup_pf_zone(void) -{ - ZONE_DESTROY(pf_src_tree_pl); - ZONE_DESTROY(pf_rule_pl); - ZONE_DESTROY(pf_state_pl); - ZONE_DESTROY(pf_altq_pl); - ZONE_DESTROY(pf_pooladdr_pl); - ZONE_DESTROY(pf_frent_pl); - ZONE_DESTROY(pf_frag_pl); - ZONE_DESTROY(pf_cache_pl); - ZONE_DESTROY(pf_cent_pl); - ZONE_DESTROY(pfr_ktable_pl); - ZONE_DESTROY(pfr_kentry_pl); - ZONE_DESTROY(pfr_kentry_pl2); - ZONE_DESTROY(pf_state_scrub_pl); - ZONE_DESTROY(pfi_addr_pl); -} - -int pfattach(void) { u_int32_t *my_timeout = pf_default_rule.timeout; - int error = 1; + if (!rn_inithead((void **)&pf_maskhead, NULL, 0)) { kprintf("pf mask radix tree create failed\n"); return ENOMEM; } - - do { - ZONE_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl"); - ZONE_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl"); - ZONE_CREATE(pf_state_pl, struct pf_state, "pfstatepl"); - ZONE_CREATE(pf_state_key_pl, struct pf_state_key, "pfstatekeypl"); - ZONE_CREATE(pf_state_item_pl, struct pf_state_item, "pfstateitempl"); - ZONE_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl"); - ZONE_CREATE(pf_pooladdr_pl,struct pf_pooladdr, "pfpooladdrpl"); - ZONE_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable"); - ZONE_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry"); - ZONE_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2"); - ZONE_CREATE(pf_frent_pl, struct pf_frent, "pffrent"); - ZONE_CREATE(pf_frag_pl, struct pf_fragment, "pffrag"); - ZONE_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache"); - ZONE_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent"); - ZONE_CREATE(pf_state_scrub_pl, struct pf_state_scrub, - "pfstatescrub"); - ZONE_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl"); - error = 0; - } while(0); - if (error) { - cleanup_pf_zone(); - return (error); - } + kmalloc_create(&pf_state_pl, "pf state pool list"); + kmalloc_raise_limit(pf_state_pl, 0); + kmalloc_create(&pf_frent_pl, "pf 
fragment pool list"); + kmalloc_raise_limit(pf_frent_pl, 0); + kmalloc_create(&pf_cent_pl, "pf cent pool list"); + kmalloc_raise_limit(pf_cent_pl, 0); + pfr_initialize(); pfi_initialize(); - error = pf_osfp_initialize(); - if (error) { - cleanup_pf_zone(); - pf_osfp_cleanup(); - return (error); - } + pf_osfp_initialize(); pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl; pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT; pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl; pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; - /* XXX uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp, - pf_pool_limits[PF_LIMIT_STATES].limit); - */ if (ctob(physmem) <= 100*1024*1024) pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT_SMALL; @@ -320,14 +268,11 @@ pfattach(void) pf_normalize_init(); bzero(&pf_status, sizeof(pf_status)); pf_status.debug = PF_DEBUG_URGENT; - /* XXX do our best to avoid a conflict */ pf_status.hostid = karc4random(); if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge")) panic("pfpurge thread"); - - return (error); } int @@ -421,7 +366,7 @@ pf_empty_pool(struct pf_palist *poola) pf_tbladdr_remove(&empty_pool_pa->addr); pfi_kif_unref(empty_pool_pa->kif, PFI_KIF_REF_RULE); TAILQ_REMOVE(poola, empty_pool_pa, entries); - pool_put(&pf_pooladdr_pl, empty_pool_pa); + kfree(empty_pool_pa, M_PFPOOLADDRPL); } } @@ -468,7 +413,7 @@ pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); pf_anchor_remove(rule); pf_empty_pool(&rule->rpool.list); - pool_put(&pf_rule_pl, rule); + kfree(rule, M_PFRULEPL); } u_int16_t @@ -627,7 +572,7 @@ pf_begin_altq(u_int32_t *ticket) error = altq_remove(altq); } else pf_qid_unref(altq->qid); - pool_put(&pf_altq_pl, altq); + kfree(altq, M_PFALTQPL); } if (error) return (error); @@ -652,7 +597,7 @@ pf_rollback_altq(u_int32_t ticket) error = altq_remove(altq); } else pf_qid_unref(altq->qid); - pool_put(&pf_altq_pl, altq); + kfree(altq, M_PFALTQPL); } 
altqs_inactive_open = 0; return (error); @@ -702,7 +647,7 @@ pf_commit_altq(u_int32_t ticket) error = err; } else pf_qid_unref(altq->qid); - pool_put(&pf_altq_pl, altq); + kfree(altq, M_PFALTQPL); } crit_exit(); @@ -1191,7 +1136,7 @@ pfioctl(struct dev_ioctl_args *ap) error = EBUSY; break; } - rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL); + rule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL,M_WAITOK); if (rule == NULL) { error = ENOMEM; break; @@ -1208,14 +1153,14 @@ pfioctl(struct dev_ioctl_args *ap) rule->entries.tqe_prev = NULL; #ifndef INET if (rule->af == AF_INET) { - pool_put(&pf_rule_pl, rule); + kfree(rule, M_PFRULEPL); error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (rule->af == AF_INET6) { - pool_put(&pf_rule_pl, rule); + kfree(rule, M_PFRULEPL); error = EAFNOSUPPORT; break; } @@ -1229,7 +1174,7 @@ pfioctl(struct dev_ioctl_args *ap) if (rule->ifname[0]) { rule->kif = pfi_kif_get(rule->ifname); if (rule->kif == NULL) { - pool_put(&pf_rule_pl, rule); + kfree(rule, M_PFRULEPL); error = EINVAL; break; } @@ -1433,7 +1378,7 @@ pfioctl(struct dev_ioctl_args *ap) } if (pcr->action != PF_CHANGE_REMOVE) { - newrule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL); + newrule = kmalloc(sizeof(struct pf_rule), M_PFRULEPL, M_WAITOK|M_NULLOK); if (newrule == NULL) { error = ENOMEM; break; @@ -1447,14 +1392,14 @@ pfioctl(struct dev_ioctl_args *ap) newrule->entries.tqe_prev = NULL; #ifndef INET if (newrule->af == AF_INET) { - pool_put(&pf_rule_pl, newrule); + kfree(newrule, M_PFRULEPL); error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (newrule->af == AF_INET6) { - pool_put(&pf_rule_pl, newrule); + kfree(newrule, M_PFRULEPL); error = EAFNOSUPPORT; break; } @@ -1462,7 +1407,7 @@ pfioctl(struct dev_ioctl_args *ap) if (newrule->ifname[0]) { newrule->kif = pfi_kif_get(newrule->ifname); if (newrule->kif == NULL) { - pool_put(&pf_rule_pl, newrule); + kfree(newrule, M_PFRULEPL); error = EINVAL; break; } @@ -1968,7 +1913,7 @@ pfioctl(struct 
dev_ioctl_args *ap) error = EBUSY; break; } - altq = pool_get(&pf_altq_pl, PR_WAITOK|PR_LIMITFAIL); + altq = kmalloc(sizeof(struct pf_altq), M_PFALTQPL, M_WAITOK|M_NULLOK); if (altq == NULL) { error = ENOMEM; break; } @@ -1982,7 +1927,7 @@ pfioctl(struct dev_ioctl_args *ap) if (altq->qname[0] != 0) { if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { error = EBUSY; - pool_put(&pf_altq_pl, altq); + kfree(altq, M_PFALTQPL); break; } altq->altq_disc = NULL; @@ -1997,7 +1942,7 @@ pfioctl(struct dev_ioctl_args *ap) error = altq_add(altq); if (error) { - pool_put(&pf_altq_pl, altq); + kfree(altq, M_PFALTQPL); break; } @@ -2108,7 +2053,7 @@ pfioctl(struct dev_ioctl_args *ap) error = EINVAL; break; } - pa = pool_get(&pf_pooladdr_pl, PR_WAITOK|PR_LIMITFAIL); + pa = kmalloc(sizeof(struct pf_pooladdr), M_PFPOOLADDRPL, M_WAITOK|M_NULLOK); if (pa == NULL) { error = ENOMEM; break; } @@ -2117,7 +2062,7 @@ pfioctl(struct dev_ioctl_args *ap) if (pa->ifname[0]) { pa->kif = pfi_kif_get(pa->ifname); if (pa->kif == NULL) { - pool_put(&pf_pooladdr_pl, pa); + kfree(pa, M_PFPOOLADDRPL); error = EINVAL; break; } @@ -2126,7 +2071,7 @@ pfioctl(struct dev_ioctl_args *ap) if (pfi_dynaddr_setup(&pa->addr, pp->af)) { pfi_dynaddr_remove(&pa->addr); pfi_kif_unref(pa->kif, PFI_KIF_REF_RULE); - pool_put(&pf_pooladdr_pl, pa); + kfree(pa, M_PFPOOLADDRPL); error = EINVAL; break; } @@ -2202,8 +2147,8 @@ pfioctl(struct dev_ioctl_args *ap) break; } if (pca->action != PF_CHANGE_REMOVE) { - newpa = pool_get(&pf_pooladdr_pl, - PR_WAITOK|PR_LIMITFAIL); + newpa = kmalloc(sizeof(struct pf_pooladdr), + M_PFPOOLADDRPL, M_WAITOK|M_NULLOK); if (newpa == NULL) { error = ENOMEM; break; } @@ -2211,14 +2156,14 @@ pfioctl(struct dev_ioctl_args *ap) bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); #ifndef INET if (pca->af == AF_INET) { - pool_put(&pf_pooladdr_pl, newpa); + kfree(newpa, M_PFPOOLADDRPL); error = EAFNOSUPPORT; break; } #endif /* INET */ #ifndef INET6 if (pca->af == AF_INET6) { - pool_put(&pf_pooladdr_pl, 
newpa); + kfree(newpa, M_PFPOOLADDRPL); error = EAFNOSUPPORT; break; } @@ -2226,7 +2171,7 @@ pfioctl(struct dev_ioctl_args *ap) if (newpa->ifname[0]) { newpa->kif = pfi_kif_get(newpa->ifname); if (newpa->kif == NULL) { - pool_put(&pf_pooladdr_pl, newpa); + kfree(newpa, M_PFPOOLADDRPL); error = EINVAL; break; } @@ -2237,7 +2182,7 @@ pfioctl(struct dev_ioctl_args *ap) pf_tbladdr_setup(ruleset, &newpa->addr)) { pfi_dynaddr_remove(&newpa->addr); pfi_kif_unref(newpa->kif, PFI_KIF_REF_RULE); - pool_put(&pf_pooladdr_pl, newpa); + kfree(newpa, M_PFPOOLADDRPL); error = EINVAL; break; } @@ -2266,7 +2211,7 @@ pfioctl(struct dev_ioctl_args *ap) pfi_dynaddr_remove(&oldpa->addr); pf_tbladdr_remove(&oldpa->addr); pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); - pool_put(&pf_pooladdr_pl, oldpa); + kfree(oldpa, M_PFPOOLADDRPL); } else { if (oldpa == NULL) TAILQ_INSERT_TAIL(&pool->list, newpa, entries); @@ -2963,15 +2908,19 @@ fail: static void pf_clear_states(void) { - struct pf_state *state; + struct pf_state *s, *nexts; + u_int killed = 0; + + for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { + nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); - RB_FOREACH(state, pf_state_tree_id, &tree_id) { - state->timeout = PFTM_PURGE; /* don't send out individual delete messages */ - state->sync_flags = PFSTATE_NOSYNC; - pf_unlink_state(state); + s->sync_flags = PFSTATE_NOSYNC; + pf_unlink_state(s); + killed++; + } - pf_status.states = 0; + #if 0 /* PFSYNC */ /* * XXX This is called on module unload, we do not want to sync that over? */ @@ -3011,6 +2960,7 @@ pf_clear_srcnodes(void) pf_purge_expired_src_nodes(0); pf_status.src_nodes = 0; } + /* * XXX - Check for version missmatch!!! 
*/ @@ -3027,6 +2977,13 @@ shutdown_pf(void) pf_status.running = 0; + error = dehook_pf(); + if (error) { + pf_status.running = 1; + DPFPRINTF(PF_DEBUG_MISC, + ("pf: pfil unregistration failed\n")); + return(error); + } do { if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn)) != 0) { DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); @@ -3068,13 +3025,11 @@ shutdown_pf(void) pf_commit_altq(t[0]); #endif pf_clear_states(); - pf_clear_srcnodes(); /* status does not use malloced mem so no need to cleanup */ /* fingerprints and interfaces have their own cleanup code */ } while(0); - return (error); } @@ -3253,16 +3208,9 @@ pf_load(void) lwkt_gettoken(&pf_token); - init_zone_var(); lockinit(&pf_mod_lck, "pf task lck", 0, LK_CANRECURSE); pf_dev = make_dev(&pf_ops, 0, 0, 0, 0600, PF_NAME); - error = pfattach(); - if (error) { - dev_ops_remove_all(&pf_ops); - lockuninit(&pf_mod_lck); - lwkt_reltoken(&pf_token); - return (error); - } + pfattach(); lockinit(&pf_consistency_lock, "pfconslck", 0, LK_CANRECURSE); lwkt_reltoken(&pf_token); return (0); @@ -3306,8 +3254,6 @@ pf_unload(void) } pfi_cleanup(); pf_osfp_flush(); - pf_osfp_cleanup(); - cleanup_pf_zone(); dev_ops_remove_all(&pf_ops); lockuninit(&pf_consistency_lock); lockuninit(&pf_mod_lck); @@ -3319,6 +3265,9 @@ pf_unload(void) Free(pf_maskhead); pf_maskhead = NULL; } + kmalloc_destroy(&pf_state_pl); + kmalloc_destroy(&pf_frent_pl); + kmalloc_destroy(&pf_cent_pl); return 0; } diff --git a/sys/net/pf/pf_norm.c b/sys/net/pf/pf_norm.c index ba44b3c91f..f55df1a5b5 100644 --- a/sys/net/pf/pf_norm.c +++ b/sys/net/pf/pf_norm.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include @@ -97,9 +96,15 @@ int pf_normalize_tcpopt(struct pf_rule *, struct mbuf *, } \ } while(0) +static MALLOC_DEFINE(M_PFFRAGPL, "pffrag", "pf fragment pool list"); +static MALLOC_DEFINE(M_PFCACHEPL, "pffrcache", "pf fragment cache pool list"); +static MALLOC_DEFINE(M_PFFRENTPL, "pffrent", "pf frent pool list"); +static 
MALLOC_DEFINE(M_PFCENTPL, "pffrcent", "pf fragment cent pool list"); +static MALLOC_DEFINE(M_PFSTATESCRUBPL, "pfstatescrub", "pf state scrub pool list"); + /* Globals */ -vm_zone_t pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl; -vm_zone_t pf_state_scrub_pl; +struct malloc_type *pf_frent_pl, *pf_frag_pl, *pf_cache_pl, *pf_cent_pl; +struct malloc_type *pf_state_scrub_pl; int pf_nfrents, pf_ncache; void @@ -215,7 +220,7 @@ pf_free_fragment(struct pf_fragment *frag) LIST_REMOVE(frent, fr_next); m_freem(frent->fr_m); - pool_put(&pf_frent_pl, frent); + kfree(frent, M_PFFRENTPL); pf_nfrents--; } } else { @@ -229,7 +234,7 @@ pf_free_fragment(struct pf_fragment *frag) ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >" " frcache->fr_end): %s", __func__)); - pool_put(&pf_cent_pl, frcache); + kfree(frcache, M_PFCENTPL); pf_ncache--; } } @@ -278,11 +283,11 @@ pf_remove_fragment(struct pf_fragment *frag) if (BUFFER_FRAGMENTS(frag)) { RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag); TAILQ_REMOVE(&pf_fragqueue, frag, frag_next); - pool_put(&pf_frag_pl, frag); + kfree(frag, M_PFFRAGPL); } else { RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag); TAILQ_REMOVE(&pf_cachequeue, frag, frag_next); - pool_put(&pf_cache_pl, frag); + kfree(frag, M_PFCACHEPL); } } @@ -309,10 +314,10 @@ pf_reassemble(struct mbuf **m0, struct pf_fragment **frag, /* Create a new reassembly queue for this packet */ if (*frag == NULL) { - *frag = pool_get(&pf_frag_pl, PR_NOWAIT); + *frag = kmalloc(sizeof(struct pf_fragment), M_PFFRAGPL, M_NOWAIT); if (*frag == NULL) { pf_flush_fragments(); - *frag = pool_get(&pf_frag_pl, PR_NOWAIT); + *frag = kmalloc(sizeof(struct pf_fragment), M_PFFRAGPL, M_NOWAIT); if (*frag == NULL) goto drop_fragment; } @@ -388,7 +393,7 @@ pf_reassemble(struct mbuf **m0, struct pf_fragment **frag, next = LIST_NEXT(frea, fr_next); m_freem(frea->fr_m); LIST_REMOVE(frea, fr_next); - pool_put(&pf_frent_pl, frea); + kfree(frea, M_PFFRENTPL); pf_nfrents--; } @@ -445,13 +450,13 @@ pf_reassemble(struct mbuf 
**m0, struct pf_fragment **frag, m2 = m->m_next; m->m_next = NULL; m_cat(m, m2); - pool_put(&pf_frent_pl, frent); + kfree(frent, M_PFFRENTPL); pf_nfrents--; for (frent = next; frent != NULL; frent = next) { next = LIST_NEXT(frent, fr_next); m2 = frent->fr_m; - pool_put(&pf_frent_pl, frent); + kfree(frent, M_PFFRENTPL); pf_nfrents--; m_cat(m, m2); } @@ -482,7 +487,7 @@ pf_reassemble(struct mbuf **m0, struct pf_fragment **frag, drop_fragment: /* Oops - fail safe - drop packet */ - pool_put(&pf_frent_pl, frent); + kfree(frent, M_PFFRENTPL); pf_nfrents--; m_freem(m); return (NULL); @@ -504,18 +509,18 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, /* Create a new range queue for this packet */ if (*frag == NULL) { - *frag = pool_get(&pf_cache_pl, PR_NOWAIT); + *frag = kmalloc(sizeof(struct pf_fragment), M_PFCACHEPL, M_NOWAIT); if (*frag == NULL) { pf_flush_fragments(); - *frag = pool_get(&pf_cache_pl, PR_NOWAIT); + *frag = kmalloc(sizeof(struct pf_fragment), M_PFCACHEPL, M_NOWAIT); if (*frag == NULL) goto no_mem; } /* Get an entry for the queue */ - cur = pool_get(&pf_cent_pl, PR_NOWAIT); + cur = kmalloc(sizeof(struct pf_frcache), M_PFCENTPL, M_NOWAIT); if (cur == NULL) { - pool_put(&pf_cache_pl, *frag); + kfree(*frag, M_PFCACHEPL); *frag = NULL; goto no_mem; } @@ -636,7 +641,7 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, h->ip_id, -precut, frp->fr_off, frp->fr_end, off, max)); - cur = pool_get(&pf_cent_pl, PR_NOWAIT); + cur = kmalloc(sizeof(struct pf_frcache), M_PFCENTPL, M_NOWAIT); if (cur == NULL) goto no_mem; pf_ncache++; @@ -691,7 +696,7 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, h->ip_id, -aftercut, off, max, fra->fr_off, fra->fr_end)); - cur = pool_get(&pf_cent_pl, PR_NOWAIT); + cur = kmalloc(sizeof(struct pf_frcache), M_PFCENTPL, M_NOWAIT); if (cur == NULL) goto no_mem; pf_ncache++; @@ -712,7 +717,7 @@ pf_fragcache(struct mbuf **m0, struct ip *h, 
struct pf_fragment **frag, int mff, max, fra->fr_off, fra->fr_end)); fra->fr_off = cur->fr_off; LIST_REMOVE(cur, fr_next); - pool_put(&pf_cent_pl, cur); + kfree(cur, M_PFCENTPL); pf_ncache--; cur = NULL; @@ -726,7 +731,7 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, max, fra->fr_off, fra->fr_end)); fra->fr_off = frp->fr_off; LIST_REMOVE(frp, fr_next); - pool_put(&pf_cent_pl, frp); + kfree(frp, M_PFCENTPL); pf_ncache--; frp = NULL; @@ -898,7 +903,7 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason, goto bad; /* Get an entry for the fragment queue */ - frent = pool_get(&pf_frent_pl, PR_NOWAIT); + frent = kmalloc(sizeof(struct pf_frent), M_PFFRENTPL, M_NOWAIT); if (frent == NULL) { REASON_SET(reason, PFRES_MEMORY); return (PF_DROP); @@ -1349,7 +1354,7 @@ pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd, KASSERT((src->scrub == NULL), ("pf_normalize_tcp_init: src->scrub != NULL")); - src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT); + src->scrub = kmalloc(sizeof(struct pf_state_scrub), M_PFSTATESCRUBPL, M_NOWAIT); if (src->scrub == NULL) return (1); bzero(src->scrub, sizeof(*src->scrub)); @@ -1425,9 +1430,9 @@ void pf_normalize_tcp_cleanup(struct pf_state *state) { if (state->src.scrub) - pool_put(&pf_state_scrub_pl, state->src.scrub); + kfree(state->src.scrub, M_PFSTATESCRUBPL); if (state->dst.scrub) - pool_put(&pf_state_scrub_pl, state->dst.scrub); + kfree(state->dst.scrub, M_PFSTATESCRUBPL); /* Someday... flush the TCP segment reassembly descriptors. */ } diff --git a/sys/net/pf/pf_osfp.c b/sys/net/pf/pf_osfp.c index b8044f5b98..13963bcefe 100644 --- a/sys/net/pf/pf_osfp.c +++ b/sys/net/pf/pf_osfp.c @@ -20,7 +20,7 @@ #include #include #ifdef _KERNEL -# include +#include #endif /* _KERNEL */ #include @@ -42,7 +42,6 @@ # define DPFPRINTF(format, x...) 
\ if (pf_status.debug >= PF_DEBUG_NOISY) \ kprintf(format , ##x) -typedef vm_zone_t pool_t; #else /* Userland equivalents so we can lend code to tcpdump et al. */ @@ -53,10 +52,6 @@ typedef vm_zone_t pool_t; # include # include # include -# define pool_t int -# define pool_get(pool, flags) malloc(*(pool)) -# define pool_put(pool, item) free(item) -# define pool_init(pool, size, a, ao, f, m, p) (*(pool)) = (size) # ifdef PFDEBUG # include @@ -66,10 +61,10 @@ typedef vm_zone_t pool_t; # endif /* PFDEBUG */ #endif /* _KERNEL */ +static MALLOC_DEFINE(M_PFOSFPENTRYPL, "pfospfen", "pf OS finger printing pool list"); +static MALLOC_DEFINE(M_PFOSFPPL, "pfosfp", "pf OS finger printing pool list"); SLIST_HEAD(pf_osfp_list, pf_os_fingerprint) pf_osfp_list; -pool_t pf_osfp_entry_pl; -pool_t pf_osfp_pl; struct pf_os_fingerprint *pf_osfp_find(struct pf_osfp_list *, struct pf_os_fingerprint *, u_int8_t); @@ -288,39 +283,12 @@ pf_osfp_match(struct pf_osfp_enlist *list, pf_osfp_t os) } /* Initialize the OS fingerprint system */ -int +void pf_osfp_initialize(void) { - int error = 0; - -#ifdef _KERNEL - do { - error = ENOMEM; - pf_osfp_entry_pl = pf_osfp_pl = NULL; - ZONE_CREATE(pf_osfp_entry_pl, struct pf_osfp_entry, "pfospfen"); - ZONE_CREATE(pf_osfp_pl, struct pf_os_fingerprint, "pfosfp"); - error = 0; - } while(0); -#else - pool_init(&pf_osfp_entry_pl, sizeof(struct pf_osfp_entry), 0, 0, 0, - "pfosfpen", NULL); - pool_init(&pf_osfp_pl, sizeof(struct pf_os_fingerprint), 0, 0, 0, - "pfosfp", NULL); -#endif SLIST_INIT(&pf_osfp_list); - - return (error); } -#ifdef _KERNEL -void -pf_osfp_cleanup(void) -{ - ZONE_DESTROY(pf_osfp_entry_pl); - ZONE_DESTROY(pf_osfp_pl); -} -#endif - /* Flush the fingerprint list */ void pf_osfp_flush(void) @@ -332,9 +300,9 @@ pf_osfp_flush(void) SLIST_REMOVE_HEAD(&pf_osfp_list, fp_next); while ((entry = SLIST_FIRST(&fp->fp_oses))) { SLIST_REMOVE_HEAD(&fp->fp_oses, fp_entry); - pool_put(&pf_osfp_entry_pl, entry); + kfree(entry, M_PFOSFPENTRYPL); } - 
pool_put(&pf_osfp_pl, fp); + kfree(fp, M_PFOSFPPL); } } @@ -387,12 +355,12 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) if (PF_OSFP_ENTRY_EQ(entry, &fpioc->fp_os)) return (EEXIST); } - if ((entry = pool_get(&pf_osfp_entry_pl, - PR_WAITOK|PR_LIMITFAIL)) == NULL) + if ((entry = kmalloc(sizeof(struct pf_osfp_entry), + M_PFOSFPENTRYPL, M_WAITOK|M_NULLOK)) == NULL) return (ENOMEM); } else { - if ((fp = pool_get(&pf_osfp_pl, - PR_WAITOK|PR_LIMITFAIL)) == NULL) + if ((fp = kmalloc(sizeof(struct pf_os_fingerprint), + M_PFOSFPPL, M_WAITOK|M_NULLOK)) == NULL) return (ENOMEM); memset(fp, 0, sizeof(*fp)); fp->fp_tcpopts = fpioc->fp_tcpopts; @@ -404,9 +372,9 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) fp->fp_wscale = fpioc->fp_wscale; fp->fp_ttl = fpioc->fp_ttl; SLIST_INIT(&fp->fp_oses); - if ((entry = pool_get(&pf_osfp_entry_pl, - PR_WAITOK|PR_LIMITFAIL)) == NULL) { - pool_put(&pf_osfp_pl, fp); + if ((entry = kmalloc(sizeof(struct pf_osfp_entry), + M_PFOSFPENTRYPL, M_WAITOK|M_NULLOK)) == NULL) { + kfree(fp, M_PFOSFPPL); return (ENOMEM); } pf_osfp_insert(&pf_osfp_list, fp); diff --git a/sys/net/pf/pf_ruleset.c b/sys/net/pf/pf_ruleset.c index 33562d6d6d..49b9675c1c 100644 --- a/sys/net/pf/pf_ruleset.c +++ b/sys/net/pf/pf_ruleset.c @@ -59,9 +59,12 @@ # define DPFPRINTF(format, x...) \ if (pf_status.debug >= PF_DEBUG_NOISY) \ kprintf(format , ##x) -#define rs_malloc(x) kmalloc(x, M_TEMP, M_WAITOK) -#define rs_free(x) kfree(x, M_TEMP) +#define rs_malloc(x) kmalloc(x, M_PFRS, M_WAITOK) +#define rs_free(x) kfree(x, M_PFRS) #define printf kprintf + +static MALLOC_DEFINE(M_PFRS, "pfrulesetpl", "pf ruleset pool list"); + #else /* Userland equivalents so we can lend code to pfctl et al. 
*/ @@ -81,7 +84,6 @@ # endif /* PFDEBUG */ #endif /* _KERNEL */ - struct pf_anchor_global pf_anchors; struct pf_anchor pf_main_anchor; diff --git a/sys/net/pf/pf_subr.c b/sys/net/pf/pf_subr.c index 3135245a12..1491a94d9d 100644 --- a/sys/net/pf/pf_subr.c +++ b/sys/net/pf/pf_subr.c @@ -45,7 +45,6 @@ #include #include #include -#include #include @@ -176,16 +175,3 @@ dohooks(struct hook_desc_head *head, int flags) #define ISN_BYTES_PER_SECOND 1048576 #define ISN_STATIC_INCREMENT 4096 #define ISN_RANDOM_INCREMENT (4096 - 1) - -/* wrapper functions for pool_* */ -void * -pool_get(vm_zone_t *pp, int flags) -{ - void *retval; - retval = zalloc(*(pp)); - - if (flags & PR_ZERO) - bzero(retval, (*pp)->zsize); - - return retval; -} diff --git a/sys/net/pf/pf_table.c b/sys/net/pf/pf_table.c index 82eb6791be..b32776183a 100644 --- a/sys/net/pf/pf_table.c +++ b/sys/net/pf/pf_table.c @@ -42,7 +42,6 @@ #include #include #include -#include #include #include @@ -101,6 +100,11 @@ #define ENQUEUE_UNMARKED_ONLY (1) #define INVERT_NEG_FLAG (1) +static MALLOC_DEFINE(M_PFRKTABLEPL, "pfrktable", "pf radix table pool list"); +static MALLOC_DEFINE(M_PFRKENTRYPL, "pfrkentry", "pf radix entry pool list"); +static MALLOC_DEFINE(M_PFRKENTRYPL2, "pfrkentry2", "pf radix entry 2 pool list"); +static MALLOC_DEFINE(M_PFRKCOUNTERSPL, "pfrkcounters", "pf radix counters"); + struct pfr_walktree { enum pfrw_op { PFRW_MARK, @@ -129,11 +133,9 @@ struct pfr_walktree { #define pfrw_cnt pfrw_free #define senderr(e) do { rv = (e); goto _bad; } while (0) - -vm_zone_t pfr_ktable_pl; -vm_zone_t pfr_kentry_pl; -vm_zone_t pfr_kentry_pl2; -vm_zone_t pfr_kcounters_pl; +struct malloc_type *pfr_ktable_pl; +struct malloc_type *pfr_kentry_pl; +struct malloc_type *pfr_kentry_pl2; struct sockaddr_in pfr_sin; struct sockaddr_in6 pfr_sin6; union sockaddr_union pfr_mask; @@ -807,9 +809,9 @@ pfr_create_kentry(struct pfr_addr *ad, int intr) struct pfr_kentry *ke; if (intr) - ke = pool_get(&pfr_kentry_pl2, PR_NOWAIT | PR_ZERO); 
+ ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL2, M_NOWAIT|M_ZERO); else - ke = pool_get(&pfr_kentry_pl, PR_NOWAIT|PR_ZERO|PR_LIMITFAIL); + ke = kmalloc(sizeof(struct pfr_kentry), M_PFRKENTRYPL, M_NOWAIT|M_ZERO|M_NULLOK); if (ke == NULL) return (NULL); @@ -839,11 +841,11 @@ void pfr_destroy_kentry(struct pfr_kentry *ke) { if (ke->pfrke_counters) - pool_put(&pfr_kcounters_pl, ke->pfrke_counters); + kfree(ke->pfrke_counters, M_PFRKCOUNTERSPL); if (ke->pfrke_intrpool) - pool_put(&pfr_kentry_pl2, ke); + kfree(ke, M_PFRKENTRYPL2); else - pool_put(&pfr_kentry_pl, ke); + kfree(ke, M_PFRKENTRYPL); } void @@ -924,7 +926,7 @@ pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero, int negchange) if (negchange) p->pfrke_not = !p->pfrke_not; if (p->pfrke_counters) { - pool_put(&pfr_kcounters_pl, p->pfrke_counters); + kfree(p->pfrke_counters, M_PFRKCOUNTERSPL); p->pfrke_counters = NULL; } crit_exit(); @@ -1894,7 +1896,7 @@ pfr_create_ktable(struct pfr_table *tbl, long tzero, int attachruleset) struct pfr_ktable *kt; struct pf_ruleset *rs; - kt = pool_get(&pfr_ktable_pl, PR_NOWAIT| PR_ZERO | PR_LIMITFAIL); + kt = kmalloc(sizeof(struct pfr_ktable), M_PFRKTABLEPL, M_NOWAIT|M_ZERO|M_NULLOK); if (kt == NULL) return (NULL); kt->pfrkt_t = *tbl; @@ -1954,7 +1956,7 @@ pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) kt->pfrkt_rs->tables--; pf_remove_if_empty_ruleset(kt->pfrkt_rs); } - pool_put(&pfr_ktable_pl, kt); + kfree(kt, M_PFRKTABLEPL); } int @@ -2057,8 +2059,8 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, if (ke != NULL && op_pass != PFR_OP_XPASS && (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) { if (ke->pfrke_counters == NULL) - ke->pfrke_counters = pool_get(&pfr_kcounters_pl, - PR_NOWAIT | PR_ZERO); + ke->pfrke_counters = kmalloc(sizeof(struct pfr_kcounters), + M_PFRKCOUNTERSPL, M_NOWAIT|M_ZERO); if (ke->pfrke_counters != NULL) { ke->pfrke_counters->pfrkc_packets[dir_out][op_pass]++; 
ke->pfrke_counters->pfrkc_bytes[dir_out][op_pass] += len; diff --git a/sys/net/pf/pfvar.h b/sys/net/pf/pfvar.h index fa1c51a215..1204e983b4 100644 --- a/sys/net/pf/pfvar.h +++ b/sys/net/pf/pfvar.h @@ -48,9 +48,6 @@ #include #include -#ifdef _KERNEL -#include -#endif /* * XXX @@ -227,15 +224,6 @@ struct pfi_dynaddr { * Address manipulation macros */ -/* XXX correct values for zinit? */ -#define ZONE_CREATE(var, type, desc) \ - var = zinit(desc, sizeof(type), 1, ZONE_DESTROYABLE, 1); \ - if (var == NULL) break -#define ZONE_DESTROY(a) zdestroy(a) - -/* #define pool_get(p, f) zalloc(*(p)) */ -#define pool_put(p, o) zfree(*(p), (o)) - #define NTOHS(x) (x) = ntohs((__uint16_t)(x)) #define HTONS(x) (x) = htons((__uint16_t)(x)) @@ -263,7 +251,6 @@ TAILQ_HEAD(hook_desc_head, hook_desc); void *hook_establish(struct hook_desc_head *, int, void (*)(void *), void *); void hook_disestablish(struct hook_desc_head *, void *); void dohooks(struct hook_desc_head *, int); -void *pool_get (vm_zone_t *, int); #define HOOK_REMOVE 0x01 #define HOOK_FREE 0x02 @@ -419,7 +406,6 @@ void *pool_get (vm_zone_t *, int); (neg) \ ) - struct pf_rule_uid { uid_t uid[2]; u_int8_t op; @@ -1769,14 +1755,14 @@ extern int pf_tbladdr_setup(struct pf_ruleset *, extern void pf_tbladdr_remove(struct pf_addr_wrap *); extern void pf_tbladdr_copyout(struct pf_addr_wrap *); extern void pf_calc_skip_steps(struct pf_rulequeue *); -extern vm_zone_t pf_src_tree_pl, pf_rule_pl; -extern vm_zone_t pf_state_pl, pf_state_key_pl, pf_state_item_pl, - pf_altq_pl, pf_pooladdr_pl; -extern vm_zone_t pfr_ktable_pl, pfr_kentry_pl; -extern vm_zone_t pfr_kentry_pl2; -extern vm_zone_t pf_cache_pl, pf_cent_pl; -extern vm_zone_t pf_state_scrub_pl; -extern vm_zone_t pfi_addr_pl; +extern struct malloc_type *pf_src_tree_pl, *pf_rule_pl; +extern struct malloc_type *pf_state_pl, *pf_state_key_pl, *pf_state_item_pl, + *pf_altq_pl, *pf_pooladdr_pl; +extern struct malloc_type *pfr_ktable_pl, *pfr_kentry_pl; +extern struct malloc_type 
*pfr_kentry_pl2; +extern struct malloc_type *pf_cache_pl, *pf_cent_pl; +extern struct malloc_type *pf_state_scrub_pl; +extern struct malloc_type *pfi_addr_pl; extern void pf_purge_thread(void *); extern int pf_purge_expired_src_nodes(int); extern int pf_purge_expired_states(u_int32_t, int); @@ -1928,7 +1914,7 @@ void pf_qid2qname(u_int32_t, char *); void pf_qid_unref(u_int32_t); extern struct pf_status pf_status; -extern vm_zone_t pf_frent_pl, pf_frag_pl; +extern struct malloc_type *pf_frent_pl, *pf_frag_pl; extern struct lock pf_consistency_lock; struct pf_pool_limit { @@ -2000,8 +1986,7 @@ struct pf_osfp_enlist * const struct tcphdr *); void pf_osfp_flush(void); int pf_osfp_get(struct pf_osfp_ioctl *); -int pf_osfp_initialize(void); -void pf_osfp_cleanup(void); +void pf_osfp_initialize(void); int pf_osfp_match(struct pf_osfp_enlist *, pf_osfp_t); struct pf_os_fingerprint * pf_osfp_validate(void);