kernel: Add three missing ')', two of them in (yet) unused code.
[dragonfly.git] sys/netgraph7/netflow/netflow.c
/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 * $FreeBSD: src/sys/netgraph/netflow/netflow.c,v 1.29 2008/05/09 23:02:57 julian Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "ng_message.h"
#include "netgraph.h"

#include "netflow/netflow.h"
#include "netflow/ng_netflow.h"

#define NBUCKETS (65536)	/* must be power of 2 */

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
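
/*
 * Note: the trailing "& (NBUCKETS - 1)" is a cheap modulo that relies on
 * NBUCKETS being a power of 2; both hashes first fold the upper 16 bits
 * of each address into the lower 16 before the mask is applied.
 */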

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magical number: statistically the number of 4-packet flows is
 * bigger than that of 5-, 6-, 7-...packet flows by an order of magnitude.
 * Most UDP/ICMP scans are 1 packet (~90% of the flow cache). TCP scans
 * are 2 packets for a reachable host and 4 packets otherwise.
 */
#define SMALL(fle)	(fle->f.packets <= 4)

/*
 * Cisco uses milliseconds for uptime. Bad idea, since it overflows
 * every 48+ days. But we do the same to keep compatibility. This macro
 * does an overflowable multiplication by 1000.
 */
#define MILLIUPTIME(t)	(((t) << 9) +	/* 512 */	\
			 ((t) << 8) +	/* 256 */	\
			 ((t) << 7) +	/* 128 */	\
			 ((t) << 6) +	/* 64 */	\
			 ((t) << 5) +	/* 32 */	\
			 ((t) << 3))	/* 8 */
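/* The shifted terms sum to 512 + 256 + 128 + 64 + 32 + 8 == 1000 exactly. */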

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);

/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p )arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p )arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}

/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
	item_p item = NULL;

	mtx_lock(&priv->export_mtx);
	if (priv->export_item != NULL) {
		item = priv->export_item;
		priv->export_item = NULL;
	}
	mtx_unlock(&priv->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
	}

	return (item);
}

/*
 * Re-attach an incomplete datagram back to priv.
 * If there is already another one, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some thread has already
	 * put its item there; in this case we bail out and
	 * send what we have to the collector.
	 */
	mtx_lock(&priv->export_mtx);
	if (priv->export_item == NULL) {
		priv->export_item = item;
		mtx_unlock(&priv->export_mtx);
	} else {
		mtx_unlock(&priv->export_mtx);
		export_send(priv, item, flags);
	}
}

/*
 * The flow is over. Call export_add() and free it. If the datagram is
 * full, then call export_send().
 */
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
	if (*item == NULL)
		*item = get_export_dgram(priv);
	if (*item == NULL) {
		atomic_add_32(&priv->info.nfinfo_export_failed, 1);
		uma_zfree_arg(priv->zone, fle, priv);
		return;
	}
	if (export_add(*item, fle) > 0) {
		export_send(priv, *item, flags);
		*item = NULL;
	}
	uma_zfree_arg(priv->zone, fle, priv);
}

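/*
 * Note: batch callers (ng_netflow_flow_add() and ng_netflow_expire()
 * below) keep one item across many expire_flow() calls and hand any
 * unfinished datagram back via return_export_dgram(), so partially
 * filled export datagrams are not lost.
 */
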
/* Get a snapshot of node statistics */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into the defined slot.
 *
 * First we grab a free flow entry for ourselves, then fill in every
 * field we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in a previous version. Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	KKASSERT(mtx_owned(&hsh->mtx));

	fle = uma_zalloc_arg(priv->zone, priv, M_WAITOK | M_NULLOK);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours. It is detached from all lists,
	 * we can safely edit it.
	 */

	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so we can fill in out_ifx, dst_mask, nexthop, and dst_as in
	 * future releases.
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	/* XXX MRT: 0 as a default; need the mbuf here to get the fib. */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, RTF_CLONING, 0);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Do a route lookup on the source address, to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	/* XXX MRT: 0 as a default, revisit; need the mbuf for the fib. */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, RTF_CLONING, 0);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow onto the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}

/*
 * Non-static functions called from ng_netflow.c
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry *hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);

	/* Allocate hash. */
	priv->hash = kmalloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

	mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

	return (0);
}

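/*
 * Note: the zone limit and the uma_ctor_flow() check above enforce the
 * same CACHESIZE bound on the number of live flow entries, so a failed
 * allocation in hash_insert() simply shows up as ENOMEM.
 */
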
/* Free all flow cache memory. Called from node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	item_p item = NULL;
	int i;

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
		}

	if (item != NULL)
		export_send(priv, item, NG_QUEUE);

	uma_zdestroy(priv->zone);

	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash)
		kfree(priv->hash, M_NETFLOW_HASH);

	mtx_destroy(&priv->export_mtx);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, iface_p iface,
	struct ifnet *ifp)
{
	register struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	struct flow_rec r;
	item_p item = NULL;
	int hlen, plen;
	int error = 0;
	uint8_t tcp_flags = 0;

	/* Try to fill in flow_rec r */
	bzero(&r, sizeof(r));
	/* check version */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* verify min header length */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;

	/* save packet length */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	/* Configured in_ifx overrides mbuf's */
	if (iface->info.ifinfo_index == 0) {
		if (ifp != NULL)
			r.r_i_ifx = ifp->if_index;
	} else
		r.r_i_ifx = iface->info.ifinfo_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Subsequent fragments will be recorded simply as an IP packet with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know it looks like a bug. But I don't want to re-implement
	 * IP packet assembly here. Anyway, the (in)famous trafd works this
	 * way - and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch(r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t )ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
			break;
		}

	/* Update node statistics. XXX: race... */
	priv->info.nfinfo_packets ++;
	priv->info.nfinfo_bytes += plen;

	/* Find hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry. If we encounter an
	 * entry that should be expired, purge it. We search in reverse,
	 * since that way the most active entries are encountered first,
	 * and most searches are done on the most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets ++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow in an
		 * active way:
		 * - it hit the active timeout
		 * - a TCP connection closed
		 * - it is going to overflow its byte counter (f.bytes is
		 *   within one maximum MTU of UINT_MAX)
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail if it
			 * isn't there already, so the next search will
			 * locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	if (item != NULL)
		return_export_dgram(priv, item, NG_QUEUE);

	return (error);
}

/*
 * Return records from the cache to userland.
 *
 * TODO: matching a particular IP should be done in the kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct ngnf_flows *data;
	int i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS-1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer no more than NREC_AT_ONCE records. More data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us. Then, we will restart with the new entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 * - we skip locked hash entries
	 * - we bail out, if someone wants our entry
	 * - we skip the rest of the entries when we hit NREC_AT_ONCE
	 */
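	/*
	 * A rough userland sketch of this paging protocol (hypothetical
	 * pseudocode; the actual NGM_NETFLOW_SHOW control message is
	 * declared in netflow/ng_netflow.h):
	 *
	 *	last = 0;
	 *	do {
	 *		ask the node for flows starting at "last";
	 *		consume reply->nentries records;
	 *		last = reply->last;
	 *	} while (last != 0);
	 */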
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (mtx_contested(&hsh->mtx))
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}

/* We have a full datagram in privdata. Send it to the export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);

	/* Fill export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = 0;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}

/* Add an export record to the dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count ++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx = htons(fle->f.fle_i_ifx);
	rec->o_ifx = htons(fle->f.fle_o_ifx);
	rec->packets = htonl(fle->f.packets);
	rec->octets = htonl(fle->f.bytes);
	rec->first = htonl(MILLIUPTIME(fle->f.first));
	rec->last = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port = fle->f.r.r_sport;
	rec->d_port = fle->f.r.r_dport;
	rec->flags = fle->f.tcp_flags;
	rec->prot = fle->f.r.r_ip_p;
	rec->tos = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;

	/* Unsupported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}

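/*
 * Note: the "end of datagram" return value (1) is what makes
 * expire_flow() call export_send() and start a fresh datagram.
 */
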
/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry *fle, *fle1;
	struct flow_hash_entry *hsh;
	priv_p priv = (priv_p )arg;
	item_p item = NULL;
	uint32_t used;
	int i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (mtx_contested(&hsh->mtx))
				break;

			/*
			 * Don't expire aggressively while the predicted
			 * hash collision ratio is small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, &item, fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	if (item != NULL)
		return_export_dgram(priv, item, NG_NOFLAGS);

	/* Schedule the next expiry run. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}