40#include <sys/bitstring.h>
42#include <sys/counter.h>
43#include <sys/kernel.h>
45#include <sys/limits.h>
47#include <sys/syslog.h>
48#include <sys/socket.h>
53#include <net/if_var.h>
55#include <net/route/nhop.h>
56#include <net/route/route_ctl.h>
57#include <net/ethernet.h>
58#include <netinet/in.h>
59#include <netinet/in_fib.h>
60#include <netinet/in_systm.h>
61#include <netinet/ip.h>
62#include <netinet/ip6.h>
63#include <netinet/tcp.h>
64#include <netinet/udp.h>
66#include <netinet6/in6_fib.h>
/*
 * Number of hash buckets in each per-node flow cache (IPv4 and IPv6).
 * NOTE(review): appears to serve as the mask base for FULL_HASH/ADDR_HASH
 * (both end in a `&` whose operand is outside this view) — if so, it must
 * remain a power of two; confirm against the full macro definitions.
 */
#define NBUCKETS (65536)
78#define FULL_HASH(addr1, addr2, port1, port2) \
79 (((addr1 ^ (addr1 >> 16) ^ \
80 htons(addr2 ^ (addr2 >> 16))) ^ \
81 port1 ^ htons(port2)) & \
85#define ADDR_HASH(addr1, addr2) \
86 ((addr1 ^ (addr1 >> 16) ^ \
87 htons(addr2 ^ (addr2 >> 16))) & \
/*
 * True when the flow entry has been idle (no packet seen) longer than the
 * configured inactive timeout.  Expects `priv` and `time_uptime` in scope
 * at the expansion site.  Parameter parenthesized per CERT PRE01-C so a
 * non-trivial argument expression expands safely.
 */
#define INACTIVE(fle) (time_uptime - (fle)->f.last > priv->nfinfo_inact_t)
/*
 * True when the flow entry has existed longer than the configured active
 * timeout, regardless of recent traffic.  Expects `priv` and `time_uptime`
 * in scope at the expansion site.  Parameter parenthesized per CERT
 * PRE01-C so a non-trivial argument expression expands safely.
 */
#define AGED(fle) (time_uptime - (fle)->f.first > priv->nfinfo_act_t)
/*
 * True when the flow entry carries no accounted packets, i.e. the slot is
 * unused/free.  Parameter parenthesized per CERT PRE01-C so a non-trivial
 * argument expression expands safely.
 */
#define ISFREE(fle) ((fle)->f.packets == 0)
/*
 * True for "small" flows of at most 4 packets.  Parameter parenthesized
 * per CERT PRE01-C so a non-trivial argument expression expands safely.
 * NOTE(review): the threshold 4 is a tuning constant inherited from the
 * original code; its rationale is not visible in this view.
 */
#define SMALL(fle) ((fle)->f.packets <= 4)
111 int, uint8_t, uint8_t);
115 int, uint8_t, uint8_t);
131static inline uint32_t
139 r->r_sport,
r->r_dport);
141 return ADDR_HASH(
r->r_src.s_addr,
r->r_dst.s_addr);
148static inline uint32_t
155 return FULL_HASH(
r->src.r_src6.__u6_addr.__u6_addr32[3],
156 r->dst.r_dst6.__u6_addr.__u6_addr32[3],
r->r_sport,
159 return ADDR_HASH(
r->src.r_src6.__u6_addr.__u6_addr32[3],
160 r->dst.r_dst6.__u6_addr.__u6_addr32[3]);
186 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
233 if ((
priv->export != NULL) && (
version == IPVERSION)) {
235 if (exp.
item == NULL) {
236 priv->nfinfo_export_failed++;
237 if (
priv->export9 != NULL)
238 priv->nfinfo_export9_failed++;
240 uma_zfree_arg(
priv->zone, fle,
priv);
250 if (
priv->export9 != NULL) {
252 if (exp.
item9 == NULL) {
253 priv->nfinfo_export9_failed++;
255 uma_zfree_arg(
priv->zone, fle,
priv);
258 uma_zfree_arg(
priv->zone6, fle,
priv);
261 panic(
"ng_netflow: Unknown IP proto: %d",
274 uma_zfree_arg(
priv->zone, fle,
priv);
277 uma_zfree_arg(
priv->zone6, fle,
priv);
324 int plen, uint8_t
flags, uint8_t tcp_flags)
328 mtx_assert(&hsh->
mtx, MA_OWNED);
330 fle = uma_zalloc_arg(
priv->zone,
priv, M_NOWAIT);
332 priv->nfinfo_alloc_failed++;
354 struct route_nhop_data rnd;
356 rt = fib4_lookup_rt(
r->fib, fle->
f.
r.
r_dst, 0, NHR_NONE, &rnd);
360 struct nhop_object *nh = nhop_select_func(rnd.rnd_nhop, 0);
363 rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
365 if (nh->gw_sa.sa_family == AF_INET)
378 struct route_nhop_data rnd;
380 rt = fib4_lookup_rt(
r->fib, fle->
f.
r.
r_src, 0, NHR_NONE, &rnd);
386 rt_get_inet_prefix_plen(rt, &addr, &plen, &scopeid);
392 TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
401 int plen, uint8_t
flags, uint8_t tcp_flags)
405 mtx_assert(&hsh6->
mtx, MA_OWNED);
407 fle6 = uma_zalloc_arg(
priv->zone6,
priv, M_NOWAIT);
409 priv->nfinfo_alloc_failed++;
432 struct route_nhop_data rnd;
434 rt = fib6_lookup_rt(
r->fib, &fle6->
f.
r.
dst.
r_dst6, 0, NHR_NONE, &rnd);
436 struct in6_addr addr;
438 struct nhop_object *nh = nhop_select_func(rnd.rnd_nhop, 0);
441 rt_get_inet6_prefix_plen(rt, &addr, &plen, &scopeid);
443 if (nh->gw_sa.sa_family == AF_INET6)
452 struct route_nhop_data rnd;
454 rt = fib6_lookup_rt(
r->fib, &fle6->
f.
r.
src.
r_src6, 0, NHR_NONE, &rnd);
456 struct in6_addr addr;
460 rt_get_inet6_prefix_plen(rt, &addr, &plen, &scopeid);
466 TAILQ_INSERT_TAIL(&hsh6->head, (
struct flow_entry *)fle6, fle_hash);
484 priv->zone = uma_zcreate(
"NetFlow IPv4 cache",
485 sizeof(
struct flow_entry), NULL, NULL, NULL, NULL,
489 priv->zone6 = uma_zcreate(
"NetFlow IPv6 cache",
490 sizeof(
struct flow6_entry), NULL, NULL, NULL, NULL,
497 M_NETFLOW_HASH, M_WAITOK | M_ZERO);
500 for (i = 0, hsh =
priv->hash; i <
NBUCKETS; i++, hsh++) {
501 mtx_init(&hsh->
mtx,
"hash mutex", NULL, MTX_DEF);
502 TAILQ_INIT(&hsh->head);
508 M_NETFLOW_HASH, M_WAITOK | M_ZERO);
511 for (i = 0, hsh =
priv->hash6; i <
NBUCKETS; i++, hsh++) {
512 mtx_init(&hsh->
mtx,
"hash mutex", NULL, MTX_DEF);
513 TAILQ_INIT(&hsh->head);
517 priv->nfinfo_bytes = counter_u64_alloc(M_WAITOK);
518 priv->nfinfo_packets = counter_u64_alloc(M_WAITOK);
519 priv->nfinfo_bytes6 = counter_u64_alloc(M_WAITOK);
520 priv->nfinfo_packets6 = counter_u64_alloc(M_WAITOK);
521 priv->nfinfo_sbytes = counter_u64_alloc(M_WAITOK);
522 priv->nfinfo_spackets = counter_u64_alloc(M_WAITOK);
523 priv->nfinfo_sbytes6 = counter_u64_alloc(M_WAITOK);
524 priv->nfinfo_spackets6 = counter_u64_alloc(M_WAITOK);
525 priv->nfinfo_act_exp = counter_u64_alloc(M_WAITOK);
526 priv->nfinfo_inact_exp = counter_u64_alloc(M_WAITOK);
529 CTR0(KTR_NET,
"ng_netflow startup()");
538 CTR1(KTR_NET,
"ng_netflow(): fib init: %d", fib);
543 if ((fe = malloc(
sizeof(
struct fib_export), M_NETGRAPH,
544 M_NOWAIT | M_ZERO)) == NULL)
547 mtx_init(&fe->
export_mtx,
"export dgram lock", NULL, MTX_DEF);
548 mtx_init(&fe->
export9_mtx,
"export9 dgram lock", NULL, MTX_DEF);
552 if (atomic_cmpset_ptr((
volatile uintptr_t *)&
priv->fib_data[fib],
553 (uintptr_t)NULL, (uintptr_t)fe) == 0) {
555 CTR3(KTR_NET,
"ng_netflow(): fib init: %d setup %p but got %p",
559 free(fe, M_NETGRAPH);
562 CTR3(KTR_NET,
"ng_netflow(): fib %d setup to %p (%p)",
564 priv->nfinfo_alloc_fibs++;
580 bzero(&exp,
sizeof(exp));
588 TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
589 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
594 for (hsh =
priv->hash6, i = 0; i <
NBUCKETS; hsh++, i++)
595 TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
596 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
602 uma_zdestroy(
priv->zone);
605 mtx_destroy(&hsh->
mtx);
608 if (
priv->hash != NULL)
609 free(
priv->hash, M_NETFLOW_HASH);
611 uma_zdestroy(
priv->zone6);
613 for (i = 0, hsh =
priv->hash6; i <
NBUCKETS; i++, hsh++)
614 mtx_destroy(&hsh->
mtx);
617 if (
priv->hash6 != NULL)
618 free(
priv->hash6, M_NETFLOW_HASH);
621 for (i = 0; i <
priv->maxfibs; i++) {
625 if (fe->exp.item != NULL)
628 if (fe->exp.item9 != NULL)
632 mtx_destroy(&fe->export_mtx);
633 mtx_destroy(&fe->export9_mtx);
634 free(fe, M_NETGRAPH);
637 counter_u64_free(
priv->nfinfo_bytes);
638 counter_u64_free(
priv->nfinfo_packets);
639 counter_u64_free(
priv->nfinfo_bytes6);
640 counter_u64_free(
priv->nfinfo_packets6);
641 counter_u64_free(
priv->nfinfo_sbytes);
642 counter_u64_free(
priv->nfinfo_spackets);
643 counter_u64_free(
priv->nfinfo_sbytes6);
644 counter_u64_free(
priv->nfinfo_spackets6);
645 counter_u64_free(
priv->nfinfo_act_exp);
646 counter_u64_free(
priv->nfinfo_inact_exp);
655 caddr_t upper_ptr, uint8_t upper_proto, uint8_t
flags,
656 unsigned int src_if_index)
664 uint8_t tcp_flags = 0;
666 bzero(&
r,
sizeof(
r));
668 if (ip->ip_v != IPVERSION)
671 hlen = ip->ip_hl << 2;
672 if (hlen <
sizeof(
struct ip))
675 eproto = ETHERTYPE_IP;
679 r.r_src = ip->ip_src;
680 r.r_dst = ip->ip_dst;
683 plen = ntohs(ip->ip_len);
686 r.r_tos = ip->ip_tos;
688 r.r_i_ifx = src_if_index;
699 if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
705 tcp = (
struct tcphdr *)((caddr_t )ip + hlen);
706 r.r_sport = tcp->th_sport;
707 r.r_dport = tcp->th_dport;
708 tcp_flags = tcp->th_flags;
712 r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
716 counter_u64_add(
priv->nfinfo_packets, 1);
717 counter_u64_add(
priv->nfinfo_bytes, plen);
730 TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
731 if (bcmp(&
r, &fle->
f.
r,
sizeof(
struct flow_rec)) == 0)
734 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
737 counter_u64_add(
priv->nfinfo_act_exp, 1);
746 fle->
f.
last = time_uptime;
754 if (tcp_flags & TH_FIN || tcp_flags & TH_RST ||
AGED(fle) ||
756 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
759 counter_u64_add(
priv->nfinfo_act_exp, 1);
766 if (fle != TAILQ_LAST(&hsh->head, fhead)) {
767 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
768 TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
772 error = hash_insert(
priv, hsh, &
r, plen,
flags, tcp_flags);
774 mtx_unlock(&hsh->
mtx);
784 caddr_t upper_ptr, uint8_t upper_proto, uint8_t
flags,
785 unsigned int src_if_index)
793 uint8_t tcp_flags = 0;
796 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
799 bzero(&
r,
sizeof(
r));
801 r.src.r_src6 = ip6->ip6_src;
802 r.dst.r_dst6 = ip6->ip6_dst;
808 plen = ntohs(ip6->ip6_plen) +
sizeof(
struct ip6_hdr);
812 r.r_tos = ip->ip_tos;
815 switch(upper_proto) {
820 tcp = (
struct tcphdr *)upper_ptr;
821 r.r_ports = *(uint32_t *)upper_ptr;
822 tcp_flags = tcp->th_flags;
827 r.r_ports = *(uint32_t *)upper_ptr;
832 r.r_ip_p = upper_proto;
833 r.r_i_ifx = src_if_index;
835 counter_u64_add(
priv->nfinfo_packets6, 1);
836 counter_u64_add(
priv->nfinfo_bytes6, plen);
839 hsh = &
priv->hash6[ip6_hash(&
r)];
849 TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
853 if (bcmp(&
r, &fle6->
f.
r,
sizeof(
struct flow6_rec)) == 0)
856 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
859 counter_u64_add(
priv->nfinfo_act_exp, 1);
869 fle6->
f.
last = time_uptime;
877 if (tcp_flags & TH_FIN || tcp_flags & TH_RST ||
AGED(fle6) ||
879 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
882 counter_u64_add(
priv->nfinfo_act_exp, 1);
889 if (fle != TAILQ_LAST(&hsh->head, fhead)) {
890 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
891 TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
895 error = hash6_insert(
priv, hsh, &
r, plen,
flags, tcp_flags);
897 mtx_unlock(&hsh->
mtx);
927 hsh =
priv->hash6 + i;
933 hsh =
priv->hash + i;
953 if (mtx_trylock(&hsh->
mtx) == 0) {
965 TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
966 if (hsh->
mtx.mtx_lock & MTX_CONTESTED) {
969 mtx_unlock(&hsh->
mtx);
976 if (list_id < req->list_id)
1001 mtx_unlock(&hsh->
mtx);
1005 mtx_unlock(&hsh->
mtx);
1017 struct mbuf *m =
NGI_M(item);
1031 header->unix_secs = htonl(ts.tv_sec);
1032 header->unix_nsecs = htonl(ts.tv_nsec);
1040 if (
priv->export != NULL)
1057 rec = &dgram->
r[
header->count];
1061 (
"ng_netflow: export too big"));
1067 rec->
i_ifx = htons(fle->
f.fle_i_ifx);
1076 rec->
prot = fle->
f.
r.r_ip_p;
1077 rec->
tos = fle->
f.
r.r_tos;
1104 used = uma_zone_get_cur(
priv->zone);
1105 for (hsh =
priv->hash, i = 0; i <
NBUCKETS; hsh++, i++) {
1109 if (mtx_trylock(&hsh->
mtx) == 0)
1112 TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
1117 if (hsh->
mtx.mtx_lock & MTX_CONTESTED)
1129 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
1133 counter_u64_add(
priv->nfinfo_inact_exp, 1);
1136 mtx_unlock(&hsh->
mtx);
1140 used = uma_zone_get_cur(
priv->zone6);
1141 for (hsh =
priv->hash6, i = 0; i <
NBUCKETS; hsh++, i++) {
1147 if (mtx_trylock(&hsh->
mtx) == 0)
1150 TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
1156 if (hsh->
mtx.mtx_lock & MTX_CONTESTED)
1168 TAILQ_REMOVE(&hsh->head, fle, fle_hash);
1172 counter_u64_add(
priv->nfinfo_inact_exp, 1);
1175 mtx_unlock(&hsh->
mtx);
void ng_netflow_expire(void *arg)
void ng_netflow_cache_flush(priv_p priv)
static int export_send(priv_p, fib_export_p, item_p, int)
int ng_netflow_fib_init(priv_p priv, int fib)
int ng_netflow_flow_show(priv_p priv, struct ngnf_show_header *req, struct ngnf_show_header *resp)
static void return_export_dgram(priv_p priv, fib_export_p fe, item_p item, int flags)
#define FULL_HASH(addr1, addr2, port1, port2)
#define ADDR_HASH(addr1, addr2)
static item_p get_export_dgram(priv_p priv, fib_export_p fe)
void ng_netflow_cache_init(priv_p priv)
static int export_add(item_p, struct flow_entry *)
static void expire_flow(priv_p, fib_export_p, struct flow_entry *, int)
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash")
void ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
#define NETFLOW_V5_MAX_RECORDS
struct netflow_v5_record r[NETFLOW_V5_MAX_RECORDS]
void return_export9_dgram(priv_p priv, fib_export_p fe, item_p item, struct netflow_v9_packet_opt *t, int flags)
void ng_netflow_v9_cache_init(priv_p priv)
int export9_add(item_p item, struct netflow_v9_packet_opt *t, struct flow_entry *fle)
int export9_send(priv_p priv, fib_export_p fe, item_p item, struct netflow_v9_packet_opt *t, int flags)
void ng_netflow_v9_cache_flush(priv_p priv)
item_p get_export9_dgram(priv_p priv, fib_export_p fe, struct netflow_v9_packet_opt **tt)
#define NETFLOW_V9_FLOW_V6_L4
#define NETFLOW_V9_FLOW_V4_L4
#define NG_FWD_ITEM_HOOK_FLAGS(error, item, hook, flags)
item_p ng_package_data(struct mbuf *m, int flags)
#define NG_FREE_ITEM(item)
#define NG_NETFLOW_IS_FRAG
#define NG_NETFLOW_CONF_NOSRCLOOKUP
#define priv_to_fib(priv, fib)
#define NG_NETFLOW_CONF_NODSTLOOKUP
int ng_netflow_flow6_add(priv_p, fib_export_p, struct ip6_hdr *, caddr_t, uint8_t, uint8_t, unsigned int)
int ng_netflow_flow_add(priv_p, fib_export_p, struct ip *, caddr_t, uint8_t, uint8_t, unsigned int)
static int ip_hash(struct mbuf *m, int offset)
struct ubt_hci_evhdr header
struct netflow_export_item exp
struct in6_addr next_hop6
union flow6_entry_data::@22 n
struct flow6_entry_data f
struct netflow_v9_packet_opt * item9_opt
struct netflow_v5_header header
struct netflow_v5_record r[NETFLOW_V5_MAX_RECORDS]
uint64_t nfinfo_inact_exp
uint32_t nfinfo_export_failed
uint32_t nfinfo_realloc_mbuf
uint32_t nfinfo_export9_failed
uint32_t nfinfo_alloc_failed
uint64_t nfinfo_spackets6
uint32_t nfinfo_alloc_fibs