#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/route/route_ctl.h>
#include <net/route/fib_algo.h>
#define DXR_TRIE_BITS 20

#define DXR_D (DXR_TRIE_BITS - 1)

#define D_TBL_SIZE (1 << DXR_D)
#define DIRECT_TBL_SIZE (1 << DXR_TRIE_BITS)
#define DXR_RANGE_MASK (0xffffffffU >> DXR_TRIE_BITS)
#define DXR_RANGE_SHIFT (32 - DXR_TRIE_BITS)

#define DESC_BASE_BITS 22
#define DESC_FRAGMENTS_BITS (32 - DESC_BASE_BITS)
#define BASE_MAX ((1 << DESC_BASE_BITS) - 1)
#define RTBL_SIZE_INCR (BASE_MAX / 64)
#if DXR_TRIE_BITS < 24
#define FRAGS_MASK_SHORT ((1 << (23 - DXR_TRIE_BITS)) - 1)
#else
#define FRAGS_MASK_SHORT 0
#endif
#define FRAGS_PREF_SHORT (((1 << DESC_FRAGMENTS_BITS) - 1) & \
    ~FRAGS_MASK_SHORT)
#define FRAGS_MARK_XL (FRAGS_PREF_SHORT - 1)
#define FRAGS_MARK_HIT (FRAGS_PREF_SHORT - 2)

#define IS_SHORT_FORMAT(x) ((x & FRAGS_PREF_SHORT) == FRAGS_PREF_SHORT)
#define IS_LONG_FORMAT(x) ((x & FRAGS_PREF_SHORT) != FRAGS_PREF_SHORT)
#define IS_XL_FORMAT(x) (x == FRAGS_MARK_XL)

#define RE_SHORT_MAX_NH ((1 << (DXR_TRIE_BITS - 8)) - 1)
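/*
 * Each direct table entry carries a 22-bit "base" and a 10-bit "fragments"
 * field.  A fragments value of FRAGS_MARK_HIT means the base field already
 * holds the nexthop index, so no range table access is needed.  When all of
 * the FRAGS_PREF_SHORT bits are set, the chunk uses the compact short range
 * entry format; FRAGS_MARK_XL flags an extra-long chunk whose true fragment
 * count is stored in the chunk itself.
 */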
#define CHUNK_HASH_BITS 16
#define CHUNK_HASH_SIZE (1 << CHUNK_HASH_BITS)
#define CHUNK_HASH_MASK (CHUNK_HASH_SIZE - 1)

#define TRIE_HASH_BITS 16
#define TRIE_HASH_SIZE (1 << TRIE_HASH_BITS)
#define TRIE_HASH_MASK (TRIE_HASH_SIZE - 1)

#define XTBL_SIZE_INCR (DIRECT_TBL_SIZE / 16)

#define UNUSED_BUCKETS 8
#if DXR_TRIE_BITS < 24

#define V_frag_limit VNET(frag_limit)
#define DXR_LOOKUP_STAGE \
    if (masked_dst < range[middle].start) { \
        upperbound = middle; \
        middle = (middle + lowerbound) / 2; \
    } else if (masked_dst < range[middle + 1].start) \
        return (range[middle].nexthop); \
    else { \
        lowerbound = middle + 1; \
        middle = (upperbound + middle + 1) / 2; \
    } \
    if (upperbound == lowerbound) \
        return (range[lowerbound].nexthop);
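/*
 * DXR_LOOKUP_STAGE performs a single step of binary search over a range
 * chunk: it narrows the [lowerbound, upperbound] window around "middle"
 * and returns the nexthop as soon as the matching range entry is
 * bracketed.  The range lookup routines expand this macro repeatedly to
 * unroll the search.
 */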
#if DXR_TRIE_BITS < 24
        upperbound = upperbound * 2 + 1;
    middle = upperbound / 2;
    middle = upperbound / 2;
#define DXR_LOOKUP_DEFINE(D) \
    dxr_lookup_##D(struct dxr *dxr, uint32_t dst) \
        struct direct_entry de; \
        uint16_t *dt = dxr->d; \
        struct direct_entry *xt = dxr->x; \
        de = xt[(dt[dst >> (32 - (D))] << (DXR_TRIE_BITS - (D))) \
            + ((dst >> DXR_RANGE_SHIFT) & \
            (0xffffffffU >> (32 - DXR_TRIE_BITS + (D))))]; \
        if (__predict_true(de.fragments == FRAGS_MARK_HIT)) \
            return (de.base); \
        return (range_lookup(dxr->r, de, dst)); \
 \
    static struct nhop_object * \
    dxr_fib_lookup_##D(void *algo_data, \
        const struct flm_lookup_key key, uint32_t scopeid __unused) \
        struct dxr *dxr = algo_data; \
        return (dxr->nh_tbl[dxr_lookup_##D(dxr, \
            ntohl(key.addr4.s_addr))]); \
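/*
 * DXR_LOOKUP_DEFINE(D) generates a lookup pair for one split of the
 * DXR_TRIE_BITS-wide direct index: the top D bits select a 16-bit entry
 * in dt[], which points at a (1 << (DXR_TRIE_BITS - D)) entry block in
 * the xt[] extension table, indexed by the remaining direct bits.
 * Example with DXR_TRIE_BITS = 20 and D = 16: dt[dst >> 16] << 4 selects
 * a 16-entry block and (dst >> 12) & 0xf picks the entry within it, so
 * identical 16-entry blocks can be shared among many dt[] slots.
 */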
#if DXR_TRIE_BITS > 16
    struct route_nhop_data rnd;

    da->dst.sin_addr.s_addr = htonl(dst_u32);
        rt_get_inet_prefix_plen(rt, &addr, &fhp->preflen, &scopeid);
        fhp->nexthop = fib_get_nhop_idx(da->fd, rnd.rnd_nhop);
        fhp->end = 0xffffffffU;
        hash = (hash << 7) + (hash >> 13) + *p;

    return (hash + (hash >> 16));
        if (cdp->cd_hash != hash || cdp->cd_cur_size != size ||
        da->rtbl_top = fdesc->base;
        fdesc->base = cdp->cd_base;

    for (cdp = NULL, i = size; cdp == NULL && i < UNUSED_BUCKETS; i++)
        cdp = LIST_FIRST(&da->unused_chunks[i]);

        LIST_FOREACH(empty_cdp, &da->unused_chunks[0], cd_hash_le)
            if (empty_cdp->cd_max_size >= size && (cdp == NULL ||
                empty_cdp->cd_max_size < cdp->cd_max_size)) {
                if (empty_cdp->cd_max_size == size)

        fdesc->base = cdp->cd_base;
        da->rtbl_top -= size;
        da->unused_chunks_size -= cdp->cd_max_size;
        if (cdp->cd_max_size > size) {
            if (empty_cdp == NULL)
            LIST_INSERT_BEFORE(cdp, empty_cdp, cd_all_le);
            empty_cdp->cd_base = cdp->cd_base + size;
            empty_cdp->cd_cur_size = 0;
            empty_cdp->cd_max_size = cdp->cd_max_size - size;

            i = empty_cdp->cd_max_size;
            LIST_INSERT_HEAD(&da->unused_chunks[i], empty_cdp,

            da->unused_chunks_size += empty_cdp->cd_max_size;
            cdp->cd_max_size = size;
        LIST_REMOVE(cdp, cd_hash_le);

        cdp->cd_max_size = size;
        cdp->cd_base = fdesc->base;
        LIST_INSERT_HEAD(&da->all_chunks, cdp, cd_all_le);
        KASSERT(cdp->cd_base + cdp->cd_max_size == da->rtbl_top,
            ("dxr: %s %d", __FUNCTION__, __LINE__));

    cdp->cd_cur_size = size;

    if (da->rtbl_top >= da->rtbl_size) {
            FIB_PRINTF(LOG_ERR, da->fd,
                "structural limit exceeded at %d "
                "range table elements", da->rtbl_top);
        FIB_PRINTF(i, da->fd, "range table at %d%% structural limit",
        if (cdp->cd_hash == hash && cdp->cd_cur_size == size &&

    KASSERT(cdp != NULL, ("dxr: dangling chunk"));
    if (--cdp->cd_refcnt > 0)

    LIST_REMOVE(cdp, cd_hash_le);
    da->unused_chunks_size += cdp->cd_max_size;
    cdp->cd_cur_size = 0;

    cdp2 = LIST_NEXT(cdp, cd_all_le);
    if (cdp2 != NULL && cdp2->cd_cur_size == 0) {
        KASSERT(cdp2->cd_base + cdp2->cd_max_size == cdp->cd_base,
            ("dxr: %s %d", __FUNCTION__, __LINE__));
        LIST_REMOVE(cdp, cd_all_le);
        LIST_REMOVE(cdp2, cd_hash_le);
        cdp2->cd_max_size += cdp->cd_max_size;

    cdp2 = LIST_PREV(cdp, &da->all_chunks, chunk_desc, cd_all_le);
    if (cdp2 != NULL && cdp2->cd_cur_size == 0) {
        KASSERT(cdp->cd_base + cdp->cd_max_size == cdp2->cd_base,
            ("dxr: %s %d", __FUNCTION__, __LINE__));
        LIST_REMOVE(cdp, cd_all_le);
        LIST_REMOVE(cdp2, cd_hash_le);
        cdp2->cd_max_size += cdp->cd_max_size;
        cdp2->cd_base = cdp->cd_base;

    if (cdp->cd_base + cdp->cd_max_size == da->rtbl_top) {
        KASSERT(cdp == LIST_FIRST(&da->all_chunks),
            ("dxr: %s %d", __FUNCTION__, __LINE__));
        da->rtbl_top -= cdp->cd_max_size;
        da->unused_chunks_size -= cdp->cd_max_size;
        LIST_REMOVE(cdp, cd_all_le);

    i = cdp->cd_max_size;
    LIST_INSERT_HEAD(&da->unused_chunks[i], cdp, cd_hash_le);
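/*
 * chunk_unref() drops a reference to a range chunk.  Once the refcount
 * hits zero the chunk becomes a hole: it is merged with empty
 * neighbouring descriptors, trimmed off the top of the range table heap
 * if it ends there, or otherwise queued in the unused_chunks[] buckets
 * for later reuse by chunk_ref().
 */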
    for (i = 0; i < (1 << dxr_x); i++) {
        hash = (hash << 3) ^ (hash >> 3);
            (void *) &da->direct_tbl[(index << dxr_x) + i];

    return (hash + (hash >> 16));
    LIST_FOREACH(tp, &da->trie_hashtbl[hash & TRIE_HASH_MASK], td_hash_le)
        if (tp->td_hash == hash &&
            &da->x_tbl[tp->td_index << dxr_x],
            sizeof(*da->x_tbl) << dxr_x) == 0) {
            return(tp->td_index);

    tp = LIST_FIRST(&da->unused_trie);
        LIST_REMOVE(tp, td_hash_le);
        da->unused_trie_cnt--;
        LIST_INSERT_HEAD(&da->all_trie, tp, td_all_le);
        tp->td_index = da->all_trie_cnt++;

    memcpy(&da->x_tbl[tp->td_index << dxr_x],
    if (da->all_trie_cnt >= da->xtbl_size >> dxr_x) {
            sizeof(*da->x_tbl) * da->xtbl_size, M_DXRAUX, M_NOWAIT);
        if (da->x_tbl == NULL)
    return(tp->td_index);
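/*
 * trie_ref() does for direct table blocks what chunk_ref() does for
 * range chunks: a (1 << dxr_x) entry slice of direct_tbl[] is hashed and
 * either matched against an identical block already present in x_tbl[]
 * or copied into a new (or recycled) x_tbl[] slot, growing x_tbl[] when
 * it fills up.
 */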
    if (--tp->td_refcnt > 0)

    LIST_REMOVE(tp, td_hash_le);
    da->unused_trie_cnt++;
    if (tp->td_index != da->all_trie_cnt - 1) {
        LIST_INSERT_HEAD(&da->unused_trie, tp, td_hash_le);

        da->unused_trie_cnt--;
        LIST_REMOVE(tp, td_all_le);
        LIST_FOREACH(tp, &da->unused_trie, td_hash_le)
            if (tp->td_index == da->all_trie_cnt - 1) {
                LIST_REMOVE(tp, td_hash_le);
    } while (tp != NULL);
    for (i = da->heap_index; i >= 0; i--) {
        if (preflen > da->heap[i].preflen)
        else if (preflen < da->heap[i].preflen)
            da->heap[i + 1] = da->heap[i];

    fhp = &da->heap[i + 1];
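/*
 * heap_inject() inserts a prefix into the small per-chunk heap, kept
 * sorted by ascending prefix length, so the most specific prefix covering
 * the current address always sits at da->heap[da->heap_index].
 */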
        &da->range_tbl[da->rtbl_top + da->rtbl_work_frags].re;
    struct heap_entry *fhp = &da->heap[da->heap_index];

    rt_get_inet_prefix_plen(rt, &addr, &preflen, &scopeid);
    start = ntohl(addr.s_addr);
        end |= (0xffffffffU >> preflen);
    nh = fib_get_nhop_idx(da->fd, rt_get_raw_nhop(rt));

    if (start == fhp->start)
        while (start > fhp->end) {
            if (da->heap_index > 0) {
                da->rtbl_work_frags++;
                da->rtbl_work_frags++;
    } else if (da->rtbl_work_frags) {
        if ((--fp)->nexthop == nh)
            da->rtbl_work_frags--;
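/*
 * dxr_walk() is the rib_walk_from() callback: for each route inside the
 * current chunk it computes the covered address span and nexthop index,
 * pops expired entries off the prefix heap, and emits a new range table
 * fragment whenever the resolved nexthop changes, collapsing adjacent
 * fragments that resolve to the same nexthop.
 */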
#if DXR_TRIE_BITS < 24

    da->rtbl_work_frags = 0;
    fp->nexthop = da->heap[0].nexthop;

    da->dst.sin_addr.s_addr = htonl(first);
    da->work_chunk = chunk;
    rib_walk_from(da->fibnum, AF_INET, RIB_FLAG_LOCKED,
        (struct sockaddr *) &da->dst, (struct sockaddr *) &da->mask,

    fp = &da->range_tbl[da->rtbl_top + da->rtbl_work_frags].re;
    fhp = &da->heap[da->heap_index];
        if (da->heap_index > 0) {
            da->rtbl_work_frags++;

    if (da->rtbl_work_frags == 0) {

#if DXR_TRIE_BITS < 24
    for (i = 0; i <= da->rtbl_work_frags; i++, fp++)
    if (i == da->rtbl_work_frags + 1) {
        for (i = 0; i <= da->rtbl_work_frags; i++, fp++, fps++) {
        da->rtbl_work_frags >>= 1;
        memmove(&da->range_tbl[da->rtbl_top + 1],
            (da->rtbl_work_frags + 1) * sizeof(*da->range_tbl));
        da->rtbl_work_frags++;
    da->rtbl_top += (da->rtbl_work_frags + 1);
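/*
 * update_chunk() rebuilds the range table chunk covering one direct
 * table slot: it seeds the heap, walks the routes inside the chunk,
 * flushes whatever remains on the heap, and then encodes the result.
 * A chunk holding a single fragment is folded straight into the direct
 * table entry (FRAGS_MARK_HIT); when all fragment boundaries fall on
 * 256-address multiples and nexthop indices fit in RE_SHORT_MAX_NH, the
 * chunk is compacted into the short range entry format.
 */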
    struct rib_rtable_info rinfo;
    struct timeval t0, t1, t2, t3;
    uint32_t d_tbl_size, dxr_x, d_size, x_size;
    uint32_t ti, trie_rebuild = 0, prev_size = 0;

    KASSERT(dxr->d == NULL, ("dxr: d not free"));

        da = malloc(sizeof(*dxr->aux), M_DXRAUX, M_NOWAIT);
        LIST_INIT(&da->all_chunks);
        LIST_INIT(&da->all_trie);
        bzero(&da->dst, sizeof(da->dst));
        bzero(&da->mask, sizeof(da->mask));
        da->dst.sin_len = sizeof(da->dst);
        da->mask.sin_len = sizeof(da->mask);
        da->dst.sin_family = AF_INET;
        da->mask.sin_family = AF_INET;

    if (da->x_tbl == NULL) {
        da->x_tbl = malloc(sizeof(*da->x_tbl) * da->xtbl_size,
        if (da->x_tbl == NULL)

    fib_get_rtable_info(fib_get_rh(da->fd), &rinfo);

    if (da->updates_low > da->updates_high)
        bzero(da->chunk_hashtbl, sizeof(da->chunk_hashtbl));
        while ((cdp = LIST_FIRST(&da->all_chunks)) != NULL) {
            LIST_REMOVE(cdp, cd_all_le);
            LIST_INIT(&da->unused_chunks[i]);
        da->unused_chunks_size = 0;

    da->prefixes = rinfo.num_prefixes;
    for (i = da->updates_low; i <= da->updates_high; i++) {

    range_frag = da->unused_chunks_size * 10000ULL / da->rtbl_top;

    r_size = sizeof(*da->range_tbl) * da->rtbl_top;

        abs(fls(da->prefixes) - fls(da->trie_rebuilt_prefixes)) > 1)
        da->trie_rebuilt_prefixes = da->prefixes;

        bzero(da->trie_hashtbl, sizeof(da->trie_hashtbl));
        while ((tp = LIST_FIRST(&da->all_trie)) != NULL) {
            LIST_REMOVE(tp, td_all_le);
        LIST_INIT(&da->unused_trie);
        da->all_trie_cnt = da->unused_trie_cnt = 0;

    d_tbl_size = (1 << da->d_bits);

    for (i = da->updates_low >> dxr_x; i <= da->updates_high >> dxr_x;
        for (int j = 0; j < (1 << dxr_x); j += 32)

    if (da->all_trie_cnt)
        trie_frag = da->unused_trie_cnt * 10000ULL / da->all_trie_cnt;

    d_size = sizeof(*da->d_tbl) * d_tbl_size;
    dxr_tot_size = d_size + x_size + r_size;

    if (trie_rebuild == 1) {
        if (prev_size == 0 || dxr_tot_size <= prev_size)
        prev_size = dxr_tot_size;
        goto dxr2_try_squeeze;
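    /*
     * The dxr2_try_squeeze pass is retried here with successively
     * different d_bits splits; the build settles on the split that
     * yields the smallest combined d_tbl + x_tbl + range table size
     * (dxr_tot_size).
     */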
    dxr_tot_size = sizeof(da->direct_tbl) + r_size;

    dxr->d = malloc(dxr_tot_size, M_DXRLPM, M_NOWAIT);
    dxr->x = ((char *) dxr->d) + d_size;
    dxr->r = ((char *) dxr->x) + x_size;
    dxr->x_mask = 0xffffffffU >> (32 - dxr_x);

    if (da->updates_low <= da->updates_high)
            (da->updates_high - da->updates_low) / 8 + 1);
    da->updates_high = 0;
        FIB_PRINTF(LOG_INFO, da->fd, "D%dX%dR, %d prefixes, %d nhops (max)",
            da->d_bits, dxr_x, rinfo.num_prefixes, rinfo.num_nhops);
        FIB_PRINTF(LOG_INFO, da->fd, "D%dR, %d prefixes, %d nhops (max)",
            DXR_D, rinfo.num_prefixes, rinfo.num_nhops);

    i = dxr_tot_size * 100;
    if (rinfo.num_prefixes)
        i /= rinfo.num_prefixes;
    FIB_PRINTF(LOG_INFO, da->fd, "%d.%02d KBytes, %d.%02d Bytes/prefix",
        dxr_tot_size / 1024, dxr_tot_size * 100 / 1024 % 100,

        FIB_PRINTF(LOG_INFO, da->fd,
            "%d.%02d%% trie, %d.%02d%% range fragmentation",
            trie_frag / 100, trie_frag % 100,
            range_frag / 100, range_frag % 100);
        FIB_PRINTF(LOG_INFO, da->fd, "%d.%01d%% range fragmentation",
            range_frag / 100, range_frag % 100);

    i = (t1.tv_sec - t0.tv_sec) * 1000000 + t1.tv_usec - t0.tv_usec;
    FIB_PRINTF(LOG_INFO, da->fd, "range table %s in %u.%03u ms",
        range_rebuild ? "rebuilt" : "updated", i / 1000, i % 1000);

    i = (t2.tv_sec - t1.tv_sec) * 1000000 + t2.tv_usec - t1.tv_usec;
    FIB_PRINTF(LOG_INFO, da->fd, "trie %s in %u.%03u ms",
        trie_rebuild ? "rebuilt" : "updated", i / 1000, i % 1000);

    i = (t3.tv_sec - t2.tv_sec) * 1000000 + t3.tv_usec - t2.tv_usec;
    FIB_PRINTF(LOG_INFO, da->fd, "snapshot forked in %u.%03u ms",
        i / 1000, i % 1000);
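/*
 * dxr_build() produces a new lookup snapshot in three timed phases, as
 * reported above: the range table is rebuilt or incrementally updated
 * for the chunks marked dirty, the D/X trie is rebuilt or updated on top
 * of it, and finally a flat read-only copy (d, x and r in a single
 * allocation) is forked off for the datapath while the mutable build
 * state stays in dxr->aux.
 */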
static struct nhop_object *
    struct dxr *dxr = algo_data;
static enum flm_op_result
    struct dxr *old_dxr = old_data;

    dxr = malloc(sizeof(*dxr), M_DXRAUX, M_NOWAIT);
        return (FLM_REBUILD);

    if (old_dxr != NULL && old_dxr->aux != NULL) {
        atomic_add_int(&da->refcnt, 1);

    return (FLM_SUCCESS);
    free(dxr->d, M_DXRLPM);
    free(dxr, M_DXRAUX);

    if (da == NULL || atomic_fetchadd_int(&da->refcnt, -1) > 1)

    while ((cdp = LIST_FIRST(&da->all_chunks)) != NULL) {
        LIST_REMOVE(cdp, cd_all_le);
    while ((tp = LIST_FIRST(&da->all_trie)) != NULL) {
        LIST_REMOVE(tp, td_all_le);
    free(da->x_tbl, M_DXRAUX);
    switch (da->d_bits) {
#if DXR_TRIE_BITS > 16
    case 16:
        return (dxr_fib_lookup_16);
    case 15:
        return (dxr_fib_lookup_15);
    case 14:
        return (dxr_fib_lookup_14);
    case 13:
        return (dxr_fib_lookup_13);
    case 12:
        return (dxr_fib_lookup_12);
    case 11:
        return (dxr_fib_lookup_11);
    case 10:
        return (dxr_fib_lookup_10);
    case 9:
        return (dxr_fib_lookup_9);
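/*
 * choose_lookup_fn() returns the lookup variant generated by
 * DXR_LOOKUP_DEFINE() for the d_bits split selected by the last
 * dxr_build() pass, so the shift and mask widths used in the datapath
 * are compile-time constants.
 */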
static enum flm_op_result
        return (FLM_REBUILD);
        return (FLM_REBUILD);
    return (FLM_SUCCESS);

static enum flm_op_result
    return (FLM_SUCCESS);

static enum flm_op_result
static enum flm_op_result
    struct dxr *new_dxr;
    struct fib_dp new_dp;
    enum flm_op_result res;
    struct rib_rtable_info rinfo;
    int update_delta = 0;

    KASSERT(data != NULL, ("%s: NULL data", __FUNCTION__));
    KASSERT(q != NULL, ("%s: NULL q", __FUNCTION__));
    KASSERT(q->count < q->size, ("%s: q->count %d q->size %d",
        __FUNCTION__, q->count, q->size));
    KASSERT(da != NULL, ("%s: NULL dxr->aux", __FUNCTION__));

    FIB_PRINTF(LOG_INFO, da->fd, "processing %d update(s)", q->count);
    for (ui = 0; ui < q->count; ui++) {
        if (q->entries[ui].nh_new != NULL)
        if (q->entries[ui].nh_old != NULL)

        plen = q->entries[ui].plen;
        ip = ntohl(q->entries[ui].addr4.s_addr);
        hmask = 0xffffffffU >> plen;

        if ((start & 0x1f) == 0 && (end & 0x1f) == 0x1f)
            for (i = start >> 5; i <= end >> 5; i++)
            for (i = start; i <= end; i++)
        if (start < da->updates_low)
            da->updates_low = start;
        if (end > da->updates_high)
            da->updates_high = end;

    fib_get_rtable_info(fib_get_rh(da->fd), &rinfo);
    KASSERT(da->prefixes + update_delta == rinfo.num_prefixes,
        ("%s: update count mismatch", __FUNCTION__));

    if (res != FLM_SUCCESS)

    if (new_dxr->d == NULL) {
        return (FLM_REBUILD);

    new_dp.arg = new_dxr;
    if (fib_set_datapath_ptr(dxr->fd, &new_dp)) {
        fib_set_algo_ptr(dxr->fd, new_dxr);
        return (FLM_SUCCESS);

    return (FLM_REBUILD);
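/*
 * dxr_change_rib_batch() applies a queued batch of RIB changes: each
 * affected prefix is converted into a span of direct table chunks which
 * is flagged in updates_mask[] (whole 32-chunk words where the span is
 * aligned), the updates_low/updates_high window is widened accordingly,
 * and a rebuild limited to the dirty chunks is attempted; on success the
 * new snapshot is installed via fib_set_datapath_ptr() and published
 * with fib_set_algo_ptr(), otherwise a full rebuild is requested.
 */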
    snprintf(buf, sizeof(buf), "%d.%02d%%", V_frag_limit / 100,
        V_frag_limit % 100);
    error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
    if (error != 0 || req->newptr == NULL)
    if (!isdigit(*buf) && *buf != '.')
    for (i = 0, new = 0; isdigit(buf[i]) && i < sizeof(buf); i++)
        new = new * 10 + buf[i] - '0';
    if (buf[i++] == '.') {
        if (!isdigit(buf[i]))
        new += (buf[i++] - '0') * 10;
        if (isdigit(buf[i]))
            new += buf[i++] - '0';

    snprintf(buf, sizeof(buf), "%d.%02d%%", V_frag_limit / 100,
        V_frag_limit % 100);
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_VNET,
    "Fragmentation threshold to full rebuild");
    .flm_family = AF_INET,
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);