#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <sys/sysctl.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <machine/in_cksum.h>

#define TCP_LRO_TS_OPTION \
    ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
    (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
static bool do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
    struct lro_entry *, struct mbuf **, struct mbuf **, struct mbuf **,
    bool *, bool);
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries, CTLFLAG_RDTUN | CTLFLAG_MPSAFE,
    &tcp_lro_entries, 0, "default number of LRO entries");
    "Number of interrupts in a row on the same CPU required before we declare an 'affinity' CPU");
    &tcp_extra_mbuf,
    "Number of times we had an extra compressed ACK dropped into the tp");
    &tcp_would_have_but,
    "Number of times we would have had an extra compressed ACK, but mget failed");
    &tcp_comp_total,
    "Number of mbufs queued with the M_ACKCMP flag set");
    &tcp_bad_csums,
    "Number of packets that the common code saw with bad checksums");
    LIST_INSERT_HEAD(bucket, le, hash_next);
    LIST_REMOVE(le, next);
    LIST_REMOVE(le, hash_next);
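/*
 * Note added for clarity (inferred from the list fields used above): an
 * active lro_entry is linked twice, onto the control structure's active
 * list via "next" and onto its hash bucket via "hash_next", so removal
 * has to unlink it from both lists.
 */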
    unsigned lro_entries, unsigned lro_mbufs)
    unsigned i, elements;
    if (lro_entries > lro_mbufs)
        elements = lro_entries;
    else
        elements = lro_mbufs;
    memset(lc, 0, sizeof(*lc));
        (lro_entries * sizeof(*le));
        malloc(size, M_LRO, M_NOWAIT | M_ZERO);
    memset(lc, 0, sizeof(*lc));
    for (i = 0; i != lro_entries; i++)
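/*
 * Note added for clarity (an inference; parts of the function are elided
 * above): the size passed to malloc() covers both the lro_entry array and
 * the mbuf sort array in a single M_NOWAIT allocation, and on failure the
 * whole lro_ctrl is zeroed again so the caller sees a clean ENOMEM state.
 */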
    const struct ether_vlan_header *eh;
    memset(parser, 0, sizeof(*parser));
    ptr = (uint8_t *)ptr + sizeof(*vxh);
    if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
        eth_type = eh->evl_proto;
        parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
        ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        mlen -= (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
    } else {
        eth_type = eh->evl_encap_proto;
        mlen -= ETHER_HDR_LEN;
        ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
    }
    if (__predict_false(mlen <= 0))
    case htons(ETHERTYPE_IP):
        if (__predict_false(mlen < sizeof(struct ip)))
        if ((parser->ip4->ip_hl << 2) != sizeof(*parser->ip4))
        mlen -= sizeof(struct ip);
        if (__predict_false(mlen < sizeof(struct udphdr)))
        ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
        if (__predict_false(mlen < sizeof(struct tcphdr)))
        if (__predict_false(mlen < (parser->tcp->th_off << 2)))
        ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
    case htons(ETHERTYPE_IPV6):
        if (__predict_false(mlen < sizeof(struct ip6_hdr)))
        mlen -= sizeof(struct ip6_hdr);
        switch (parser->ip6->ip6_nxt) {
        if (__predict_false(mlen < sizeof(struct udphdr)))
        if (__predict_false(mlen < sizeof(struct tcphdr)))
        if (__predict_false(mlen < (parser->tcp->th_off << 2)))
        ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
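/*
 * Summary added for clarity: the low-level parser walks Ethernet, an
 * optional 802.1Q tag, IPv4 or IPv6, and finally UDP or TCP, advancing a
 * byte pointer and decrementing the remaining length "mlen" at every step;
 * each header dereference is preceded by an mlen bounds check so a
 * truncated mbuf is rejected instead of over-read.
 */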
static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;
    if (__predict_false(m->m_flags & M_VLANTAG)) {
        htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
    if (__predict_false((m->m_pkthdr.csum_flags &
        CSUM_TLS_MASK) == CSUM_TLS_DECRYPTED))
        (m->m_len - ((caddr_t)data_ptr - m->m_data)));
    memset(pi, 0, sizeof(*pi));
        ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
    if (__predict_true(m->m_pkthdr.len == len)) {
    } else if (m->m_pkthdr.len > len) {
        m_adj(m, len - m->m_pkthdr.len);
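        /*
         * Note added for clarity: len - m->m_pkthdr.len is negative
         * here, and m_adj() treats a negative count as "trim that many
         * bytes from the tail", so anything beyond the IP payload
         * length (e.g. Ethernet padding) is cut off.
         */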
static struct tcphdr *
tcp_lro_get_th(struct mbuf *m)
{
    return ((struct tcphdr *)((uint8_t *)m->m_data + m->m_pkthdr.lro_tcp_h_off));
}
    while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
    while (csum > 0xffff)
        csum = (csum >> 16) + (csum & 0xffff);
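/*
 * Illustrative sketch (added; the helper name is hypothetical): the fold
 * loop above is ones'-complement reduction, the heart of the Internet
 * checksum.  Any carries above bit 15 are added back into the low 16 bits
 * until the value fits in 16 bits.
 */
static inline uint16_t
csum_fold_sketch(uint32_t csum)
{
    while (csum > 0xffff)
        csum = (csum >> 16) + (csum & 0xffff);
    return ((uint16_t)csum);
}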
        cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen),
            pa->ip6->ip6_nxt, 0);
        cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
    c = (c >> 16) + (c & 0xffff);
    while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
    now = bintime2ns(&bt);
    tov = ((timeout->tv_sec * 1000000000) + (timeout->tv_usec * 1000));
    if (now >= (bintime2ns(&le->alloc_time) + tov)) {
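    /*
     * Note added for clarity: the timeval timeout is converted to
     * nanoseconds (seconds * 10^9 plus microseconds * 10^3) so that it
     * can be compared directly against the bintime-derived age of each
     * entry's allocation.
     */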
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
    if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
        if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
    if (__predict_false(csum != 0)) {
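    /*
     * Summary added for clarity: when the driver has already verified
     * the IPv4 header checksum (CSUM_IP_CHECKED), only CSUM_IP_VALID is
     * consulted; otherwise the header is summed in software and must
     * fold to zero to be accepted.
     */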
    const struct lro_entry *le, const struct mbuf *m,
    int frm, int32_t tcp_data_len, uint32_t th_seq,
    struct timeval tv, btv;
    log.u_bbr.flex8 = frm;
    log.u_bbr.flex1 = tcp_data_len;
    log.u_bbr.flex2 = m->m_pkthdr.len;
    log.u_bbr.flex3 = le->m_head->m_pkthdr.lro_nsegs;
    log.u_bbr.flex4 = le->m_head->m_pkthdr.lro_tcp_d_len;
    log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
    log.u_bbr.delRate = le->m_head->m_flags;
    log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
    log.u_bbr.inflight = th_seq;
    log.u_bbr.delivered = th_ack;
    log.u_bbr.timeStamp = cts;
    log.u_bbr.lt_epoch = le->ack_seq;
    log.u_bbr.pacing_gain = th_win;
    log.u_bbr.cwnd_gain = le->window;
    log.u_bbr.lost = curcpu;
    log.u_bbr.cur_del_rate = (uintptr_t)m;
    log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
    if (in_epoch(net_epoch_preempt))
        log.u_bbr.inhpts = 1;
    else
        log.u_bbr.inhpts = 0;
        0, &log, false, &tv);
    csum = 0xffff - *ptr + value;
    while (csum > 0xffff)
        csum = (csum >> 16) + (csum & 0xffff);
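    /*
     * Note added for clarity: this is the RFC 1624 style incremental
     * checksum update.  0xffff - *ptr is the ones'-complement of the old
     * 16-bit field, so adding it cancels the old value's contribution to
     * the running sum before the new value is folded in.
     */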
        tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
        csum = pa->ip4->ip_sum + 0xffff - temp[0];
        while (csum > 0xffff)
            csum = (csum >> 16) + (csum & 0xffff);
        goto update_tcp_header;
        tlen = (pa->tcp->th_off << 2) + payload_len;
        goto update_tcp_header;
        tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
        csum = pa->ip4->ip_sum + 0xffff - temp[0];
        while (csum > 0xffff)
            csum = (csum >> 16) + (csum & 0xffff);
        goto update_udp_header;
        tlen = sizeof(*pa->udp) + payload_len;
        goto update_udp_header;
    ts_ptr[1] = htonl(le->tsval);
    ts_ptr[2] = le->tsecr;
    csum = pa->tcp->th_sum + 0xffff - delta_sum +
        0xffff - temp[0] + 0xffff - temp[3] + temp[2];
    while (csum > 0xffff)
        csum = (csum >> 16) + (csum & 0xffff);
    csum = temp[0] + temp[1] + 0xffff - temp[2] +
        temp[3] + temp[4] + delta_sum;
    while (csum > 0xffff)
        csum = (csum >> 16) + (csum & 0xffff);
    tlen = sizeof(*pa->udp) + payload_len;
    if (__predict_false(pa->udp->uh_sum != 0)) {
        csum = pa->udp->uh_sum + 0xffff - delta_sum +
            0xffff - temp[0] + 0xffff - temp[2];
        while (csum > 0xffff)
            csum = (csum >> 16) + (csum & 0xffff);
    csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
    while (csum > 0xffff)
        csum = (csum >> 16) + (csum & 0xffff);
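    /*
     * Note added for clarity: for UDP over IPv4 a checksum of zero means
     * "not computed", which is why the incremental update above is only
     * applied when uh_sum is nonzero.
     */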
        le->m_head->m_pkthdr.lro_tcp_d_len,
        le->m_head->m_pkthdr.lro_tcp_d_csum);
        le->m_head->m_pkthdr.lro_tcp_d_len +
    le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
        CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
    le->m_head->m_pkthdr.csum_data = 0xffff;
        le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
        le->m_head->m_pkthdr.lro_tcp_d_len,
        le->m_head->m_pkthdr.lro_tcp_d_csum);
        le->m_head->m_pkthdr.lro_tcp_d_len +
    le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
    le->m_head->m_pkthdr.csum_data = 0xffff;
        le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
        le->m_head->m_pkthdr.lro_tcp_d_len,
        le->m_head->m_pkthdr.lro_tcp_d_csum);
    le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
        CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
    le->m_head->m_pkthdr.csum_data = 0xffff;
        le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
        le->m_head->m_pkthdr.lro_tcp_d_len,
        le->m_head->m_pkthdr.lro_tcp_d_csum);
    le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
    le->m_head->m_pkthdr.csum_data = 0xffff;
        le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
    le->m_head->m_nextpkt = NULL;
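/*
 * Summary added for clarity: before the aggregated chain is handed up the
 * stack, the head mbuf is stamped with CSUM_DATA_VALID | CSUM_PSEUDO_HDR
 * (plus CSUM_IP_CHECKED | CSUM_IP_VALID for IPv4) and csum_data = 0xffff,
 * telling the upper layers that the checksums were already verified so
 * nothing is recomputed per merged segment.
 */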
    struct mbuf *m, struct tcphdr *th)
    tcp_opt_len = (th->th_off << 2);
    tcp_opt_len -= sizeof(*th);
    if (tcp_opt_len == 0 ||
        __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
    le->tsval = ntohl(*(ts_ptr + 1));
    le->tsecr = *(ts_ptr + 2);
    tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
    le->next_seq = ntohl(th->th_seq) + tcp_data_len;
    msave = le->m_head->m_nextpkt;
    le->m_head->m_nextpkt = NULL;
        ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));
    m->m_nextpkt = msave;
    if (m->m_pkthdr.lro_nsegs == 1) {
        csum = p->m_pkthdr.lro_tcp_d_csum;
    } else {
        csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
            (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
        while (csum > 0xffff)
            csum = (csum >> 16) + (csum & 0xffff);
    }
    m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
    m->m_pkthdr.lro_tcp_d_csum = csum;
    m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
    m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
    m = le->m_head->m_nextpkt;
    tcp_opt_len = (th->th_off << 2);
    tcp_opt_len -= sizeof(*th);
    if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
        le->m_head->m_nextpkt = m->m_nextpkt;
        le->m_head->m_nextpkt = m->m_nextpkt;
    while ((m = le->m_head->m_nextpkt) != NULL) {
        le->m_head->m_nextpkt = m->m_nextpkt;
        m->m_nextpkt = NULL;
        tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
        tcp_opt_len = (th->th_off << 2);
        tcp_opt_len -= sizeof(*th);
        tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
        tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;
        if (tcp_opt_len != 0 &&
            __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
        if (tcp_opt_len != 0) {
            uint32_t tsval = ntohl(*(ts_ptr + 1));
            le->tsecr = *(ts_ptr + 2);
        if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
            (le->flags & TH_ACK)) ||
            (tcp_data_len == 0 &&
            le->window == th->th_win))) {
        if (tcp_data_len != 0 ||
        } else if (th->th_ack == le->ack_seq) {
        if (tcp_data_len == 0) {
            m_adj(m, m->m_pkthdr.len - tcp_data_len);
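            /*
             * Note added for clarity: the m_adj() call above uses a
             * positive count, which trims from the front of the mbuf,
             * stripping the Ethernet/IP/TCP headers so that only the
             * payload bytes remain to be appended to the aggregate.
             */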
    struct inpcb *inp, int32_t *new_m)
    if (__predict_false(tp == NULL))
    if (m != NULL && (m->m_flags & M_ACKCMP) != 0) {
        if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent)) {
            tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0);
        m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
        m = m_gethdr(M_NOWAIT, MT_DATA);
    if (__predict_false(m == NULL)) {
static struct inpcb *
tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th, uint32_t **ppts,
    bool *other_opts)
    switch (th->th_off << 2) {
    case (sizeof(*th) + TCPOLEN_TSTAMP_APPA):
        *other_opts = false;
        *other_opts = false;
    if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) != 0)
    if (m->m_pkthdr.lro_tcp_d_len)
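/*
 * Summary added for clarity: an ACK is treated as compressible only when
 * its option block is empty or exactly the well-known timestamp layout,
 * no flags beyond ACK/PUSH/ECE/CWR are set, and it carries no payload;
 * anything else forces the normal (uncompressed) path.
 */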
    struct mbuf **pp, *cmp, *mv_to;
    bool bpf_req, should_wake;
    inp = tcp_lro_lookup(lc->ifp,
    should_wake = false;
    bpf_req = bpf_peers_present(lc->ifp->if_bpf);
    for (pp = &le->m_head; *pp != NULL; ) {
        if (do_bpf_strip_and_compress(inp, lc, le, pp,
            &cmp, &mv_to, &should_wake, bpf_req) == false) {
            pp = &(*pp)->m_nextpkt;
        } else if (mv_to != NULL) {
            pp = &mv_to->m_nextpkt;
    le->m_last_mbuf = __containerof(pp, struct mbuf, m_nextpkt);
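    /*
     * Note added for clarity: at loop exit pp points at the m_nextpkt
     * field of the last mbuf in the chain, so __containerof() recovers
     * the address of the enclosing mbuf, which is cached as m_last_mbuf
     * so later appends need not re-walk the chain.
     */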
    if (le->m_head != NULL) {
        tcp_lro_log(tp, lc, le, NULL, 22, 1, inp->inp_flags2, 0, 1);
        tcp_queue_pkts(inp, tp, le);
    CURVNET_SET(lc->ifp->if_vnet);
    error = tcp_lro_flush_tcphpts(lc, le);
    bzero(le, sizeof(*le));
#ifdef HAVE_INLINE_FLSLL
#define tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
static inline uint64_t
    return (x & ~(x >> 1));
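/*
 * Note added for clarity: with flsll() available the macro computes
 * 1 << (position of the most significant set bit).  The C fallback (body
 * largely elided here) first ORs x with progressively larger right shifts
 * of itself so that every bit below the MSB becomes one; the final
 * "x & ~(x >> 1)" then leaves only the most significant bit set.
 */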
    for (x = 1; x < size; x++) {
        for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
            parray[y] = parray[y - 1];
    for (x = 0; x != size; x++) {
        ones |= parray[x].seq;
        zeros |= ~parray[x].seq;
    for (x = y = 0; y != size; y++) {
        if (parray[y].seq & ones)
        parray[x] = parray[y];
    KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));
    CURVNET_SET(lc->ifp->if_vnet);
    (*lc->ifp->if_input)(lc->ifp, mb);
build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m,
    if (m->m_flags & M_TSTMP_LRO)
    else if (m->m_flags & M_TSTMP)
    ae->seq = ntohl(th->th_seq);
    ae->ack = ntohl(th->th_ack);
    if (ts_ptr != NULL) {
        ae->ts_echo = ntohl(ts_ptr[2]);
    ae->win = ntohs(th->th_win);
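/*
 * Summary added for clarity: each compressible pure ACK is boiled down to
 * one fixed-size struct tcp_ackent (sequence and ACK numbers, window,
 * timestamp echo, and flags), so many ACKs can be batched into a single
 * mbuf and handed to the stack in one pass.
 */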
do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
    struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp,
    struct mbuf **mv_to, bool *should_wake, bool bpf_req)
    bool other_opts, can_compress;
    if (__predict_false(bpf_req))
        ETHER_BPF_MTAP(lc->ifp, m);
    tcp_hdr_offset = m->m_pkthdr.lro_tcp_h_off;
        tcp_hdr_offset -= sizeof(*le->outer.ip4);
        m->m_pkthdr.lro_etype = ETHERTYPE_IP;
        tcp_hdr_offset -= sizeof(*le->outer.ip6);
        m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
        tcp_hdr_offset -= sizeof(*le->outer.ip4);
        m->m_pkthdr.lro_etype = ETHERTYPE_IP;
        tcp_hdr_offset -= sizeof(*le->outer.ip6);
        m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
    MPASS(tcp_hdr_offset >= 0);
    m_adj(m, tcp_hdr_offset);
    m->m_flags &= ~M_ACKCMP;
    m->m_pkthdr.lro_tcp_h_off -= tcp_hdr_offset;
    can_compress = tcp_lro_ack_valid(m, th, &ts_ptr, &other_opts);
    if ((other_opts == true) &&
        *should_wake = true;
    if (can_compress == false)
    l3.ptr = mtod(m, void *);
    iptos = l3.ip4->ip_tos;
    nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf);
    if (__predict_false(nm == NULL))
    pp = &nm->m_nextpkt;
    if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
    MPASS(M_TRAILINGSPACE(nm) >= sizeof(struct tcp_ackent));
    idx = (nm->m_len / sizeof(struct tcp_ackent));
    build_ack_entry(&ack_ent[idx], th, m, ts_ptr, iptos);
    nm->m_pkthdr.len += sizeof(struct tcp_ackent);
    m->m_nextpkt = NULL;
static struct lro_head *
    if (M_HASHTYPE_ISHASH(m)) {
        hash = m->m_pkthdr.flowid;
    if (__predict_false(V_ip6_forwarding != 0))
    if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ||
        (m->m_pkthdr.csum_data != 0xffff)) {
    if (__predict_false(pa == NULL))
    if (__predict_false(error != 0))
    error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
    if (__predict_false(error != 0))
    if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
        m->m_flags |= M_TSTMP_LRO;
    tcp_opt_len = (th->th_off << 2);
    tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
        (uint8_t *)m->m_data) - tcp_opt_len;
    tcp_opt_len -= sizeof(*th);
    if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
    if (tcp_data_len == 0)
    else if (__predict_false(csum != 0))
    m->m_nextpkt = NULL;
    m->m_pkthdr.rcvif = lc->ifp;
    m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
    m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
    m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
    m->m_pkthdr.lro_nsegs = 1;
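    /*
     * Summary added for clarity: every accepted segment caches its LRO
     * state in the packet header: the TCP payload checksum and length,
     * the byte offset of the TCP header within the mbuf, and an initial
     * segment count of one, so later merging never re-parses headers.
     */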
    LIST_FOREACH(le, bucket, hash_next) {
    if (tcp_data_len == 0 &&
        LIST_REMOVE(le, next);
    if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ||
        (m->m_pkthdr.csum_data != 0xffff)) {
    CURVNET_SET(lc->ifp->if_vnet);
    if (__predict_false((lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
        (*lc->ifp->if_input)(lc->ifp, mb);
        (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
        (((uint64_t)mb->m_pkthdr.flowid) << 24) |
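    /*
     * Note added for clarity (the low bits are elided above): the 64-bit
     * sort key packs the hash type into the top byte and the flowid below
     * it, presumably with the queue position in the low bits, so a single
     * integer sort groups mbufs by flow while preserving their arrival
     * order within each flow.
     */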