32#include <sys/socket.h>
33#include <sys/socketvar.h>
34#include <sys/sysctl.h>
37#include <sys/eventhandler.h>
38#include <machine/atomic.h>
/*
 * Leading free space in an mbuf: distance from the start of the data
 * buffer (M_START) to the current data pointer.  The _NOWRITE suffix
 * presumably means it skips the writability check the stock
 * M_LEADINGSPACE macro performs — confirm against sys/mbuf.h.
 */
#define M_LEADINGSPACE_NOWRITE(m) \
	((m)->m_data - M_START(m))
/*
 * NOTE(review): fragment — the lines below are the tail ends (description
 * strings / final arguments) of SYSCTL_INT declarations whose opening lines
 * are missing from this chunk, followed by the per-VNET accessor macro for
 * the packet-count default and the tail of an EVENTHANDLER_REGISTER call.
 * Kept verbatim; only annotated.
 */
	"Free saved packets when the memory system comes under pressure");
	"Number of clusters currently referenced on TCP PCAP queues");
	"Maximum number of clusters allowed to be referenced on TCP PCAP "
	"Number of mbufs with external storage reused for the TCP PCAP "
	"Number of mbufs with internal storage reused for the TCP PCAP "
	"Number of new mbufs allocated for the TCP PCAP functionality");

/* Accessor for the per-VNET default packet count (VNET_DEFINE in residue below). */
#define V_tcp_pcap_packets VNET(tcp_pcap_packets)

/* Tail of the SYSCTL declaration exposing V_tcp_pcap_packets. */
	CTLFLAG_RW, &VNET_NAME(tcp_pcap_packets), 0,
	"Default number of packets saved per direction per TCPCB");

/* Tail of an EVENTHANDLER_REGISTER() call — event name/handler not visible. */
	NULL, EVENTHANDLER_PRI_ANY);
/*
 * NOTE(review): stray guards from the cluster-reference bookkeeping
 * helpers (apparently tcp_pcap_adj_cluster_reference and
 * tcp_pcap_m_freem, per the signature residue at the end of the chunk).
 * Both act only on mbufs with external storage (M_EXT); the enclosing
 * function bodies are not visible here.
 */
	if (m->m_flags & M_EXT)

	if (mb->m_flags & M_EXT)
/*
 * NOTE(review): partial extraction of
 * tcp_pcap_copy_bestfit(struct tcphdr *th, struct mbuf *m, struct mbuf *n)
 * (signature in the residue at the end of the chunk).  Interior lines are
 * missing, so the code is kept verbatim and only annotated.
 *
 * Visible intent: copy the TCP header at 'th' plus as much of the
 * surrounding packet data from chain 'm' as fits into the single, empty
 * mbuf 'n'.
 */
	struct mbuf *m_cur = m;
	int bytes_to_copy=0, trailing_data, skip=0, tcp_off;

	/* Sanity: header, packet chain, and destination must be supplied. */
	KASSERT(th, (
	    "%s: called with th == NULL", __func__));
	KASSERT(m, (
	    "%s: called with m == NULL", __func__));
	KASSERT(n, (
	    "%s: called with n == NULL", __func__));

	/* 'n' must be empty, with its data pointer at the buffer start. */
	KASSERT(n->m_len == 0, (
	    "%s: called with n->m_len=%d (expected 0)",
	    __func__, n->m_len));
	KASSERT(n->m_data == M_START(n),
	    (
	    "%s: called with n->m_data != M_START(n)", __func__));

	/* TCP header length in bytes (th_off counts 32-bit words). */
	tcp_off = th->th_off << 2;

	/* Skip leading empty mbufs (loop body not visible in this chunk). */
	while (m && m->m_len == 0)

	/*
	 * Copy just the TCP header, clamped to the capacity of 'n'.
	 * (The branch structure selecting this case is not visible.)
	 */
	bytes_to_copy = tcp_off;
	if (bytes_to_copy > M_SIZE(n))
		bytes_to_copy = M_SIZE(n);
	bcopy(th, n->m_data, bytes_to_copy);
	n->m_len = bytes_to_copy;

	/*
	 * Count bytes that precede 'th' in the chain: whole mbufs before the
	 * one containing the header (enclosing loop not visible)...
	 */
	if ((caddr_t) th >= (caddr_t) m_cur->m_data &&
	    (caddr_t) th < (caddr_t) (m_cur->m_data + m_cur->m_len))
		bytes_to_copy += m_cur->m_len;
	m_cur = m_cur->m_next;

	/* ...plus the header's offset within its own mbuf... */
	bytes_to_copy += (caddr_t) th - (caddr_t) m_cur->m_data;
	/* ...plus the TCP header itself. */
	bytes_to_copy += tcp_off;

	/*
	 * If that exceeds the capacity of 'n', 'skip' the earliest bytes so
	 * the window ends at the TCP header.
	 */
	if (bytes_to_copy > M_SIZE(n)) {
		skip = bytes_to_copy - M_SIZE(n);
		bytes_to_copy = M_SIZE(n);

	/* Data trailing the TCP header inside the header's own mbuf... */
	trailing_data = m_cur->m_len - tcp_off;
	trailing_data -= (caddr_t) th - (caddr_t) m_cur->m_data;
	m_cur = m_cur->m_next;

	/* ...plus the rest of the chain (enclosing loop not visible). */
	trailing_data += m_cur->m_len;
	m_cur = m_cur->m_next;

	/*
	 * Prefer headers over trailing payload when space is short.
	 * NOTE(review): an 'else' that includes trailing_data presumably
	 * sits between these two statements — not visible here.
	 */
	if ((bytes_to_copy + trailing_data) > M_SIZE(n))
		bytes_to_copy = M_SIZE(n);

	bytes_to_copy += trailing_data;

	/* One contiguous copy of the selected window into 'n'. */
	m_copydata(m, skip, bytes_to_copy, n->m_data);
	n->m_len = bytes_to_copy;
/*
 * NOTE(review): partial extraction of
 * tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
 * (signature in the residue at the end of the chunk).  Interior lines are
 * missing, so the code is kept verbatim and only annotated.
 *
 * Visible intent: save a copy of packet 'm' (TCP header at 'th') onto
 * 'queue', recycling the oldest saved mbuf when the queue is full.
 */
	struct mbuf *n = NULL, *mhead;

	/* Sanity: header, packet, and queue must all be supplied. */
	KASSERT(th, (
	    "%s: called with th == NULL", __func__));
	KASSERT(m, (
	    "%s: called with m == NULL", __func__));
	KASSERT(queue, (
	    "%s: called with queue == NULL", __func__));

	/* Skip non-data mbufs at the head of the chain (loop body hidden). */
	while (m && m->m_type != MT_DATA)

	/* Capture disabled for this queue (consequent not visible). */
	if (queue->mq_maxlen == 0)

	/* Queue at capacity: recycle the oldest saved packet(s). */
	while (mbufq_full(queue)) {
		mhead = mbufq_dequeue(queue);

		/* Unmapped (M_EXTPG) storage — handling not visible here. */
		if (mhead->m_flags & M_EXTPG) {
		}
		else if (mhead->m_flags & M_EXT) {
			/* External storage: fix up its reference count. */
			switch (mhead->m_ext.ext_type) {
				/*
				 * Drop our reference; a result of 1 means we
				 * were the last holder (surrounding case
				 * labels not visible).
				 */
				if (atomic_fetchadd_int(
				    mhead->m_ext.ext_cnt, -1) == 1)
				/* Sole owner: reset the count for reuse. */
				*(mhead->m_ext.ext_cnt) = 1;

	/* Re-initialize the recycled mbuf before reuse. */
	m_init(n, M_NOWAIT, MT_DATA, 0);

	/* Nothing recyclable: allocate fresh (failure path not visible). */
	if (!(n = m_get(M_NOWAIT, MT_DATA)))

	/*
	 * Source uses external/unmapped storage (second operand of the '&&'
	 * not visible): share its data pointer rather than copying.
	 */
	if ((m->m_flags & (M_EXT|M_EXTPG)) &&
		n->m_data = m->m_data;
	/* Otherwise copy, if everything up to the end of 'm' fits in 'n'. */
	else if (((m->m_data + m->m_len) - M_START(m)) <= M_SIZE(n)) {
		/* 'n' must be a bare mbuf with internal storage. */
		KASSERT((n->m_flags & (M_EXT | M_PKTHDR)) == 0,
		    (
		    "%s: Unexpected flags (%#x) for mbuf",
		    __func__, n->m_flags));

		/* Unmapped source data must go through m_copydata()... */
		if (m->m_flags & M_EXTPG)
			m_copydata(m, 0, m->m_len, n->m_data);
		/* ...mapped data is bcopy'd (call truncated in this chunk). */
			bcopy(M_START(m), n->m_dat,

	/* Duplicate the remainder of the chain (best effort, M_NOWAIT). */
	n->m_next = m_copym(m->m_next, 0, M_COPYALL, M_NOWAIT);

	/*
	 * Enqueue the copy.  The mbufq_full() loop above made room, so a
	 * failure here indicates a logic error.
	 */
	if (mbufq_enqueue(queue, n)) {
		KASSERT(0, (
		    "%s: mbufq was unexpectedly full!", __func__));
/*
 * NOTE(review): fragment of tcp_pcap_drain(struct mbufq *queue) — dequeue
 * every saved mbuf; the loop body that frees each 'm' is not visible in
 * this chunk.
 */
	while ((m = mbufq_dequeue(queue)))
	/*
	 * NOTE(review): fragment of tcp_pcap_set_sock_max() — install the
	 * new per-socket cap, then shrink the queue until it complies.  The
	 * loop body that frees each dequeued mbuf is not visible here.
	 */
	queue->mq_maxlen = newval;
	while (queue->mq_len > queue->mq_maxlen)

	/* NOTE(review): fragment of tcp_pcap_get_sock_max() — report the cap. */
	return queue->mq_maxlen;
/*
 * NOTE(review): extraction residue — the lines below are declarations and
 * signatures that were split away from their definitions elsewhere in this
 * file.  Kept verbatim with annotations only; none is complete enough to
 * rewrite safely from this chunk.
 */
/* Helper taking one cluster reference (body not in view). */
static __inline bool tcp_pcap_take_cluster_reference(void)
/* Empty expansion here vs. VNET(tcp_pcap_packets) earlier — TODO reconcile. */
#define V_tcp_pcap_packets
/* One-line form of the aggressive-free sysctl (trailing ';' lost). */
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_pcap_aggressive_free, CTLFLAG_RW, &tcp_pcap_aggressive_free, 0, "Free saved packets when the memory system comes under pressure")
/* Helper adjusting an mbuf's cluster reference count by 'adj'. */
static __inline void tcp_pcap_adj_cluster_reference(struct mbuf *m, int adj)
/* Likely backs the "reused ... external storage" sysctl string above. */
static int tcp_pcap_alloc_reuse_ext
/* Tunable behind the aggressive-free sysctl. */
int tcp_pcap_aggressive_free
/* Likely backs the "reused ... internal storage" sysctl string above. */
static int tcp_pcap_alloc_reuse_mbuf
/* Likely backs the "Maximum number of clusters" sysctl string above. */
static int tcp_pcap_clusters_referenced_max
/* Sets the per-socket queue cap (body fragment appears above). */
void tcp_pcap_set_sock_max(struct mbufq *queue, int newval)
/* Copies the best-fitting window of a packet into one mbuf (fragment above). */
static void tcp_pcap_copy_bestfit(struct tcphdr *th, struct mbuf *m, struct mbuf *n)
/* Likely backs the "new mbufs allocated" sysctl string above. */
static int tcp_pcap_alloc_new_mbuf
/* Per-VNET default packet count (one-line form, trailing ';' lost). */
VNET_DEFINE(int, tcp_pcap_packets)=0
/* Possibly the handler behind the EVENTHANDLER_REGISTER tail above — confirm. */
static void tcp_pcap_max_set(void)
/* Per-tcpcb initialization of the capture queues (body not in view). */
void tcp_pcap_tcpcb_init(struct tcpcb *tp)
/* Frees one saved mbuf, fixing cluster refcounts (fragment above). */
static void tcp_pcap_m_freem(struct mbuf *mb)
/* Frees every mbuf on 'queue' (loop fragment above). */
void tcp_pcap_drain(struct mbufq *queue)
/* Reports the per-socket queue cap (fragment above). */
int tcp_pcap_get_sock_max(struct mbufq *queue)
/* Saves a copy of packet 'm' on 'queue' (fragment above). */
void tcp_pcap_add(struct tcphdr *th, struct mbuf *m, struct mbufq *queue)
/* Empty expansion here vs. the two-line definition at the top — TODO reconcile. */
#define M_LEADINGSPACE_NOWRITE(m)
/* Likely backs the "currently referenced" sysctl string above. */
static int tcp_pcap_clusters_referenced_cur