/*-
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>
#include <vm/vm.h>

#include <net/route.h>
#include <net/vnet.h>

#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>

#ifdef tcpdebug
#include <netinet/tcp_debug.h>
#endif				/* tcpdebug */
#ifdef tcp_offload
#include <netinet/tcp_offload.h>
#endif
/*
 * The hpts uses a 102400-slot wheel. The wheel
 * defines the time in 10 usec increments (102400 x 10).
 * This gives a range of 10 usec - 1024 ms to place
 * an entry within. If the user requests more than
 * 1.024 seconds, a remainder is attached and the hpts,
 * when seeing the remainder, will re-insert the
 * inpcb forward in time from where it is until
 * the remainder is zero.
 */

#define NUM_OF_HPTSI_SLOTS 102400
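
/*
 * Worked example (illustrative; not part of the upstream source): a
 * request to pace 2.5 seconds out is 250000 slots at 10 usec per slot,
 * but at most NUM_OF_HPTSI_SLOTS - 1 of them fit on the wheel:
 *
 *	uint32_t want = 250000;			(slots requested)
 *	uint32_t fit  = NUM_OF_HPTSI_SLOTS - 1;	(what the wheel holds)
 *	inp->inp_hpts_request = want - fit;	(147601 slots remain)
 *
 * The inpcb is inserted as far out as possible and the remainder is
 * worked off on later passes until inp_hpts_request reaches zero.
 */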

/* Each hpts has its own p_mtx which is used for locking */
#define	HPTS_MTX_ASSERT(hpts)	mtx_assert(&(hpts)->p_mtx, MA_OWNED)
#define	HPTS_LOCK(hpts)		mtx_lock(&(hpts)->p_mtx)
#define	HPTS_UNLOCK(hpts)	mtx_unlock(&(hpts)->p_mtx)
struct tcp_hpts_entry {
	/* Cache line 0x00 */
	struct mtx p_mtx;		/* Mutex for hpts */
	struct timeval p_mysleep;	/* Our min sleep time */
	uint64_t syscall_cnt;
	uint64_t sleeping;		/* What the actual sleep was (if sleeping) */
	uint16_t p_hpts_active;		/* Flag that says hpts is awake */
	uint8_t p_wheel_complete;	/* have we completed the wheel arc walk? */
	uint32_t p_curtick;		/* Tick in 10 us the hpts is going to */
	uint32_t p_runningslot;		/* Current tick we are at if we are running */
	uint32_t p_prev_slot;		/* Previous slot we were on */
	uint32_t p_cur_slot;		/* Current slot in wheel hpts is draining */
	uint32_t p_nxt_slot;		/* The next slot outside the current range of
					 * slots that the hpts is running on. */
	int32_t p_on_queue_cnt;		/* Count on queue in this hpts */
	uint32_t p_lasttick;		/* Last tick before the current one */
	uint8_t p_direct_wake :1,	/* boolean */
		p_on_min_sleep:1,	/* boolean */
		p_hpts_wake_scheduled:1, /* boolean */
		p_avail:5;
	uint8_t p_fill[3];		/* Fill to 32 bits */
	/* Cache line 0x40 */
	struct hptsh {
		TAILQ_HEAD(, inpcb)	head;
		uint32_t		count;
		uint32_t		gencnt;
	} *p_hptss;			/* Hptsi wheel */
	uint32_t p_hpts_sleep_time;	/* Current sleep interval having a max
					 * of 255ms */
	uint32_t overidden_sleep;	/* what was overridden by min-sleep for logging */
	uint32_t saved_lasttick;	/* for logging */
	uint32_t saved_curtick;		/* for logging */
	uint32_t saved_curslot;		/* for logging */
	uint32_t saved_prev_slot;	/* for logging */
	uint32_t p_delayed_by;		/* How much were we delayed by */
	/* Cache line 0x80 */
	struct sysctl_ctx_list hpts_ctx;
	struct sysctl_oid *hpts_root;
	struct intr_event *ie;
	void *ie_cookie;
	uint16_t p_num;			/* The hpts number one per cpu */
	uint16_t p_cpu;			/* The hpts CPU */
	/* There is extra space in here */
	/* Cache line 0x100 */
	struct callout co __aligned(CACHE_LINE_SIZE);
} __aligned(CACHE_LINE_SIZE);

static struct tcp_hptsi {
	struct tcp_hpts_entry **rp_ent;	/* Array of hptss */
	uint32_t rp_num_hptss;		/* Number of hpts threads */
} tcp_pace;

MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 2;
#endif
static int tcp_use_irq_cpu = 0;
static uint32_t *cts_last_ran;
static int hpts_does_tp_logging = 0;
static int hpts_use_assigned_cpu = 1;
static int32_t hpts_uses_oldest = OLDEST_THRESHOLD;

static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Hpts controls");
SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TCP Hpts statistics");

#define	timersub(tvp, uvp, vvp)						\
	do {								\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
		if ((vvp)->tv_usec < 0) {				\
			(vvp)->tv_sec--;				\
			(vvp)->tv_usec += 1000000;			\
		}							\
	} while (0)
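
/*
 * Usage sketch (illustrative; not part of the upstream source): compute
 * how long the pacer actually slept by subtracting the time it went to
 * sleep from the current time; the result is normalized so tv_usec
 * stays within [0, 1000000):
 *
 *	struct timeval now, slept_at, delta;
 *
 *	microuptime(&now);
 *	timersub(&now, &slept_at, &delta);
 */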

static int32_t tcp_hpts_precision = 120;

static struct hpts_domain_info {
	int count;
	int cpu[MAXCPU];
} hpts_domains[MAXMEMDOM];

enum {
	IHPTS_NONE = 0,
	IHPTS_ONQUEUE,
	IHPTS_MOVING,
};

counter_u64_t hpts_hopelessly_behind;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
    &hpts_hopelessly_behind,
    "Number of times hpts could not catch up and was behind hopelessly");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

counter_u64_t combined_wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
    &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");

counter_u64_t wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
    &wheel_wrap, "Number of times the pacer lagged enough to wrap the entire wheel");

counter_u64_t hpts_direct_call;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
    &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");

counter_u64_t hpts_wake_timeout;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
    &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");

counter_u64_t hpts_direct_awakening;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
    &hpts_direct_awakening, "Number of times hpts threads woke up via a direct wake");

counter_u64_t hpts_back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
    &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep, no work");

counter_u64_t cpu_uses_flowid;
counter_u64_t cpu_uses_random;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
    &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
    &cpu_uses_random, "Number of times when setting cpuid we used a random value");

TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
    &tcp_bind_threads, 2,
    "Thread Binding tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
    &tcp_use_irq_cpu, 0,
    "Use of irq CPU tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
    &conn_cnt_thresh, 0,
    "How many connections (below) make us use the callout based mechanism");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &hpts_does_tp_logging, 0,
    "Do we add to any tp that has logging on pacer logs");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_assigned_cpu, CTLFLAG_RW,
    &hpts_use_assigned_cpu, 0,
    "Do we start any hpts timer on the assigned cpu?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_oldest, CTLFLAG_RW,
    &hpts_uses_oldest, 0,
    "Do syscalls look for the hpts that has been the longest since running (or just use cpu no if 0)?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
    &dynamic_min_sleep, 250,
    "What is the dynamic minsleep value?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
    &dynamic_max_sleep, 5000,
    "What is the dynamic maxsleep value?");

static int32_t max_pacer_loops = 10;
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
    &max_pacer_loops, 10,
    "What is the maximum number of times the pacer will loop trying to catch up");

#define	HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)

static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;

static int
sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = hpts_sleep_max;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if ((new < dynamic_min_sleep) ||
		    (new > HPTS_MAX_SLEEP_ALLOWED))
			error = EINVAL;
		else
			hpts_sleep_max = new;
	}
	return (error);
}

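/*
 * Usage note (illustrative; not part of the upstream source): this
 * handler backs the net.inet.tcp.hpts.maxsleep sysctl. With 10 usec
 * slots, setting it to 51200 caps the pacer's sleep at roughly 512 ms:
 *
 *	# sysctl net.inet.tcp.hpts.maxsleep=51200
 *
 * Values outside the accepted range are rejected with EINVAL.
 */
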
static int
sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = tcp_min_hptsi_time;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr) {
		if (new < LOWEST_SLEEP_ALLOWED)
			error = EINVAL;
		else
			tcp_min_hptsi_time = new;
	}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &hpts_sleep_max, 0,
    &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
    "Maximum time hpts will sleep");

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_min_hptsi_time, 0,
    &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
    "The minimum time the hpts must sleep before processing more slots");

static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
static int tcp_hpts_no_wake_over_thresh = 1;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
    &ticks_indicate_more_sleep, 0,
    "If we only process this many or less on a timeout, we need longer sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
    &ticks_indicate_less_sleep, 0,
    "If we process this many or more on a timeout, we need less sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
    &tcp_hpts_no_wake_over_thresh, 0,
    "When we are over the threshold on the pacer do we prohibit wakeups?");

static void
tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
	     int slots_to_run, int idx, int from_callout)
{
	union tcp_log_stackspecific log;
	/*
	 * Unused logs are
	 * 64 bit - delRate, rttProp, bw_inuse
	 * 16 bit - cwnd_gain
	 *  8 bit - bbr_state, bbr_substate, inhpts;
	 */
	memset(&log.u_bbr, 0, sizeof(log.u_bbr));
	log.u_bbr.flex1 = hpts->p_nxt_slot;
	log.u_bbr.flex2 = hpts->p_cur_slot;
	log.u_bbr.flex3 = hpts->p_prev_slot;
	log.u_bbr.flex4 = idx;
	log.u_bbr.flex5 = hpts->p_curtick;
	log.u_bbr.flex6 = hpts->p_on_queue_cnt;
	log.u_bbr.flex7 = hpts->p_cpu;
	log.u_bbr.flex8 = (uint8_t)from_callout;
	log.u_bbr.inflight = slots_to_run;
	log.u_bbr.applimited = hpts->overidden_sleep;
	log.u_bbr.delivered = hpts->saved_curtick;
	log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
	log.u_bbr.epoch = hpts->saved_curslot;
	log.u_bbr.lt_epoch = hpts->saved_prev_slot;
	log.u_bbr.pkts_out = hpts->p_delayed_by;
	log.u_bbr.lost = hpts->p_hpts_sleep_time;
	log.u_bbr.pacing_gain = hpts->p_cpu;
	log.u_bbr.pkt_epoch = hpts->p_runningslot;
	log.u_bbr.use_lt_bw = 1;
	TCP_LOG_EVENTP(tp, NULL,
		       &tp->t_inpcb->inp_socket->so_rcv,
		       &tp->t_inpcb->inp_socket->so_snd,
		       TCP_HPTS, 0,
		       0, &log, false, tv);
}

static void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);

	if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
		hpts->p_direct_wake = 0;
		return;
	}
	if (hpts->p_hpts_wake_scheduled == 0) {
		hpts->p_hpts_wake_scheduled = 1;
		swi_sched(hpts->ie_cookie, 0);
	}
}

static void
hpts_timeout_swi(void *arg)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)arg;
	swi_sched(hpts->ie_cookie, 0);
}

static void
inp_hpts_insert(struct inpcb *inp, struct tcp_hpts_entry *hpts)
{
	struct hptsh *hptsh;

	INP_WLOCK_ASSERT(inp);
	HPTS_MTX_ASSERT(hpts);
	MPASS(hpts->p_cpu == inp->inp_hpts_cpu);
	MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));

	hptsh = &hpts->p_hptss[inp->inp_hptsslot];

	if (inp->inp_in_hpts == IHPTS_NONE) {
		inp->inp_in_hpts = IHPTS_ONQUEUE;
		in_pcbref(inp);
	} else if (inp->inp_in_hpts == IHPTS_MOVING) {
		inp->inp_in_hpts = IHPTS_ONQUEUE;
	} else
		MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
	inp->inp_hpts_gencnt = hptsh->gencnt;

	TAILQ_INSERT_TAIL(&hptsh->head, inp, inp_hpts);
	hptsh->count++;
	hpts->p_on_queue_cnt++;
}

static struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;

	INP_LOCK_ASSERT(inp);

	hpts = tcp_pace.rp_ent[inp->inp_hpts_cpu];
	HPTS_LOCK(hpts);

	return (hpts);
}

static void
inp_hpts_release(struct inpcb *inp)
{
	bool released __diagused;

	inp->inp_in_hpts = IHPTS_NONE;
	released = in_pcbrele_wlocked(inp);
	MPASS(released == false);
}

/*
 * Called normally with the INP_LOCKED but it
 * does not matter, the hpts lock is the key
 * but the lock order allows us to hold the
 * INP lock and then get the hpts lock.
 */
void
tcp_hpts_remove(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	struct hptsh *hptsh;

	INP_WLOCK_ASSERT(inp);

	hpts = tcp_hpts_lock(inp);
	if (inp->inp_in_hpts == IHPTS_ONQUEUE) {
		hptsh = &hpts->p_hptss[inp->inp_hptsslot];
		inp->inp_hpts_request = 0;
		if (__predict_true(inp->inp_hpts_gencnt == hptsh->gencnt)) {
			TAILQ_REMOVE(&hptsh->head, inp, inp_hpts);
			MPASS(hptsh->count > 0);
			hptsh->count--;
			MPASS(hpts->p_on_queue_cnt > 0);
			hpts->p_on_queue_cnt--;
			inp_hpts_release(inp);
		} else {
			/*
			 * tcp_hptsi() now owns the TAILQ head of this inp.
			 * Can't TAILQ_REMOVE, just mark it.
			 */
#ifdef INVARIANTS
			struct inpcb *tmp;

			TAILQ_FOREACH(tmp, &hptsh->head, inp_hpts)
				MPASS(tmp != inp);
#endif
			inp->inp_in_hpts = IHPTS_MOVING;
			inp->inp_hptsslot = -1;
		}
	} else if (inp->inp_in_hpts == IHPTS_MOVING) {
		/*
		 * Handle a special race condition:
		 * tcp_hptsi() moves inpcb to detached tailq
		 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
		 * tcp_hpts_insert() sets slot to a meaningful value
		 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
		 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
		 */
		inp->inp_hptsslot = -1;
	}
	HPTS_UNLOCK(hpts);
}

bool
tcp_in_hpts(struct inpcb *inp)
{

	return (inp->inp_in_hpts == IHPTS_ONQUEUE);
}

static inline int
hpts_slot(uint32_t wheel_slot, uint32_t plus)
{
	/*
	 * Given a slot on the wheel, what slot
	 * is that plus ticks out?
	 */
	KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
	return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
}
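
/*
 * Example (illustrative; not part of the upstream source): the modulo
 * arithmetic wraps cleanly at the end of the wheel, e.g.
 * hpts_slot(102399, 2) returns 1 because (102399 + 2) % 102400 == 1.
 */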

static inline int
tick_to_wheel(uint32_t cts_in_wticks)
{
	/*
	 * Given a timestamp in hpts ticks (each tick is 10 usec, so
	 * multiply by 10 to get back to real time), map it onto our
	 * limited-space wheel.
	 */
	return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
}
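
/*
 * Example (illustrative; not part of the upstream source): a timestamp
 * of 102401 ticks (~1.024 seconds at 10 usec per tick) maps to slot
 * 102401 % 102400 == 1, i.e. the wheel silently wraps once per 1.024
 * seconds of wall time.
 */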

static inline int
hpts_slots_diff(int prev_slot, int slot_now)
{
	/*
	 * Given two slots that are someplace
	 * on our wheel, how far are they apart?
	 */
	if (slot_now > prev_slot)
		return (slot_now - prev_slot);
	else if (slot_now == prev_slot)
		/*
		 * Special case, same means we can go all of our
		 * wheel less one slot.
		 */
		return (NUM_OF_HPTSI_SLOTS - 1);
	else
		return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
}
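
/*
 * Examples (illustrative; not part of the upstream source):
 *	hpts_slots_diff(10, 35) == 25			(no wrap)
 *	hpts_slots_diff(102390, 5) == (102400 - 102390) + 5 == 15 (wrapped)
 *	hpts_slots_diff(7, 7) == 102399			(whole-wheel case)
 */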

/*
 * Given a slot on the wheel that is the current time
 * mapped to the wheel (wheel_slot), what is the maximum
 * distance forward that can be obtained without
 * wrapping past either prev_slot or running_slot
 * depending on the hpts state? Also if passed
 * a uint32_t *, fill it with the slot location.
 *
 * Note if you do not give this function the current
 * time (that you think it is) mapped to the wheel slot
 * then the results will not be what you expect and
 * could lead to invalid inserts.
 */
static inline int32_t
max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
{
	uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;

	if ((hpts->p_hpts_active == 1) &&
	    (hpts->p_wheel_complete == 0)) {
		end_slot = hpts->p_runningslot;
		/* Back up one tick */
		if (end_slot == 0)
			end_slot = NUM_OF_HPTSI_SLOTS - 1;
		else
			end_slot--;
		if (target_slot)
			*target_slot = end_slot;
	} else {
		/*
		 * For the case where we are
		 * not active, or we have
		 * completed the pass over
		 * the wheel, we can use the
		 * prev tick and subtract one from it. This puts us
		 * as far out as possible on the wheel.
		 */
		end_slot = hpts->p_prev_slot;
		if (end_slot == 0)
			end_slot = NUM_OF_HPTSI_SLOTS - 1;
		else
			end_slot--;
		if (target_slot)
			*target_slot = end_slot;
		/*
		 * Now we have close to the full wheel left minus the
		 * time it has been since the pacer went to sleep. Note
		 * that wheel_slot, passed in, should be the current time
		 * from the perspective of the caller, mapped to the wheel.
		 */
		if (hpts->p_prev_slot != wheel_slot)
			dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
		else
			dis_to_travel = 1;
		/*
		 * dis_to_travel in this case is the space from when the
		 * pacer stopped (p_prev_slot) and where our wheel_slot
		 * is now. To know how many slots we can put it in we
		 * subtract from the wheel size. We would not want
		 * to place something after p_prev_slot or it will
		 * get run too soon.
		 */
		return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
	}
	/*
	 * So how many slots are open between p_runningslot -> p_cur_slot
	 * that is what is currently un-available for insertion. Special
	 * case when we are at the last slot, this gets 1, so that
	 * the answer to how many slots are available is all but 1.
	 */
	if (hpts->p_runningslot == hpts->p_cur_slot)
		dis_to_travel = 1;
	else
		dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
	/*
	 * How long has the pacer been running?
	 */
	if (hpts->p_cur_slot != wheel_slot) {
		/* The pacer is a bit late */
		pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
	} else {
		/* The pacer is right on time, now == pacers start time */
		pacer_to_now = 0;
	}
	/*
	 * To get the number left we can insert into we simply
	 * subtract the distance the pacer has to run from how
	 * many slots there are.
	 */
	avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
	/*
	 * Now how many of those we will eat due to the pacer's
	 * time (p_cur_slot) of start being behind the
	 * real time (wheel_slot)?
	 */
	if (avail_on_wheel <= pacer_to_now) {
		/*
		 * Wheel wrap, we can't fit on the wheel, that
		 * is unusual, the system must be way overloaded!
		 * Insert into the assured slot, and return special
		 * "0".
		 */
		counter_u64_add(combined_wheel_wrap, 1);
		*target_slot = hpts->p_nxt_slot;
		return (0);
	} else {
		/*
		 * We know how many slots are open
		 * on the wheel (the reverse of what
		 * is left to run). Take away the time
		 * the pacer started to now (wheel_slot)
		 * and that tells you how many slots are
		 * open that can be inserted into that won't
		 * be touched by the pacer until later.
		 */
		return (avail_on_wheel - pacer_to_now);
	}
}
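
/*
 * Worked example (illustrative; not part of the upstream source):
 * suppose the pacer is asleep with p_prev_slot == 100 and the caller's
 * current time maps to wheel_slot == 110.  Then dis_to_travel == 10
 * and the function reports 102400 - 10 == 102390 usable slots, with
 * *target_slot set to slot 99, one slot before p_prev_slot, which is
 * the farthest safe insertion point.
 */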

#ifdef INVARIANTS
static void
check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t inp_hptsslot, int line)
{
	/*
	 * Sanity checks for the pacer with invariants
	 * on insert.
	 */
	KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
		("hpts:%p inp:%p slot:%d > max",
		 hpts, inp, inp_hptsslot));
	if ((hpts->p_hpts_active) &&
	    (hpts->p_wheel_complete == 0)) {
		/*
		 * If the pacer is processing an arc
		 * of the wheel, we need to make
		 * sure we are not inserting within
		 * that arc.
		 */
		int distance, yet_to_run;

		distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
		if (hpts->p_runningslot != hpts->p_cur_slot)
			yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
		else
			yet_to_run = 0;	/* processing last slot */
		KASSERT(yet_to_run <= distance,
			("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
			 hpts, inp, inp_hptsslot,
			 distance, yet_to_run,
			 hpts->p_runningslot, hpts->p_cur_slot));
	}
}
#endif

uint32_t
tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
{
	struct tcp_hpts_entry *hpts;
	struct timeval tv;
	uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
	int32_t wheel_slot, maxslots;
	int cpu;
	bool need_wakeup = false;

	INP_WLOCK_ASSERT(inp);
	MPASS(!tcp_in_hpts(inp));
	MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));

	/*
	 * We now return the next-slot the hpts will be on, beyond its
	 * current run (if up) or where it was when it stopped if it is
	 * sleeping.
	 */
	hpts = tcp_hpts_lock(inp);
	microuptime(&tv);
	if (diag) {
		memset(diag, 0, sizeof(struct hpts_diag));
		diag->p_hpts_active = hpts->p_hpts_active;
		diag->p_prev_slot = hpts->p_prev_slot;
		diag->p_runningslot = hpts->p_runningslot;
		diag->p_nxt_slot = hpts->p_nxt_slot;
		diag->p_cur_slot = hpts->p_cur_slot;
		diag->p_curtick = hpts->p_curtick;
		diag->p_lasttick = hpts->p_lasttick;
		diag->slot_req = slot;
		diag->p_on_min_sleep = hpts->p_on_min_sleep;
		diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
	}
	if (slot == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		inp->inp_hpts_request = 0;
		if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
			/*
			 * A sleeping hpts we want in next slot to run
			 * note that in this state p_prev_slot == p_cur_slot
			 */
			inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
			if ((hpts->p_on_min_sleep == 0) &&
			    (hpts->p_hpts_active == 0))
				need_wakeup = true;
		} else
			inp->inp_hptsslot = hpts->p_runningslot;
		if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
			inp_hpts_insert(inp, hpts);
		if (need_wakeup) {
			/*
			 * Activate the hpts if it is sleeping and its
			 * timeout is not 1.
			 */
			hpts->p_direct_wake = 1;
			tcp_wakehpts(hpts);
		}
		slot_on = hpts->p_nxt_slot;
		HPTS_UNLOCK(hpts);

		return (slot_on);
	}
	/* Get the current time relative to the wheel */
	wheel_cts = tcp_tv_to_hptstick(&tv);
	/* Map it onto the wheel */
	wheel_slot = tick_to_wheel(wheel_cts);
	/* Now what's the max we can place it at? */
	maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
	if (diag) {
		diag->wheel_slot = wheel_slot;
		diag->maxslots = maxslots;
		diag->wheel_cts = wheel_cts;
	}
	if (maxslots == 0) {
		/* The pacer is in a wheel wrap behind, yikes! */
		if (slot > 1) {
			/*
			 * Reduce by 1 to prevent a forever loop in
			 * case something else is wrong. Note this
			 * probably does not hurt because the pacer,
			 * if this is true, is so far behind we will be
			 * > 1 second late calling anyway.
			 */
			slot--;
		}
		inp->inp_hptsslot = last_slot;
		inp->inp_hpts_request = slot;
	} else if (maxslots >= slot) {
		/* It all fits on the wheel */
		inp->inp_hpts_request = 0;
		inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
	} else {
		/* It does not fit */
		inp->inp_hpts_request = slot - maxslots;
		inp->inp_hptsslot = last_slot;
	}
	if (diag) {
		diag->slot_remaining = inp->inp_hpts_request;
		diag->inp_hptsslot = inp->inp_hptsslot;
	}
#ifdef INVARIANTS
	check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
#endif
	if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
		inp_hpts_insert(inp, hpts);
	if ((hpts->p_hpts_active == 0) &&
	    (inp->inp_hpts_request == 0) &&
	    (hpts->p_on_min_sleep == 0)) {
		/*
		 * The hpts is sleeping and NOT on a minimum
		 * sleep time, we need to figure out where
		 * it will wake up at and if we need to reschedule
		 * its time-out.
		 */
		uint32_t have_slept, yet_to_sleep;

		/* Now do we need to restart the hpts's timer? */
		have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
		if (have_slept < hpts->p_hpts_sleep_time)
			yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
		else {
			/* We are over-due */
			yet_to_sleep = 0;
			need_wakeup = 1;
		}
		if (diag) {
			diag->have_slept = have_slept;
			diag->yet_to_sleep = yet_to_sleep;
		}
		if (yet_to_sleep &&
		    (yet_to_sleep > slot)) {
			/*
			 * We need to reschedule the hpts's time-out.
			 */
			hpts->p_hpts_sleep_time = slot;
			need_new_to = slot * HPTS_TICKS_PER_SLOT;
		}
	}
	/*
	 * Now how far out is the hpts sleeping? If active is 1, it's
	 * up and ticking and we do nothing, otherwise we may need to
	 * reschedule its callout if need_new_to is set from above.
	 */
	if (need_wakeup) {
		hpts->p_direct_wake = 1;
		tcp_wakehpts(hpts);
		if (diag) {
			diag->need_new_to = 0;
			diag->co_ret = 0xffff0000;
		}
	} else if (need_new_to) {
		int32_t co_ret;
		struct timeval tv;
		sbintime_t sb;

		tv.tv_sec = 0;
		tv.tv_usec = 0;
		while (need_new_to > HPTS_USEC_IN_SEC) {
			tv.tv_sec++;
			need_new_to -= HPTS_USEC_IN_SEC;
		}
		tv.tv_usec = need_new_to;
		sb = tvtosbt(tv);
		cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
		co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
					      hpts_timeout_swi, hpts, cpu,
					      (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
		if (diag) {
			diag->need_new_to = need_new_to;
			diag->co_ret = co_ret;
		}
	}
	slot_on = hpts->p_nxt_slot;
	HPTS_UNLOCK(hpts);

	return (slot_on);
}

static uint16_t
hpts_random_cpu(struct inpcb *inp)
{
	/*
	 * No flow type set, distribute the load randomly.
	 */
	uint16_t cpuid;
	uint32_t ran;

	/*
	 * Shortcut if it is already set. XXXGL: does it happen?
	 */
	if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/* Nothing set use a random number */
	ran = arc4random();
	cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
	return (cpuid);
}

static uint16_t
hpts_cpuid(struct inpcb *inp, int *failed)
{
	u_int cpuid;
#ifdef NUMA
	struct hpts_domain_info *di;
#endif

	*failed = 0;
	if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/*
	 * If we are using the irq cpu set by LRO or
	 * the driver then it overrides all other domains.
	 */
	if (tcp_use_irq_cpu) {
		if (inp->inp_irq_cpu_set == 0) {
			*failed = 1;
			return (0);
		}
		return (inp->inp_irq_cpu);
	}
	/* If one is set the other must be the same */
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid == NETISR_CPUID_NONE)
		return (hpts_random_cpu(inp));
	else
		return (cpuid);
#endif
	/*
	 * We don't have a flowid -> cpuid mapping, so cheat and just map
	 * unknown cpuids to curcpu.  Not the best, but apparently better
	 * than defaulting to swi 0.
	 */
	if (inp->inp_flowtype == M_HASHTYPE_NONE) {
		counter_u64_add(cpu_uses_random, 1);
		return (hpts_random_cpu(inp));
	}
	/*
	 * Hash to a thread based on the flowid.  If we are using numa,
	 * then restrict the hash to the numa domain where the inp lives.
	 */
#ifdef NUMA
	if (tcp_bind_threads == 2 && inp->inp_numa_domain != M_NODOM) {
		di = &hpts_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
#endif
		cpuid = inp->inp_flowid % mp_ncpus;
	counter_u64_add(cpu_uses_flowid, 1);
	return (cpuid);
}
1067
1068static void
1070{
1071 struct mbuf *m, *n;
1072
1073 m = tp->t_in_pkt;
1074 if (m)
1075 n = m->m_nextpkt;
1076 else
1077 n = NULL;
1078 tp->t_in_pkt = NULL;
1079 while (m) {
1080 m_freem(m);
1081 m = n;
1082 if (m)
1083 n = m->m_nextpkt;
1084 }
1085}

static void
tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
{
	uint32_t t = 0, i, fnd = 0;

	if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
		/*
		 * Find next slot that is occupied and use that to
		 * be the sleep time.
		 */
		for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
			if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
				fnd = 1;
				break;
			}
			t = (t + 1) % NUM_OF_HPTSI_SLOTS;
		}
		KASSERT(fnd != 0, ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
		hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
	} else {
		/* No one on the wheel, sleep for all but 400 slots or sleep max */
		hpts->p_hpts_sleep_time = hpts_sleep_max;
	}
}
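
/*
 * Example (illustrative; not part of the upstream source): if the
 * nearest occupied slot is 50 slots ahead of p_cur_slot, the loop
 * above finds it at i == 49, so p_hpts_sleep_time becomes
 * min(50, hpts_sleep_max); with 10 usec slots the callers later
 * convert that to 50 * HPTS_TICKS_PER_SLOT == 500 usec of callout
 * sleep.
 */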

static int32_t
tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	struct timeval tv;
	uint64_t total_slots_processed = 0;
	int32_t slots_to_run, i, error;
	int32_t paced_cnt = 0;
	int32_t loop_cnt = 0;
	int32_t did_prefetch = 0;
	int32_t prefetch_ninp = 0;
	int32_t prefetch_tp = 0;
	int32_t wrap_loop_cnt = 0;
	int32_t slot_pos_of_endpoint = 0;
	int32_t orig_exit_slot;
	int8_t completed_measure = 0, seen_endpoint = 0;

	HPTS_MTX_ASSERT(hpts);
	NET_EPOCH_ASSERT();
	/* record previous info for any logging */
	hpts->saved_lasttick = hpts->p_lasttick;
	hpts->saved_curtick = hpts->p_curtick;
	hpts->saved_curslot = hpts->p_cur_slot;
	hpts->saved_prev_slot = hpts->p_prev_slot;

	hpts->p_lasttick = hpts->p_curtick;
	hpts->p_curtick = tcp_gethptstick(&tv);
	cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
	orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	if ((hpts->p_on_queue_cnt == 0) ||
	    (hpts->p_lasttick == hpts->p_curtick)) {
		/*
		 * No time has yet passed,
		 * or nothing to do.
		 */
		hpts->p_prev_slot = hpts->p_cur_slot;
		hpts->p_lasttick = hpts->p_curtick;
		goto no_run;
	}
again:
	hpts->p_wheel_complete = 0;
	HPTS_MTX_ASSERT(hpts);
	slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
	if (((hpts->p_curtick - hpts->p_lasttick) >
	     (NUM_OF_HPTSI_SLOTS - 1)) &&
	    (hpts->p_on_queue_cnt != 0)) {
		/*
		 * Wheel wrap is occurring, basically we
		 * are behind and the distance between
		 * runs has spread so much it has exceeded
		 * the time on the wheel (1.024 seconds). This
		 * is ugly and should NOT be happening. We
		 * need to run the entire wheel. We last processed
		 * p_prev_slot, so that needs to be the last slot
		 * we run. The next slot after that should be our
		 * reserved first slot for new, and then starts
		 * the running position. Now the problem is the
		 * reserved "not to yet" place does not exist
		 * and there may be inp's in there that need
		 * running. We can merge those into the
		 * first slot at the head.
		 */
		wrap_loop_cnt++;
		hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
		hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
		/*
		 * Adjust p_cur_slot to be where we are starting from
		 * hopefully we will catch up (fat chance if something
		 * is broken this bad :( )
		 */
		hpts->p_cur_slot = hpts->p_prev_slot;
		/*
		 * The next slot has guys to run too, and that would
		 * be where we would normally start, lets move them into
		 * the next slot (p_prev_slot + 2) so that we will
		 * run them, the extra 10usecs of late (by being
		 * put behind) does not really matter in this situation.
		 */
		TAILQ_FOREACH(inp, &hpts->p_hptss[hpts->p_nxt_slot].head,
		    inp_hpts) {
			MPASS(inp->inp_hptsslot == hpts->p_nxt_slot);
			MPASS(inp->inp_hpts_gencnt ==
			    hpts->p_hptss[hpts->p_nxt_slot].gencnt);
			MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);

			/*
			 * Update gencnt and nextslot accordingly to match
			 * the new location. This is safe since it takes both
			 * the INP lock and the pacer mutex to change the
			 * inp_hptsslot and inp_hpts_gencnt.
			 */
			inp->inp_hpts_gencnt =
			    hpts->p_hptss[hpts->p_runningslot].gencnt;
			inp->inp_hptsslot = hpts->p_runningslot;
		}
		TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
		    &hpts->p_hptss[hpts->p_nxt_slot].head, inp_hpts);
		hpts->p_hptss[hpts->p_runningslot].count +=
		    hpts->p_hptss[hpts->p_nxt_slot].count;
		hpts->p_hptss[hpts->p_nxt_slot].count = 0;
		hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
		slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
		counter_u64_add(wheel_wrap, 1);
	} else {
		/*
		 * Nxt slot is always one after p_runningslot though
		 * it's not used usually unless we are doing wheel wrap.
		 */
		hpts->p_nxt_slot = hpts->p_prev_slot;
		hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
	}
	if (hpts->p_on_queue_cnt == 0) {
		goto no_one;
	}
	for (i = 0; i < slots_to_run; i++) {
		struct inpcb *inp, *ninp;
		TAILQ_HEAD(, inpcb) head = TAILQ_HEAD_INITIALIZER(head);
		struct hptsh *hptsh;
		uint32_t runningslot;

		/*
		 * Calculate our delay, if there are no extra ticks there
		 * was not any (i.e. if slots_to_run == 1, no delay).
		 */
		hpts->p_delayed_by = (slots_to_run - (i + 1)) *
		    HPTS_TICKS_PER_SLOT;

		runningslot = hpts->p_runningslot;
		hptsh = &hpts->p_hptss[runningslot];
		TAILQ_SWAP(&head, &hptsh->head, inpcb, inp_hpts);
		hpts->p_on_queue_cnt -= hptsh->count;
		hptsh->count = 0;
		hptsh->gencnt++;

		HPTS_UNLOCK(hpts);

		TAILQ_FOREACH_SAFE(inp, &head, inp_hpts, ninp) {
			bool set_cpu;

			if (ninp != NULL) {
				/* We prefetch the next inp if possible */
				kern_prefetch(ninp, &prefetch_ninp);
				prefetch_ninp = 1;
			}

			/* For debugging */
			if (seen_endpoint == 0) {
				seen_endpoint = 1;
				orig_exit_slot = slot_pos_of_endpoint =
				    runningslot;
			} else if (completed_measure == 0) {
				/* Record the new position */
				orig_exit_slot = runningslot;
			}
			total_slots_processed++;
			paced_cnt++;

			INP_WLOCK(inp);
			if (inp->inp_hpts_cpu_set == 0) {
				set_cpu = true;
			} else {
				set_cpu = false;
			}

			if (__predict_false(inp->inp_in_hpts == IHPTS_MOVING)) {
				if (inp->inp_hptsslot == -1) {
					inp->inp_in_hpts = IHPTS_NONE;
					if (in_pcbrele_wlocked(inp) == false)
						INP_WUNLOCK(inp);
				} else {
					HPTS_LOCK(hpts);
					inp_hpts_insert(inp, hpts);
					HPTS_UNLOCK(hpts);
					INP_WUNLOCK(inp);
				}
				continue;
			}

			MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
			MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));
			KASSERT(runningslot == inp->inp_hptsslot,
				("Hpts:%p inp:%p slot mis-aligned %u vs %u",
				 hpts, inp, runningslot, inp->inp_hptsslot));

			if (inp->inp_hpts_request) {
				/*
				 * This guy is deferred out further in time
				 * than our wheel had available on it.
				 * Push him back on the wheel or run it
				 * depending.
				 */
				uint32_t maxslots, last_slot, remaining_slots;

				remaining_slots = slots_to_run - (i + 1);
				if (inp->inp_hpts_request > remaining_slots) {
					HPTS_LOCK(hpts);
					/*
					 * How far out can we go?
					 */
					maxslots = max_slots_available(hpts,
					    hpts->p_cur_slot, &last_slot);
					if (maxslots >= inp->inp_hpts_request) {
						/* We can place it finally to
						 * be processed.  */
						inp->inp_hptsslot = hpts_slot(
						    hpts->p_runningslot,
						    inp->inp_hpts_request);
						inp->inp_hpts_request = 0;
					} else {
						/* Work off some more time */
						inp->inp_hptsslot = last_slot;
						inp->inp_hpts_request -=
						    maxslots;
					}
					inp_hpts_insert(inp, hpts);
					HPTS_UNLOCK(hpts);
					INP_WUNLOCK(inp);
					continue;
				}
				inp->inp_hpts_request = 0;
				/* Fall through, we will do it now */
			}

			inp_hpts_release(inp);
			tp = intotcpcb(inp);
			MPASS(tp);
			if (set_cpu) {
				/*
				 * Setup so the next time we will move to
				 * the right CPU. This should be a rare
				 * event. It will sometimes happen when we
				 * are the client side (usually not the
				 * server). Somehow tcp_output() gets called
				 * before the tcp_do_segment() sets the
				 * initial state. This means the r_cpu and
				 * r_hpts_cpu is 0. We get on the hpts, and
				 * then tcp_input() gets called setting up
				 * the r_cpu to the correct value. The hpts
				 * goes off and sees the mis-match. We
				 * simply correct it here and the CPU will
				 * switch to the new hpts next time the tcb
				 * gets added to the hpts (not this one)
				 * :-)
				 */
				tcp_set_hpts(inp);
			}
			CURVNET_SET(inp->inp_vnet);
			/* Lets do any logging that we might want to */
			if (hpts_does_tp_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
				tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
			}

			if (tp->t_fb_ptr != NULL) {
				kern_prefetch(tp->t_fb_ptr, &did_prefetch);
				did_prefetch = 1;
			}
			if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) && tp->t_in_pkt) {
				error = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
				if (error) {
					/* The input killed the connection */
					goto skip_pacing;
				}
			}
			inp->inp_hpts_calls = 1;
			error = tcp_output(tp);
			if (error < 0)
				goto skip_pacing;
			inp->inp_hpts_calls = 0;
			if (ninp && ninp->inp_ppcb) {
				/*
				 * If we have a nxt inp, see if we can
				 * prefetch its ppcb. Note this may seem
				 * "risky" since we have no locks (other
				 * than the previous inp) and there is no
				 * assurance that ninp was not pulled while
				 * we were processing inp and freed. If this
				 * occurred it could mean that either:
				 *
				 * a) Its NULL (which is fine we won't go
				 * here) <or> b) Its valid (which is cool we
				 * will prefetch it) <or> c) The inp got
				 * freed back to the slab which was
				 * reallocated. Then the piece of memory was
				 * re-used and something else (not an
				 * address) is in inp_ppcb. If that occurs
				 * we don't crash, but take a TLB shootdown
				 * performance hit (same as if it was NULL
				 * and we tried to pre-fetch it).
				 *
				 * Considering that the likelihood of <c> is
				 * quite rare we will take a risk on doing
				 * this. If performance drops after testing
				 * we can always take this out. NB: the
				 * kern_prefetch on amd64 actually has
				 * protection against a bad address now via
				 * the DMAP_() tests. This will prevent the
				 * TLB hit, and instead if <c> occurs just
				 * cause us to load cache with a useless
				 * address (to us).
				 */
				kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
				prefetch_tp = 1;
			}
			INP_WUNLOCK(inp);
		skip_pacing:
			CURVNET_RESTORE();
		}
		if (seen_endpoint) {
			/*
			 * We now have an accurate distance between
			 * slot_pos_of_endpoint <-> orig_exit_slot
			 * to tell us how late we were, orig_exit_slot
			 * is where we calculated the end of our cycle to
			 * be when we first entered.
			 */
			completed_measure = 1;
		}
		HPTS_LOCK(hpts);
		hpts->p_runningslot++;
		if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
			hpts->p_runningslot = 0;
		}
	}
no_one:
	HPTS_MTX_ASSERT(hpts);
	hpts->p_delayed_by = 0;
	/*
	 * Check to see if we took an excess amount of time and need to run
	 * more ticks (if we did not hit ENOBUFS).
	 */
	hpts->p_prev_slot = hpts->p_cur_slot;
	hpts->p_lasttick = hpts->p_curtick;
	if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
		/*
		 * Something is seriously slow; we have
		 * looped through processing the wheel
		 * and by the time we cleared the
		 * needs to run max_pacer_loops time
		 * we still needed to run. That means
		 * the system is hopelessly behind and
		 * can never catch up :(
		 *
		 * We will just lie to this thread
		 * and let it think p_curtick is
		 * correct. When it next awakens
		 * it will find itself further behind.
		 */
		if (from_callout)
			counter_u64_add(hpts_hopelessly_behind, 1);
		goto no_run;
	}
	hpts->p_curtick = tcp_gethptstick(&tv);
	hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	if (seen_endpoint == 0) {
		/* We saw no endpoint but we may be looping */
		orig_exit_slot = hpts->p_cur_slot;
	}
	if ((wrap_loop_cnt < 2) &&
	    (hpts->p_lasttick != hpts->p_curtick)) {
		counter_u64_add(hpts_loops, 1);
		loop_cnt++;
		goto again;
	}
no_run:
	cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
	/*
	 * Set flag to tell that we are done for
	 * any slot input that happens during
	 * input.
	 */
	hpts->p_wheel_complete = 1;
	/*
	 * Now did we spend too long running input and need to run more ticks?
	 * Note that if wrap_loop_cnt < 2 then we should have the conditions
	 * in the KASSERT's true. But if the wheel is behind i.e. wrap_loop_cnt
	 * is greater than 2, then the conditions most likely are *not* true.
	 * Also if we are called not from the callout, we don't run the wheel
	 * multiple times so the slots may not align either.
	 */
	KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
		 (wrap_loop_cnt >= 2) || (from_callout == 0)),
		("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
		 hpts->p_prev_slot, hpts->p_cur_slot));
	KASSERT(((hpts->p_lasttick == hpts->p_curtick)
		 || (wrap_loop_cnt >= 2) || (from_callout == 0)),
		("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
		 hpts->p_lasttick, hpts->p_curtick));
	if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
		hpts->p_curtick = tcp_gethptstick(&tv);
		counter_u64_add(hpts_loops, 1);
		hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
		goto again;
	}

	if (from_callout) {
		tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
	}
	if (seen_endpoint)
		return (hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
	else
		return (0);
}

void
__tcp_set_hpts(struct inpcb *inp, int32_t line)
{
	struct tcp_hpts_entry *hpts;
	int failed;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if ((inp->inp_in_hpts == 0) &&
	    (inp->inp_hpts_cpu_set == 0)) {
		inp->inp_hpts_cpu = hpts_cpuid(inp, &failed);
		if (failed == 0)
			inp->inp_hpts_cpu_set = 1;
	}
	mtx_unlock(&hpts->p_mtx);
}

static void
__tcp_run_hpts(struct tcp_hpts_entry *hpts)
{
	int ticks_ran;

	if (hpts->p_hpts_active) {
		/* Already active */
		return;
	}
	if (mtx_trylock(&hpts->p_mtx) == 0) {
		/* Someone else got the lock */
		return;
	}
	if (hpts->p_hpts_active)
		goto out_with_mtx;
	hpts->syscall_cnt++;
	counter_u64_add(hpts_direct_call, 1);
	hpts->p_hpts_active = 1;
	ticks_ran = tcp_hptsi(hpts, 0);
	/* We may want to adjust the sleep values here */
	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		if (ticks_ran > ticks_indicate_less_sleep) {
			struct timeval tv;
			sbintime_t sb;
			int cpu;

			hpts->p_mysleep.tv_usec /= 2;
			if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
				hpts->p_mysleep.tv_usec = dynamic_min_sleep;
			/* Reschedule with new to value */
			tcp_hpts_set_max_sleep(hpts, 0);
			/* tv_sec must be initialized before tvtosbt() below */
			tv.tv_sec = 0;
			tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
			/* Validate it is in the right ranges */
			if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
				hpts->overidden_sleep = tv.tv_usec;
				tv.tv_usec = hpts->p_mysleep.tv_usec;
			} else if (tv.tv_usec > dynamic_max_sleep) {
				/* Lets not let sleep get above this value */
				hpts->overidden_sleep = tv.tv_usec;
				tv.tv_usec = dynamic_max_sleep;
			}
			/*
			 * In this mode the timer is a backstop to
			 * all the userret/lro_flushes so we use
			 * the dynamic value and set the on_min_sleep
			 * flag so we will not be awoken.
			 */
			sb = tvtosbt(tv);
			cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
			/* Store off to make visible the actual sleep time */
			hpts->sleeping = tv.tv_usec;
			callout_reset_sbt_on(&hpts->co, sb, 0,
					     hpts_timeout_swi, hpts, cpu,
					     (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
		} else if (ticks_ran < ticks_indicate_more_sleep) {
			/* For the further sleep, don't reschedule hpts */
			hpts->p_mysleep.tv_usec *= 2;
			if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
				hpts->p_mysleep.tv_usec = dynamic_max_sleep;
		}
		hpts->p_on_min_sleep = 1;
	}
	hpts->p_hpts_active = 0;
out_with_mtx:
	HPTS_MTX_ASSERT(hpts);
	mtx_unlock(&hpts->p_mtx);
}

static struct tcp_hpts_entry *
tcp_choose_hpts_to_run(void)
{
	int i, oldest_idx;
	uint32_t cts, time_since_ran, calc;

	if ((hpts_uses_oldest == 0) ||
	    ((hpts_uses_oldest > 1) &&
	     ((cts_last_ran[(tcp_pace.rp_num_hptss - 1)] + hpts_uses_oldest) > tcp_get_usecs(NULL)))) {
		/*
		 * We have either disabled the feature (0), or
		 * we have crossed over the oldest threshold on the
		 * last hpts. We use the last one for simplification
		 * since we don't want to use the first one (it may
		 * have starting connections that have not settled
		 * on the cpu yet).
		 */
		return (tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
	}
	/* Lets find the oldest hpts to attempt to run */
	cts = tcp_get_usecs(NULL);
	time_since_ran = 0;
	oldest_idx = -1;
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		if (TSTMP_GT(cts, cts_last_ran[i]))
			calc = cts - cts_last_ran[i];
		else
			calc = 0;
		if (calc > time_since_ran) {
			oldest_idx = i;
			time_since_ran = calc;
		}
	}
	if (oldest_idx >= 0)
		return (tcp_pace.rp_ent[oldest_idx]);
	else
		return (tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
}

void
tcp_run_hpts(void)
{
	static struct tcp_hpts_entry *hpts;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
	hpts = tcp_choose_hpts_to_run();
	__tcp_run_hpts(hpts);
	NET_EPOCH_EXIT(et);
}

static void
tcp_hpts_thread(void *ctx)
{
	struct tcp_hpts_entry *hpts;
	struct epoch_tracker et;
	struct timeval tv;
	sbintime_t sb;
	int cpu, ticks_ran;

	hpts = (struct tcp_hpts_entry *)ctx;
	mtx_lock(&hpts->p_mtx);
	if (hpts->p_direct_wake) {
		/* Signaled by input or output with low occupancy count. */
		callout_stop(&hpts->co);
		counter_u64_add(hpts_direct_awakening, 1);
	} else {
		/* Timed out, the normal case. */
		counter_u64_add(hpts_wake_timeout, 1);
		if (callout_pending(&hpts->co) ||
		    !callout_active(&hpts->co)) {
			mtx_unlock(&hpts->p_mtx);
			return;
		}
	}
	callout_deactivate(&hpts->co);
	hpts->p_hpts_wake_scheduled = 0;
	NET_EPOCH_ENTER(et);
	if (hpts->p_hpts_active) {
		/*
		 * We are active already. This means that a syscall
		 * trap or LRO is running on behalf of hpts. In that case
		 * we need to double our timeout since there seems to be
		 * enough activity in the system that we don't need to
		 * run as often (if we were not directly woken).
		 */
		if (hpts->p_direct_wake == 0) {
			counter_u64_add(hpts_back_tosleep, 1);
			if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
				hpts->p_mysleep.tv_usec *= 2;
				if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
					hpts->p_mysleep.tv_usec = dynamic_max_sleep;
				/* tv_sec must be initialized before tvtosbt() below */
				tv.tv_sec = 0;
				tv.tv_usec = hpts->p_mysleep.tv_usec;
				hpts->p_on_min_sleep = 1;
			} else {
				/*
				 * Here we have low count on the wheel, but
				 * somehow we still collided with one of the
				 * connections. Lets go back to sleep for a
				 * min sleep time, but clear the flag so we
				 * can be awoken by insert.
				 */
				hpts->p_on_min_sleep = 0;
				tv.tv_sec = 0;
				tv.tv_usec = tcp_min_hptsi_time;
			}
		} else {
			/*
			 * Directly woken most likely to reset the
			 * callout time.
			 */
			tv.tv_sec = 0;
			tv.tv_usec = hpts->p_mysleep.tv_usec;
		}
		goto back_to_sleep;
	}
	hpts->sleeping = 0;
	hpts->p_hpts_active = 1;
	ticks_ran = tcp_hptsi(hpts, 1);
	tv.tv_sec = 0;
	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		if (hpts->p_direct_wake == 0) {
			/*
			 * Only adjust sleep time if we were
			 * called from the callout i.e. direct_wake == 0.
			 */
			if (ticks_ran < ticks_indicate_more_sleep) {
				hpts->p_mysleep.tv_usec *= 2;
				if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
					hpts->p_mysleep.tv_usec = dynamic_max_sleep;
			} else if (ticks_ran > ticks_indicate_less_sleep) {
				hpts->p_mysleep.tv_usec /= 2;
				if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
					hpts->p_mysleep.tv_usec = dynamic_min_sleep;
			}
		}
		if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = hpts->p_mysleep.tv_usec;
		} else if (tv.tv_usec > dynamic_max_sleep) {
			/* Lets not let sleep get above this value */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = dynamic_max_sleep;
		}
		/*
		 * In this mode the timer is a backstop to
		 * all the userret/lro_flushes so we use
		 * the dynamic value and set the on_min_sleep
		 * flag so we will not be awoken.
		 */
		hpts->p_on_min_sleep = 1;
	} else if (hpts->p_on_queue_cnt == 0) {
		/*
		 * No one on the wheel, please wake us up
		 * if you insert on the wheel.
		 */
		hpts->p_on_min_sleep = 0;
		hpts->overidden_sleep = 0;
	} else {
		/*
		 * We hit here when we have a low number of
		 * clients on the wheel (our else clause).
		 * We may need to go on min sleep, if we set
		 * the flag we will not be awoken if someone
		 * is inserted ahead of us. Clearing the flag
		 * means we can be awoken. This is "old mode"
		 * where the timer is what runs hpts mainly.
		 */
		if (tv.tv_usec < tcp_min_hptsi_time) {
			/*
			 * Yes on min sleep, which means
			 * we cannot be awoken.
			 */
			hpts->overidden_sleep = tv.tv_usec;
			tv.tv_usec = tcp_min_hptsi_time;
			hpts->p_on_min_sleep = 1;
		} else {
			/* Clear the min sleep flag */
			hpts->overidden_sleep = 0;
			hpts->p_on_min_sleep = 0;
		}
	}
	HPTS_MTX_ASSERT(hpts);
	hpts->p_hpts_active = 0;
back_to_sleep:
	hpts->p_direct_wake = 0;
	sb = tvtosbt(tv);
	cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
	/* Store off to make visible the actual sleep time */
	hpts->sleeping = tv.tv_usec;
	callout_reset_sbt_on(&hpts->co, sb, 0,
			     hpts_timeout_swi, hpts, cpu,
			     (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	NET_EPOCH_EXIT(et);
	mtx_unlock(&hpts->p_mtx);
}
1799
1800#undef timersub
1801
1802static void
1804{
1805 int32_t i, j, error, bound = 0, created = 0;
1806 size_t sz, asz;
1807 struct timeval tv;
1808 sbintime_t sb;
1809 struct tcp_hpts_entry *hpts;
1810 struct pcpu *pc;
1811 cpuset_t cs;
1812 char unit[16];
1813 uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1814 int count, domain, cpu;
1815
1816 tcp_pace.rp_num_hptss = ncpus;
1817 hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
1818 hpts_loops = counter_u64_alloc(M_WAITOK);
1819 back_tosleep = counter_u64_alloc(M_WAITOK);
1820 combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
1821 wheel_wrap = counter_u64_alloc(M_WAITOK);
1822 hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
1823 hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
1824 hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
1825 hpts_direct_call = counter_u64_alloc(M_WAITOK);
1826 cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
1827 cpu_uses_random = counter_u64_alloc(M_WAITOK);
1828
1829
1830 sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
1831 tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1832 sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
1833 cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
1834 asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
1835 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1836 tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
1837 M_TCPHPTS, M_WAITOK | M_ZERO);
1838 tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
1839 M_TCPHPTS, M_WAITOK);
1840 hpts = tcp_pace.rp_ent[i];
1841 /*
1842 * Init all the hpts structures that are not specifically
1843 * zero'd by the allocations. Also lets attach them to the
1844 * appropriate sysctl block as well.
1845 */
1846 mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
1847 "hpts", MTX_DEF | MTX_DUPOK);
1848 for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
1849 TAILQ_INIT(&hpts->p_hptss[j].head);
1850 hpts->p_hptss[j].count = 0;
1851 hpts->p_hptss[j].gencnt = 0;
1852 }
1853 sysctl_ctx_init(&hpts->hpts_ctx);
1854 sprintf(unit, "%d", i);
1855 hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
1856 SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
1857 OID_AUTO,
1858 unit,
1859 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1860 "");
1861 SYSCTL_ADD_INT(&hpts->hpts_ctx,
1862 SYSCTL_CHILDREN(hpts->hpts_root),
1863 OID_AUTO, "out_qcnt", CTLFLAG_RD,
1864 &hpts->p_on_queue_cnt, 0,
1865 "Count TCB's awaiting output processing");
1866 SYSCTL_ADD_U16(&hpts->hpts_ctx,
1867 SYSCTL_CHILDREN(hpts->hpts_root),
1868 OID_AUTO, "active", CTLFLAG_RD,
1869 &hpts->p_hpts_active, 0,
1870 "Is the hpts active");
1871 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1872 SYSCTL_CHILDREN(hpts->hpts_root),
1873 OID_AUTO, "curslot", CTLFLAG_RD,
1874 &hpts->p_cur_slot, 0,
1875 "What the current running pacers goal");
1876 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1877 SYSCTL_CHILDREN(hpts->hpts_root),
1878 OID_AUTO, "runtick", CTLFLAG_RD,
1879 &hpts->p_runningslot, 0,
1880 "What the running pacers current slot is");
1881 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1882 SYSCTL_CHILDREN(hpts->hpts_root),
1883 OID_AUTO, "curtick", CTLFLAG_RD,
1884 &hpts->p_curtick, 0,
1885 "What the running pacers last tick mapped to the wheel was");
1886 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1887 SYSCTL_CHILDREN(hpts->hpts_root),
1888 OID_AUTO, "lastran", CTLFLAG_RD,
1889 &cts_last_ran[i], 0,
1890 "The last usec tick that this hpts ran");
1891 SYSCTL_ADD_LONG(&hpts->hpts_ctx,
1892 SYSCTL_CHILDREN(hpts->hpts_root),
1893 OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
1894 &hpts->p_mysleep.tv_usec,
1895 "What the running pacers is using for p_mysleep.tv_usec");
1896 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1897 SYSCTL_CHILDREN(hpts->hpts_root),
1898 OID_AUTO, "now_sleeping", CTLFLAG_RD,
1899 &hpts->sleeping, 0,
1900 "What the running pacers is actually sleeping for");
1901 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1902 SYSCTL_CHILDREN(hpts->hpts_root),
1903 OID_AUTO, "syscall_cnt", CTLFLAG_RD,
1904 &hpts->syscall_cnt, 0,
1905 "How many times we had syscalls on this hpts");
1906
1907 hpts->p_hpts_sleep_time = hpts_sleep_max;
1908 hpts->p_num = i;
1909 hpts->p_curtick = tcp_gethptstick(&tv);
1910 cts_last_ran[i] = tcp_tv_to_usectick(&tv);
1911 hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1912 hpts->p_cpu = 0xffff;
1913 hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
1914 callout_init(&hpts->co, 1);
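/*
 * Mark the callout MPSAFE (second argument); it is not armed here,
 * only in the second loop below once the SWI handler exists.
 */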
1915 }
1916
1917 /* Don't try to bind to NUMA domains if we don't have any */
1918 if (vm_ndomains == 1 && tcp_bind_threads == 2)
1919 tcp_bind_threads = 0;
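/*
 * With a single VM domain there is nothing to bind to at the domain
 * level, so binding is switched off entirely.
 */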
1920
1921 /*
1922 * Now let's start ithreads to handle the hptss.
1923 */
1924 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1925 hpts = tcp_pace.rp_ent[i];
1926 hpts->p_cpu = i;
1927 error = swi_add(&hpts->ie, "hpts",
1928 tcp_hpts_thread, (void *)hpts,
1929 SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
1930 KASSERT(error == 0,
1931 ("Can't add hpts:%p i:%d err:%d",
1932 hpts, i, error));
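/*
 * Each pacer gets its own software interrupt thread at SWI_NET
 * priority; when the callout fires, hpts_timeout_swi() merely
 * schedules this SWI rather than running the wheel itself.
 */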
1933 created++;
1934 hpts->p_mysleep.tv_sec = 0;
1935 hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
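/*
 * Every pacer starts out at the minimum sleep interval; the dynamic
 * sleep logic can stretch p_mysleep later when the pacer is lightly
 * loaded.
 */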
1936 if (tcp_bind_threads == 1) {
1937 if (intr_event_bind(hpts->ie, i) == 0)
1938 bound++;
1939 } else if (tcp_bind_threads == 2) {
1940 pc = pcpu_find(i);
1941 domain = pc->pc_domain;
1942 CPU_COPY(&cpuset_domain[domain], &cs);
1943 if (intr_event_bind_ithread_cpuset(hpts->ie, &cs)
1944 == 0) {
1945 bound++;
1946 count = hpts_domains[domain].count;
1947 hpts_domains[domain].cpu[count] = i;
1948 hpts_domains[domain].count++;
1949 }
1950 }
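/*
 * Mode 2 binds the ithread to the cpuset of the CPU's NUMA domain
 * rather than to a single CPU, and records the CPU in hpts_domains[]
 * so the per-domain bookkeeping below can see it.
 */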
1951 tv.tv_sec = 0;
1952 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1953 hpts->sleeping = tv.tv_usec;
1954 sb = tvtosbt(tv);
1955 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
1956 callout_reset_sbt_on(&hpts->co, sb, 0,
1957 hpts_timeout_swi, hpts, cpu,
1958 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
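/*
 * C_DIRECT_EXEC makes the callout handler run directly from the
 * clock interrupt, and C_PREL(tcp_hpts_precision) tells the callout
 * code how much slop is acceptable when coalescing the wakeup.
 */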
1959 }
1960 /*
1961 * If we somehow have an empty domain, fall back to choosing
1962 * among all hpts threads.
1963 */
1964 for (i = 0; i < vm_ndomains; i++) {
1965 if (hpts_domains[i].count == 0) {
1966 tcp_bind_threads = 0;
1967 break;
1968 }
1969 }
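/*
 * An empty domain would leave the domain-aware CPU selection with no
 * CPU to hand out, so in that case it is safer to stop binding and
 * let hpts selection fall back to the full set of pacers.
 */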
1970 printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
1971 created, bound,
1972 tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
1973#ifdef INVARIANTS
1974 printf("HPTS is in INVARIANT mode!!\n");
1975#endif
1976}
1977
1978SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
1979MODULE_VERSION(tcphpts, 1);