42#include "opt_callout_profiling.h"
49#include <sys/callout.h>
50#include <sys/domainset.h>
52#include <sys/interrupt.h>
53#include <sys/kernel.h>
55#include <sys/kthread.h>
57#include <sys/malloc.h>
60#include <sys/random.h>
63#include <sys/sleepqueue.h>
64#include <sys/sysctl.h>
66#include <sys/unistd.h>
70#include <ddb/db_sym.h>
71#include <machine/_inttypes.h>
75#include <machine/cpu.h>
86#ifdef CALLOUT_PROFILING
88SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
89 "Average number of items examined per softclock call. Units = 1/1000");
91SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
92 "Average number of Giant callouts made per softclock call. Units = 1/1000");
93static int avg_lockcalls;
94SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
95 "Average number of lock callouts made per softclock call. Units = 1/1000");
96static int avg_mpcalls;
97SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
98 "Average number of MP callouts made per softclock call. Units = 1/1000");
99static int avg_depth_dir;
100SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
101 "Average number of direct callouts examined per callout_process call. "
103static int avg_lockcalls_dir;
104SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
105 &avg_lockcalls_dir, 0,
"Average number of lock direct callouts made per "
106 "callout_process call. Units = 1/1000");
107static int avg_mpcalls_dir;
108SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
109 0,
"Average number of MP direct callouts made per callout_process call. "
115 "Number of entries in callwheel and size of timeout() preallocation");
126 0,
"Pin the default (non-per-cpu) swi (shared with PCPU 0 swi)");
128 0,
"Pin the per-CPU swis (except PCPU 0, which is also default)");
153 callout_func_t *ce_migration_func;
154 void *ce_migration_arg;
155 sbintime_t ce_migration_time;
156 sbintime_t ce_migration_prec;
157 int ce_migration_cpu;
179 char cc_ktr_event_name[20];
/*
 * NOTE(review): the leading digits fused onto these lines are original
 * source line numbers from a mangled extraction; several intervening
 * lines are missing from this copy — do not treat this span as
 * compilable as-is. The macros themselves are complete one-liners.
 */
/* True when callout `c` has a deferred (in-flight) CPU migration pending. */
183#define callout_migrating(c) ((c)->c_iflags & CALLOUT_DFRMIGRATION)
/*
 * Accessors for the per-direction execution state of a callout_cpu.
 * `dir` indexes cc_exec_entity[]; presumably 0 = softclock-thread
 * execution and 1 = direct (interrupt-context) execution — TODO confirm
 * against the missing struct callout_cpu definition.
 */
185#define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr
186#define cc_exec_last_func(cc, dir) cc->cc_exec_entity[dir].cc_last_func
187#define cc_exec_last_arg(cc, dir) cc->cc_exec_entity[dir].cc_last_arg
188#define cc_exec_drain(cc, dir) cc->cc_exec_entity[dir].cc_drain
189#define cc_exec_next(cc) cc->cc_next
190#define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel
191#define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting
/*
 * Accessors for the deferred-migration record (target CPU, schedule
 * time/precision, and replacement function/argument) stored per
 * execution direction.
 */
193#define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func
194#define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg
195#define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu
196#define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time
197#define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec
/*
 * CPUBLOCK is a sentinel "no/blocked CPU" value (MAXCPU is one past the
 * highest valid CPU id); cc_migration_cpu() != CPUBLOCK is used elsewhere
 * in this file to detect a pending migration.
 */
200#define CPUBLOCK MAXCPU
/*
 * NOTE(review): the two pairs of CC_CPU/CC_SELF definitions below are
 * the SMP (per-CPU cc_cpu[] array) and UP (single cc_cpu) variants; the
 * original #ifdef SMP / #else / #endif guard lines (orig. 198-199,
 * 203-204, 207) were lost in this extraction — confirm against upstream
 * before relying on this span.
 */
201#define CC_CPU(cpu) (&cc_cpu[(cpu)])
202#define CC_SELF() CC_CPU(PCPU_GET(cpuid))
205#define CC_CPU(cpu) (&cc_cpu)
206#define CC_SELF() (&cc_cpu)
/* Spin-mutex wrappers for a callout_cpu's lock (held across wheel access). */
208#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
209#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
210#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
216#ifdef CALLOUT_PROFILING
217 int *mpcalls,
int *lockcalls,
int *gcalls,
251 cc_migration_cpu(cc, direct) = CPUBLOCK;
252 cc_migration_time(cc, direct) = 0;
253 cc_migration_prec(cc, direct) = 0;
254 cc_migration_func(cc, direct) = NULL;
255 cc_migration_arg(cc, direct) = NULL;
267 return (cc_migration_cpu(cc, direct) != CPUBLOCK);
290 TUNABLE_INT_FETCH(
"kern.ncallout", &
ncallout);
325 mtx_init(&cc->
cc_lock,
"callout", NULL, MTX_SPIN);
329 DOMAINSET_PREF(
pcpu_find(cpu)->pc_domain), M_WAITOK);
334 for (i = 0; i < 2; i++)
337 snprintf(cc->cc_ktr_event_name,
sizeof(cc->cc_ktr_event_name),
338 "callwheel cpu %d", cpu);
349callout_cpu_switch(
struct callout *c,
struct callout_cpu *cc,
int new_cpu)
353 MPASS(c != NULL && cc != NULL);
388 RFSTOPPED, 0,
"clock",
"clock (%d)", cpu);
390 panic(
"failed to create softclock thread for cpu %d: %d",
407 printf(
"%s: %s clock couldn't be pinned to cpu %d: %d\n",
409 "default" :
"per-cpu", cpu, error);
/*
 * Shift applied when hashing an sbintime_t into a callwheel bucket;
 * presumably trades bucket granularity against wheel size — the hash
 * function itself (callout_hash/callout_get_bucket) is not visible in
 * this fragment, so confirm against the full file.
 */
415#define CC_HASH_SHIFT 8
434 struct callout_entropy {
439 struct callout *tmp, *tmpn;
441 struct callout_list *sc;
443 sbintime_t first, last, lookahead, max, tmp_max;
444 u_int firstb, lastb, nowb;
445#ifdef CALLOUT_PROFILING
446 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
450 mtx_lock_spin_flags(&cc->
cc_lock, MTX_QUIET);
459 lookahead = (SBT_1S / 16);
460 else if (nowb - firstb == 1)
461 lookahead = (SBT_1S / 8);
465 first += (lookahead / 2);
484 tmp = LIST_FIRST(sc);
485 while (tmp != NULL) {
487 if (tmp->c_time <= now) {
492 if (tmp->c_iflags & CALLOUT_DIRECT) {
493#ifdef CALLOUT_PROFILING
497 LIST_NEXT(tmp, c_links.le);
499 LIST_REMOVE(tmp, c_links.le);
501#ifdef CALLOUT_PROFILING
502 &mpcalls_dir, &lockcalls_dir, NULL,
508 tmpn = LIST_NEXT(tmp, c_links.le);
509 LIST_REMOVE(tmp, c_links.le);
512 tmp->c_iflags |= CALLOUT_PROCESSED;
518 if (tmp->c_time >= max)
524 if (tmp->c_time > last) {
529 if (tmp->c_time < first)
531 tmp_max = tmp->c_time + tmp->c_precision;
535 tmp = LIST_NEXT(tmp, c_links.le);
544 }
while (((
int)(firstb - lastb)) <= 0);
548#ifdef CALLOUT_PROFILING
549 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
550 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
551 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
555 entropy.td = curthread;
557 random_harvest_queue(&entropy,
sizeof(entropy), RANDOM_CALLOUT);
560 if (TD_AWAITING_INTR(td)) {
562 THREAD_LOCK_ASSERT(td, MA_OWNED);
566 mtx_unlock_spin_flags(&cc->
cc_lock, MTX_QUIET);
568 mtx_unlock_spin_flags(&cc->
cc_lock, MTX_QUIET);
580 if (cpu == CPUBLOCK) {
581 while (c->c_cpu == CPUBLOCK)
597 sbintime_t sbt, sbintime_t precision,
void (*func)(
void *),
598 void *arg,
int cpu,
int flags)
606 c->c_iflags |= CALLOUT_PENDING;
607 c->c_iflags &= ~CALLOUT_PROCESSED;
608 c->c_flags |= CALLOUT_ACTIVE;
609 if (
flags & C_DIRECT_EXEC)
610 c->c_iflags |= CALLOUT_DIRECT;
613 c->c_precision = precision;
615 CTR3(KTR_CALLOUT,
"precision set for %p: %d.%08x",
616 c, (
int)(c->c_precision >> 32),
617 (u_int)(c->c_precision & 0xffffffff));
618 LIST_INSERT_HEAD(&cc->
cc_callwheel[bucket], c, c_links.le);
626 if (SBT_MAX - c->c_time < c->c_precision)
627 c->c_precision = SBT_MAX - c->c_time;
628 sbt = c->c_time + c->c_precision;
637#ifdef CALLOUT_PROFILING
638 int *mpcalls,
int *lockcalls,
int *gcalls,
642 struct rm_priotracker tracker;
643 callout_func_t *c_func, *drain;
645 struct lock_class *
class;
646 struct lock_object *c_lock;
647 uintptr_t lock_status;
651 callout_func_t *new_func;
654 sbintime_t new_prec, new_time;
656#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
657 sbintime_t sbt1, sbt2;
659 static sbintime_t maxdt = 2 * SBT_1MS;
660 static callout_func_t *lastfunc;
663 KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
664 (
"softclock_call_cc: pend %p %x", c, c->c_iflags));
665 KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
666 (
"softclock_call_cc: act %p %x", c, c->c_flags));
667 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
669 if (c->c_flags & CALLOUT_SHAREDLOCK) {
671 lock_status = (uintptr_t)&tracker;
678 c_iflags = c->c_iflags;
679 c->c_iflags &= ~CALLOUT_PENDING;
687 if (c_lock != NULL) {
688 class->lc_lock(c_lock, lock_status);
694 class->lc_unlock(c_lock);
699 if (c_lock == &
Giant.lock_object) {
700#ifdef CALLOUT_PROFILING
703 CTR3(KTR_CALLOUT,
"callout giant %p func %p arg %p",
706#ifdef CALLOUT_PROFILING
709 CTR3(KTR_CALLOUT,
"callout lock %p func %p arg %p",
713#ifdef CALLOUT_PROFILING
716 CTR3(KTR_CALLOUT,
"callout %p func %p arg %p",
719 KTR_STATE3(KTR_SCHED,
"callout", cc->cc_ktr_event_name,
"running",
720 "func:%p", c_func,
"arg:%p", c_arg,
"direct:%d", direct);
721#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
724 THREAD_NO_SLEEPING();
725 SDT_PROBE1(callout_execute, , , callout__start, c);
727 SDT_PROBE1(callout_execute, , , callout__end, c);
728 THREAD_SLEEPING_OK();
729#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
733 if (lastfunc != c_func || sbt2 > maxdt * 2) {
736 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
737 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
743 KTR_STATE0(KTR_SCHED,
"callout", cc->cc_ktr_event_name,
"idle");
744 CTR1(KTR_CALLOUT,
"callout %p finished", c);
745 if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0)
746 class->lc_unlock(c_lock);
749 KASSERT(
cc_exec_curr(cc, direct) == c, (
"mishandled cc_curr"));
772 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
784 new_cpu = cc_migration_cpu(cc, direct);
785 new_time = cc_migration_time(cc, direct);
786 new_prec = cc_migration_prec(cc, direct);
787 new_func = cc_migration_func(cc, direct);
788 new_arg = cc_migration_arg(cc, direct);
799 "deferred cancelled %p func %p arg %p",
800 c, new_func, new_arg);
803 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
805 new_cc = callout_cpu_switch(c, cc, new_cpu);
806 flags = (direct) ? C_DIRECT_EXEC : 0;
808 new_arg, new_cpu,
flags);
812 panic(
"migration should not happen");
836 struct thread *td = curthread;
839#ifdef CALLOUT_PROFILING
840 int depth, gcalls, lockcalls, mpcalls;
860#ifdef CALLOUT_PROFILING
861 depth = gcalls = lockcalls = mpcalls = 0;
863 while ((c = TAILQ_FIRST(&cc->
cc_expireq)) != NULL) {
864 TAILQ_REMOVE(&cc->
cc_expireq, c, c_links.tqe);
866#ifdef CALLOUT_PROFILING
867 &mpcalls, &lockcalls, &gcalls,
870#ifdef CALLOUT_PROFILING
874#ifdef CALLOUT_PROFILING
875 avg_depth += (depth * 1000 - avg_depth) >> 8;
876 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
877 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
878 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
885 sbintime_t *
res, sbintime_t *prec_res)
887 sbintime_t to_sbt, to_pr;
889 if ((
flags & (C_ABSOLUTE | C_PRECALC)) != 0) {
891 *prec_res = precision;
904 to_sbt = DPCPU_GET(hardclocktime);
907 to_sbt = DPCPU_GET(hardclocktime);
910 if (cold && to_sbt == 0)
911 to_sbt = sbinuptime();
912 if ((
flags & C_HARDCLOCK) == 0)
915 to_sbt = sbinuptime();
916 if (SBT_MAX - to_sbt < sbt)
922 sbt >> C_PRELGET(
flags));
923 *prec_res = to_pr > precision ? to_pr : precision;
944 callout_func_t *ftn,
void *arg,
int cpu,
int flags)
946 sbintime_t to_sbt, precision;
948 int cancelled, direct;
954 }
else if ((cpu >= MAXCPU) ||
957 panic(
"Invalid CPU in callout %d", cpu);
966 if (
flags & C_DIRECT_EXEC) {
971 KASSERT(!direct || c->c_lock == NULL ||
972 (LOCK_CLASS(c->c_lock)->lc_flags & LC_SPINLOCK),
973 (
"%s: direct callout %p has non-spin lock", __func__, c));
995 CTR4(KTR_CALLOUT,
"%s %p func %p arg %p",
996 cancelled ?
"cancelled" :
"failed to cancel",
997 c, c->c_func, c->c_arg);
1010 cc_migration_cpu(cc, direct) = cpu;
1011 cc_migration_time(cc, direct) = to_sbt;
1012 cc_migration_prec(cc, direct) = precision;
1013 cc_migration_func(cc, direct) = ftn;
1014 cc_migration_arg(cc, direct) = arg;
1021 if (c->c_iflags & CALLOUT_PENDING) {
1022 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1025 LIST_REMOVE(c, c_links.le);
1027 TAILQ_REMOVE(&cc->
cc_expireq, c, c_links.tqe);
1030 c->c_iflags &= ~ CALLOUT_PENDING;
1031 c->c_flags &= ~ CALLOUT_ACTIVE;
1040 if (c->c_cpu != cpu) {
1058 cc_migration_cpu(cc, direct) = cpu;
1059 cc_migration_time(cc, direct) = to_sbt;
1060 cc_migration_prec(cc, direct) = precision;
1061 cc_migration_func(cc, direct) = ftn;
1062 cc_migration_arg(cc, direct) = arg;
1063 c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING);
1064 c->c_flags |= CALLOUT_ACTIVE;
1066 "migration of %p func %p arg %p in %d.%08x to %u deferred",
1067 c, c->c_func, c->c_arg, (
int)(to_sbt >> 32),
1068 (u_int)(to_sbt & 0xffffffff), cpu);
1072 cc = callout_cpu_switch(c, cc, cpu);
1077 CTR6(KTR_CALLOUT,
"%sscheduled %p func %p arg %p in %d.%08x",
1078 cancelled ?
"re" :
"", c, c->c_func, c->c_arg, (
int)(to_sbt >> 32),
1079 (u_int)(to_sbt & 0xffffffff));
1091 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
1097 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
1104 struct lock_class *
class;
1105 int direct, sq_locked, use_lock;
1106 int cancelled, not_on_a_list;
1108 if ((
flags & CS_DRAIN) != 0)
1109 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
1110 "calling %s", __func__);
1112 KASSERT((
flags & CS_DRAIN) == 0 || drain == NULL,
1113 (
"Cannot set drain callback and CS_DRAIN flag at the same time"));
1119 if ((
flags & CS_DRAIN) == 0 && c->c_lock != NULL) {
1120 if (c->c_lock == &
Giant.lock_object)
1121 use_lock = mtx_owned(&
Giant);
1124 class = LOCK_CLASS(c->c_lock);
1125 class->lc_assert(c->c_lock, LA_XLOCKED);
1129 if (c->c_iflags & CALLOUT_DIRECT) {
1139 if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) ==
1140 (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) &&
1141 ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) {
1154 c->c_iflags &= ~CALLOUT_PENDING;
1155 c->c_flags &= ~CALLOUT_ACTIVE;
1166 if (sq_locked != 0 && cc != old_cc) {
1174 panic(
"migration should not happen");
1188 if ((
flags & CS_DRAIN) == 0)
1189 c->c_flags &= ~CALLOUT_ACTIVE;
1191 if ((
flags & CS_DRAIN) != 0) {
1236 &cc->
cc_lock.lock_object,
"codrain",
1248 c->c_flags &= ~CALLOUT_ACTIVE;
1249 }
else if (use_lock &&
1262 CTR3(KTR_CALLOUT,
"cancelled %p func %p arg %p",
1263 c, c->c_func, c->c_arg);
1265 (
"callout wrongly scheduled for migration"));
1267 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1269 cc_migration_cpu(cc, direct) = CPUBLOCK;
1270 cc_migration_time(cc, direct) = 0;
1271 cc_migration_prec(cc, direct) = 0;
1272 cc_migration_func(cc, direct) = NULL;
1273 cc_migration_arg(cc, direct) = NULL;
1277 KASSERT(!sq_locked, (
"sleepqueue chain locked"));
1288 c->c_iflags &= ~CALLOUT_DFRMIGRATION;
1297 cc_migration_cpu(cc, direct) = CPUBLOCK;
1298 cc_migration_time(cc, direct) = 0;
1299 cc_migration_prec(cc, direct) = 0;
1300 cc_migration_func(cc, direct) = NULL;
1301 cc_migration_arg(cc, direct) = NULL;
1303 CTR3(KTR_CALLOUT,
"postponing stop %p func %p arg %p",
1304 c, c->c_func, c->c_arg);
1307 (
"callout drain function already set to %p",
1312 return ((
flags & CS_EXECUTING) != 0);
1314 CTR3(KTR_CALLOUT,
"failed to stop %p func %p arg %p",
1315 c, c->c_func, c->c_arg);
1318 (
"callout drain function already set to %p",
1323 KASSERT(!sq_locked, (
"sleepqueue chain still locked"));
1324 cancelled = ((
flags & CS_EXECUTING) != 0);
1331 if ((c->c_iflags & CALLOUT_PENDING) == 0) {
1332 CTR3(KTR_CALLOUT,
"failed to stop %p func %p arg %p",
1333 c, c->c_func, c->c_arg);
1344 c->c_iflags &= ~CALLOUT_PENDING;
1345 c->c_flags &= ~CALLOUT_ACTIVE;
1347 CTR3(KTR_CALLOUT,
"cancelled %p func %p arg %p",
1348 c, c->c_func, c->c_arg);
1349 if (not_on_a_list == 0) {
1350 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) {
1353 LIST_REMOVE(c, c_links.le);
1355 TAILQ_REMOVE(&cc->
cc_expireq, c, c_links.tqe);
1365 bzero(c,
sizeof *c);
1368 c->c_iflags = CALLOUT_RETURNUNLOCKED;
1370 c->c_lock = &
Giant.lock_object;
1379 bzero(c,
sizeof *c);
1381 KASSERT((
flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
1382 (
"callout_init_lock: bad flags %d",
flags));
1383 KASSERT(lock != NULL || (
flags & CALLOUT_RETURNUNLOCKED) == 0,
1384 (
"callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
1385 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
1386 (
"%s: callout %p has sleepable lock", __func__, c));
1387 c->c_iflags =
flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
1395 sbt += (uint64_t)sbt >> 1;
1396 if (
sizeof(
long) >=
sizeof(sbintime_t))
1399 return (flsl(((uint64_t)sbt) >> 32) + 32);
1409 struct callout *tmp;
1411 struct callout_list *sc;
1412 sbintime_t maxpr, maxt, medpr, medt, now, spr,
st, t;
1413 int ct[64], cpr[64], ccpbk[32];
1414 int error, val, i,
count, tcum, pcum, maxc, c, medc;
1419 if (error != 0 || req->newptr == NULL)
1422 st = spr = maxt = maxpr = 0;
1423 bzero(ccpbk,
sizeof(ccpbk));
1424 bzero(ct,
sizeof(ct));
1425 bzero(cpr,
sizeof(cpr));
1433 LIST_FOREACH(tmp, sc, c_links.le) {
1435 t = tmp->c_time - now;
1439 spr += tmp->c_precision / SBT_1US;
1442 if (tmp->c_precision > maxpr)
1443 maxpr = tmp->c_precision;
1445 cpr[
flssbt(tmp->c_precision)]++;
1449 ccpbk[fls(c + c / 2)]++;
1455 for (i = 0, tcum = 0; i < 64 && tcum <
count / 2; i++)
1457 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1458 for (i = 0, pcum = 0; i < 64 && pcum <
count / 2; i++)
1460 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
1461 for (i = 0, c = 0; i < 32 && c <
count / 2; i++)
1463 medc = (i >= 2) ? (1 << (i - 2)) : 0;
1465 printf(
"Scheduled callouts statistic snapshot:\n");
1466 printf(
" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
1468 printf(
" C/Bk: med %5d avg %6d.%06jd max %6d\n",
1473 printf(
" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1474 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
1476 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
1477 printf(
" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
1478 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
1479 (spr /
count) / 1000000, (spr /
count) % 1000000,
1480 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
1481 printf(
" Distribution: \tbuckets\t time\t tcum\t"
1483 for (i = 0, tcum = pcum = 0; i < 64; i++) {
1484 if (ct[i] == 0 && cpr[i] == 0)
1486 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
1489 printf(
" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
1490 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
1492 ct[i], tcum, cpr[i], pcum);
1497 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1499 "Dump immediate statistic snapshot of the scheduled callouts");
1503_show_callout(
struct callout *c)
1506 db_printf(
"callout %p\n", c);
1507#define C_DB_PRINTF(f, e) db_printf(" %s = " f "\n", #e, c->e);
1508 db_printf(
" &c_links = %p\n", &(c->c_links));
1509 C_DB_PRINTF(
"%" PRId64, c_time);
1510 C_DB_PRINTF(
"%" PRId64, c_precision);
1511 C_DB_PRINTF(
"%p", c_arg);
1512 C_DB_PRINTF(
"%p", c_func);
1513 C_DB_PRINTF(
"%p", c_lock);
1514 C_DB_PRINTF(
"%#x", c_flags);
1515 C_DB_PRINTF(
"%#x", c_iflags);
1516 C_DB_PRINTF(
"%d", c_cpu);
1520DB_SHOW_COMMAND(callout, db_show_callout)
1524 db_printf(
"usage: show callout <struct callout *>\n");
1528 _show_callout((
struct callout *)
addr);
1532_show_last_callout(
int cpu,
int direct,
const char *dirstr)
1540 db_printf(
"cpu %d last%s callout function: %p ", cpu, dirstr, func);
1541 db_printsym((db_expr_t)func, DB_STGY_ANY);
1542 db_printf(
"\ncpu %d last%s callout argument: %p\n", cpu, dirstr, arg);
1545DB_SHOW_COMMAND(callout_last, db_show_callout_last)
1551 db_printf(
"no such cpu: %d\n", (
int)
addr);
1560 while (cpu <= last) {
1561 if (!CPU_ABSENT(cpu)) {
1562 _show_last_callout(cpu, 0,
"");
1563 _show_last_callout(cpu, 1,
" direct");
static struct bt_table st
void cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
int cpuset_setithread(lwpid_t id, int cpu)
int kproc_kthread_add(void(*func)(void *), void *arg, struct proc **procptr, struct thread **tdptr, int flags, int pages, const char *procname, const char *fmt,...)
void * malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds, int flags)
void thread_lock_block_wait(struct thread *td)
void thread_lock_set(struct thread *td, struct mtx *new)
struct mtx __exclusive_cache_line Giant
struct lock_class lock_class_rm
void panic(const char *fmt,...)
void mi_switch(int flags)
void wakeup(const void *ident)
int sysctl_handle_int(SYSCTL_HANDLER_ARGS)
sbintime_t sbt_tickthreshold
int callout_schedule_on(struct callout *c, int to_ticks, int cpu)
int _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
static void start_softclock(void *dummy)
DPCPU_DECLARE(sbintime_t, hardclocktime)
static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, int direct)
#define cc_exec_last_func(cc, dir)
static struct callout_cpu * callout_lock(struct callout *c)
#define cc_exec_curr(cc, dir)
static int flssbt(sbintime_t sbt)
int callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec, callout_func_t *ftn, void *arg, int cpu, int flags)
SDT_PROVIDER_DEFINE(callout_execute)
#define cc_exec_waiting(cc, dir)
SDT_PROBE_DEFINE1(callout_execute,,, callout__start, "struct callout *")
void callout_process(sbintime_t now)
void callout_init(struct callout *c, int mpsafe)
#define CC_LOCK_ASSERT(cc)
static int sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures")
void _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
SYSCTL_PROC(_kern, OID_AUTO, callout_stat, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE, 0, 0, sysctl_kern_callout_stat, "I", "Dump immediate statistic snapshot of the scheduled callouts")
static void callout_cpu_init(struct callout_cpu *cc, int cpu)
static void softclock_thread(void *arg)
void callout_when(sbintime_t sbt, sbintime_t precision, int flags, sbintime_t *res, sbintime_t *prec_res)
static u_int __read_mostly callwheelsize
#define cc_exec_cancel(cc, dir)
static u_int __read_mostly callwheelmask
SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN|CTLFLAG_NOFETCH, &ncallout, 0, "Number of entries in callwheel and size of timeout() preallocation")
static int pin_default_swi
#define cc_exec_drain(cc, dir)
static struct callout_cpu cc_cpu
static void callout_callwheel_init(void *dummy)
static int __read_mostly cc_default_cpu
int callout_schedule(struct callout *c, int to_ticks)
static void callout_cc_add(struct callout *c, struct callout_cpu *cc, sbintime_t sbt, sbintime_t precision, void(*func)(void *), void *arg, int cpu, int flags)
static int cc_cce_migrating(struct callout_cpu *cc, int direct)
SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL)
static u_int callout_get_bucket(sbintime_t sbt)
static u_int callout_hash(sbintime_t sbt)
static void cc_cce_cleanup(struct callout_cpu *cc, int direct)
#define callout_migrating(c)
#define cc_exec_last_arg(cc, dir)
void sched_class(struct thread *td, int class)
void sched_prio(struct thread *td, u_char prio)
void sched_add(struct thread *td, int flags)
struct callout_tailq cc_expireq
struct cc_exec cc_exec_entity[2]
struct mtx_padalign cc_lock
struct thread * cc_thread
struct callout_list * cc_callwheel
callout_func_t * cc_drain
static bool kasan_enabled __read_mostly
struct pcpu * pcpu_find(u_int cpuid)
int printf(const char *fmt,...)
int snprintf(char *str, size_t size, const char *format,...)
void sleepq_release(const void *wchan)
void sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg, int flags, int queue)
void sleepq_wait(const void *wchan, int pri)
void sleepq_lock(const void *wchan)