40#include "opt_hwpmc_hooks.h"
45#include <sys/cpuset.h>
46#include <sys/kernel.h>
49#include <sys/kthread.h>
52#include <sys/resourcevar.h>
56#include <sys/sysctl.h>
58#include <sys/turnstile.h>
59#include <sys/umtxvar.h>
60#include <machine/pcb.h>
61#include <machine/smp.h>
64#include <sys/pmckern.h>
68#include <sys/dtrace_bsd.h>
70dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)

#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu) */
#endif
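/*
 * Illustrative note (not part of the original source): sched_priority()
 * folds ts_estcpu / INVERSE_ESTCPU_WEIGHT plus a nice-based term into the
 * user priority, so ESTCPULIM() caps ts_estcpu at the largest value whose
 * weighted contribution still fits in the timeshare range, one run queue
 * (RQ_PPQ) short of the top.  On UP, with INVERSE_ESTCPU_WEIGHT == 8,
 * NICE_WEIGHT == 1 and PRIO_MAX - PRIO_MIN == 40, the cap works out to
 * 8 * (40 - 4) + 8 - 1 = 295.
 */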
#define	TS_NAME_LEN	(MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))

#define	TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define	TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */
#define	TDF_SLICEEND	TDF_SCHED2	/* Thread time slice is over. */

#define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */

#define	SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
_Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
    sizeof(struct thread0_storage),
    "increase struct thread0_storage.t0st_sched size");
static int	sched_pickcpu(struct thread *td);
static int	forward_wakeup(int cpunum);
static void	kick_other_cpu(int pri, int cpuid);
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];

static cpuset_t idle_cpus_mask;
	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val, period;

	period = 1000000 / realstathz;
	new_val = period * sched_slice;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val <= 0)
		return (EINVAL);
	sched_slice = imax(1, (new_val + period / 2) / period);
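	/*
	 * Worked example (illustrative): with stathz = 127, period is
	 * 1000000 / 127 = 7874 us.  Writing 94488 us to
	 * kern.sched.quantum gives sched_slice = (94488 + 3937) / 7874
	 * = 12 stathz ticks; the imax() keeps the slice at one tick or
	 * more no matter how small a quantum is requested.
	 */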
SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Scheduler");
SYSCTL_PROC(_kern_sched, OID_AUTO, quantum,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kern_quantum, "I",
    "Quantum for timeshare threads in microseconds");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
    "Quantum for timeshare threads in stathz ticks");
static SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Kernel SMP");
static int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle cpus");

static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "allow threads to share a quantum");
268 "struct proc *",
"uint8_t");
270 "struct proc *",
"void *");
272 "struct proc *",
"void *",
"int");
274 "struct proc *",
"uint8_t",
"struct thread *");
static __inline void
sched_load_add(void)
{

	sched_tdcnt++;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
	SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{

	sched_tdcnt--;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
	SDT_PROBE2(sched, , , load__change, NOCPU, sched_tdcnt);
}
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}
int
maybe_preempt(struct thread *td)
{
	struct thread *ctd;
	int cpri, pri;

	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (KERNEL_PANICKED() || pri >= cpri ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	CTR0(KTR_PROC, "maybe_preempt: scheduling preemption");
	ctd->td_owepreempt = 1;
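	/*
	 * Illustrative note: a thread preempts only when its priority
	 * value is numerically lower (better) than the running
	 * thread's.  Without FULL_PREEMPTION the test above further
	 * restricts immediate preemption to incoming threads at
	 * interrupt-thread priority or better, unless the running
	 * thread is in the idle class; everything else merely owes a
	 * reschedule at the next switch point.
	 */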
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of 'ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_UINT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0,
    "Decay factor used for updating %CPU");
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			continue;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td_get_sched(td);
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN. */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t)ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t)ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks * FSCALE / realstathz)) >>
				    FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			if (awake) {
				if (ts->ts_slptime > 1) {
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
	ts = td_get_sched(td);
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		ts->ts_estcpu = 0;
	else {
		newcpu = ts->ts_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		ts->ts_estcpu = newcpu;
	}
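	/*
	 * Illustrative note: ts_slptime counts seconds of sleep (it is
	 * bumped once per schedcpu() pass).  A thread asleep longer
	 * than 5 * loadfac is forgiven all of its estcpu; a shorter
	 * sleeper has estcpu decayed once per second slept, mirroring
	 * the decay schedcpu() applies to runnable threads.
	 */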
	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	newpriority = PUSER +
	    td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
	    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
	newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
	    PRI_MAX_TIMESHARE);

	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;
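	/*
	 * Worked example (illustrative): on UP, where
	 * INVERSE_ESTCPU_WEIGHT is 8, a nice-0 thread with
	 * ts_estcpu == 80 computes
	 * PUSER + 80 / 8 + 1 * (0 - PRIO_MIN) = PUSER + 10 + 20,
	 * which is then clamped into
	 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
	 */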
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td_get_sched(td);

	/*
	 * Force a context switch if the current thread has used up a
	 * full time slice.
	 */
	if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
		ts->ts_slice = sched_slice;
		td->td_flags |= TDF_SLICEEND | TDF_NEEDRESCHED;
	}

	stat = DPCPU_PTR(idlestat);

	for ( ; cnt > 0; cnt--)
		sched_clock_tick(td);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
	    "prio:%d", td->td_priority);

	PROC_LOCK_ASSERT(p, MA_OWNED);

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
	    "prio:%d", child->td_priority);
	if ((child->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();
	thread_unlock(child);
	childtd->td_oncpu = NOCPU;
	childtd->td_lastcpu = NOCPU;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	childtd->td_domain.dr_policy = td->td_cpuset->cs_domain;
	childtd->td_priority = childtd->td_base_pri;
	ts = td_get_sched(childtd);
	bzero(ts, sizeof(*ts));
	tsc = td_get_sched(td);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
	if (td != curthread && prio > td->td_priority) {
		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
		    prio, KTR_ATTR_LINKED, sched_tdname(td));
		SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
		    curthread);
	}
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {

	td->td_flags |= TDF_BORROWING;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;

	td->td_base_pri = prio;

	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)

	oldprio = td->td_priority;

	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_lend_user_pri <= prio)
		return;
	td->td_user_pri = prio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_lend_user_pri = prio;
	td->td_user_pri = min(prio, td->td_base_user_pri);
	if (td->td_priority > td->td_user_pri)
		sched_prio(td, td->td_user_pri);
	else if (td->td_priority != td->td_user_pri)
		td->td_flags |= TDF_NEEDRESCHED;

	if (td->td_lend_user_pri != prio)
		goto lend;
	if (td->td_user_pri != min(prio, td->td_base_user_pri))
		goto lend;
	if (td->td_priority != td->td_user_pri)
		goto lend;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td_get_sched(td)->ts_slptime = 0;
	if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
		td->td_flags |= TDF_CANSWAP;
	struct thread *newtd;

	ts = td_get_sched(td);

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	td->td_lastcpu = td->td_oncpu;
	preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
	    (flags & SW_PREEMPT) != 0;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;

	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
		CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, preempted ?
			    SRQ_HOLDTD|SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_HOLDTD|SRQ_OURSELF|SRQ_YIELDING);
		}
	}
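	/*
	 * Illustrative note: "preempted" distinguishes an involuntary
	 * switch that happened before the slice ran out from a plain
	 * slice expiry (TDF_SLICEEND).  The distinction travels with
	 * the thread as SRQ_PREEMPTED when it is put back on the run
	 * queue, and shows up as the last argument of the
	 * sched:::enqueue probe.
	 */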
	mtx_unlock_spin(tmtx);

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();

#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
		    "prio:%d", td->td_priority);
	else
		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
		    "lockname:\"%s\"", td->td_lockname);
#endif

	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);

	SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);

	lock_profile_release_lock(&sched_lock.lock_object, true);
		if (dtrace_vtime_active)
			(*dtrace_vtime_switch_func)(newtd);

		cpu_switch(td, newtd, tmtx);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    true, 0, 0, __FILE__, __LINE__);

		SDT_PROBE0(sched, , , on__cpu);
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
	} else {
		SDT_PROBE0(sched, , , remain__cpu);
	}

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
	    "prio:%d", td->td_priority);

	if (td->td_flags & TDF_IDLETD)
		CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);

	td->td_oncpu = PCPU_GET(cpuid);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td_get_sched(td);
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
static int
forward_wakeup(int cpunum)
{
	struct pcpu *pc;
	cpuset_t dontuse, map, map2;
	u_int id, me;
	int iscpuset;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || KERNEL_PANICKED())
		return (0);

	forward_wakeups_requested++;

	me = PCPU_GET(cpuid);

	/* Don't bother if we should be doing it ourself. */
	if (CPU_ISSET(me, &idle_cpus_mask) &&
	    (cpunum == NOCPU || me == cpunum))
		return (0);

	CPU_SETOF(me, &dontuse);
	CPU_OR(&dontuse, &dontuse, &stopped_cpus);
	CPU_OR(&dontuse, &dontuse, &hlt_cpus_mask);
	CPU_ZERO(&map2);
	if (forward_wakeup_use_loop) {
		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpuid;
			if (!CPU_ISSET(id, &dontuse) &&
			    pc->pc_curthread == pc->pc_idlethread) {
				CPU_SET(id, &map2);
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = idle_cpus_mask;
		CPU_ANDNOT(&map, &map, &dontuse);

		/* If they are both on, compare and use loop if different. */
		if (forward_wakeup_use_loop) {
			if (CPU_CMP(&map, &map2)) {
				printf("map != map2, loop method preferred\n");
				map = map2;
			}
		}
	} else {
		map = map2;
	}

	/* If we only allow a specific CPU, then mask off all the others. */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus),
		    ("forward_wakeup: bad cpunum."));
		iscpuset = CPU_ISSET(cpunum, &map);
		if (iscpuset == 0)
			CPU_ZERO(&map);
		else
			CPU_SETOF(cpunum, &map);
	}
	if (!CPU_EMPTY(&map)) {
		forward_wakeups_delivered++;
		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpuid;
			if (!CPU_ISSET(id, &map))
				continue;
			if (cpu_idle_wakeup(pc->pc_cpuid))
				CPU_CLR(id, &map);
		}
		if (!CPU_EMPTY(&map))
			ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}
static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu;
	int cpri;

	pcpu = pcpu_find(cpuid);
	if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
		forward_wakeups_delivered++;
		if (!cpu_idle_wakeup(cpuid))
			ipi_cpu(cpuid, IPI_AST);
		return;
	}

	cpri = pcpu->pc_curthread->td_priority;
	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_cpu(cpuid, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_cpu(cpuid, IPI_AST);
}
static int
sched_pickcpu(struct thread *td)
{
	int best, cpu;

	mtx_assert(&sched_lock, MA_OWNED);

	if (td->td_lastcpu != NOCPU && THREAD_CAN_SCHED(td, td->td_lastcpu))
		best = td->td_lastcpu;
	else
		best = NOCPU;
	CPU_FOREACH(cpu) {
		if (!THREAD_CAN_SCHED(td, cpu))
			continue;
		if (best == NOCPU)
			best = cpu;
		else if (runq_length[cpu] < runq_length[best])
			best = cpu;
	}
	KASSERT(best != NOCPU, ("no valid CPUs"));

	return (best);
}
	ts = td_get_sched(td);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));

	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));
	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
	    flags & SRQ_PREEMPTED);

	if ((flags & SRQ_HOLD) != 0)
		td->td_lock = &sched_lock;

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (td->td_flags & TDF_BOUND) {
		/* Find CPU from bound runq. */
		KASSERT(SKE_RUNQ_PCPU(ts),
		    ("sched_add: bound td_sched not on cpu runq"));
		cpu = ts->ts_runq - &runq_pcpu[0];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (ts->ts_flags & TSF_AFFINITY) {
		/* Find a valid CPU for our cpuset. */
		cpu = sched_pickcpu(td);
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
		    td);
		cpu = NOCPU;
		ts->ts_runq = &runq;
	}

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_add();

	cpuid = PCPU_GET(cpuid);
	if (single_cpu && cpu != cpuid) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			tidlemsk = idle_cpus_mask;
			CPU_ANDNOT(&tidlemsk, &tidlemsk, &hlt_cpus_mask);
			CPU_CLR(cpuid, &tidlemsk);

			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
			    ((flags & SRQ_INTR) == 0) &&
			    !CPU_EMPTY(&tidlemsk))
				forwarded = forward_wakeup(cpu);
		}
	}

	if ((flags & SRQ_HOLDTD) == 0)
		thread_unlock(td);
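	/*
	 * Illustrative note: on SMP a pinned, bound, or
	 * affinity-restricted thread goes to a specific per-CPU run
	 * queue and, when that CPU is not the current one, is handed
	 * off with kick_other_cpu().  A global-queue thread instead
	 * tries forward_wakeup(), provided this CPU is not idle, the
	 * enqueue did not come from an interrupt (SRQ_INTR), and some
	 * other non-halted CPU is idle.
	 */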
	ts = td_get_sched(td);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));
	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));
	SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
	    flags & SRQ_PREEMPTED);

	if ((flags & SRQ_HOLD) != 0)
		td->td_lock = &sched_lock;

	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq",
	    ts, td);

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_add();

	if ((flags & SRQ_HOLDTD) == 0)
		thread_unlock(td);
	ts = td_get_sched(td);
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_rem: thread swapped out"));
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));

	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();

	if (ts->ts_runq != &runq)
		runq_length[ts->ts_runq - runq_pcpu]--;
	struct thread *tdcpu;

	rq = &runq;
	td = runq_choose_fuzz(&runq, runq_fuzz);
	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

	if (td == NULL ||
	    (tdcpu != NULL &&
	     tdcpu->td_priority < td->td_priority)) {
		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
		     PCPU_GET(cpuid));
		td = tdcpu;
		rq = &runq_pcpu[PCPU_GET(cpuid)];
	} else {
		CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
	}

		if (td == tdcpu)
			runq_length[PCPU_GET(cpuid)]--;

		KASSERT(td->td_flags & TDF_INMEM,
		    ("sched_choose: thread swapped out"));
		return (td);

	return (PCPU_GET(idlethread));
	SDT_PROBE2(sched, , , surrender, td, td->td_proc);
	if (td->td_critnest > 1) {
		td->td_owepreempt = 1;
	} else {
		thread_lock(td);
		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT);
	}

	td->td_priority = td->td_user_pri;
	td->td_base_pri = td->td_user_pri;
	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
	ts = td_get_sched(td);
	td->td_flags |= TDF_BOUND;
	ts->ts_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
	td->td_flags &= ~TDF_BOUND;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	return (sizeof(struct proc));

	return (sizeof(struct thread) + sizeof(struct td_sched));

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td_get_sched(td);
	return (ts->ts_pctcpu);
fixpt_t
sched_pctcpu_delta(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t delta;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td_get_sched(td);
	delta = 0;
	if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
		delta = (realstathz == 100)
		    ? ((fixpt_t)ts->ts_cpticks) <<
		    (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)ts->ts_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
		delta = ((FSCALE - ccpu) *
		    (ts->ts_cpticks * FSCALE / realstathz)) >> FSHIFT;
#endif
	}

	return (delta);
}
	THREAD_NO_SLEEPING();
	stat = DPCPU_PTR(idlestat);

	mtx_assert(&Giant, MA_NOTOWNED);

	KASSERT(curthread->td_md.md_spinlock_count == 1,
	    ("invalid count"));

	PCPU_SET(switchticks, ticks);

	lock_profile_release_lock(&sched_lock.lock_object, true);
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;

	td->td_oncpu = PCPU_GET(cpuid);
	lock_profile_obtain_lock_success(&sched_lock.lock_object, true,
	    0, 0, __FILE__, __LINE__);
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
	    "prio:%d", td->td_priority);
	SDT_PROBE0(sched, , , on__cpu);

#ifdef KTR
	ts = td_get_sched(td);
	if (ts->ts_name[0] == '\0')
		snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "%s tid %d", td->td_name, td->td_tid);
	return (ts->ts_name);
#else
	return (td->td_name);
#endif
void
sched_clear_tdname(struct thread *td)
{
	struct td_sched *ts;

	ts = td_get_sched(td);
	ts->ts_name[0] = '\0';
}
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	ts = td_get_sched(td);
	ts->ts_flags &= ~TSF_AFFINITY;

	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
		return;

	switch (TD_GET_STATE(td)) {

	if (ts->ts_runq != &runq &&

	td->td_flags |= TDF_NEEDRESCHED;
	if (td != curthread)
		ipi_cpu(cpu, IPI_AST);