44#include <sys/sysproto.h>
46#include <sys/kernel.h>
48#include <sys/malloc.h>
52#include <sys/refcount.h>
54#include <sys/resourcevar.h>
55#include <sys/rwlock.h>
58#include <sys/syscallsubr.h>
59#include <sys/sysctl.h>
60#include <sys/sysent.h>
62#include <sys/umtxvar.h>
65#include <vm/vm_param.h>
/* Map a uid to the head of its chain in the uidinfo hash table. */
71#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
76static void calcru1(
struct proc *p,
struct rusage_ext *ruxp,
77 struct timeval *up,
struct timeval *sp);
78static int donice(
struct thread *td,
struct proc *chgp,
int n);
79static struct uidinfo *
uilookup(uid_t uid);
/*
 * NOTE(review): fragmentary extraction of the getpriority()/setpriority()
 * syscall implementations and the donice() helper.  Many intervening lines
 * are missing from this view; comments annotate only the visible statements.
 */
85#ifndef _SYS_SYSPROTO_H_
86struct getpriority_args {
/* PRIO_PROCESS with who == 0: report the caller's own nice value. */
110 low = td->td_proc->p_nice;
/* PRIO_PGRP with who == 0: use the caller's own process group. */
124 pg = td->td_proc->p_pgrp;
/* Scan group members; only live (PRS_NORMAL) processes are considered. */
134 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
136 if (p->p_state == PRS_NORMAL &&
/* PRIO_USER with who == 0: use the caller's effective uid. */
148 who = td->td_ucred->cr_uid;
/* Scan every process in the system owned by the target uid. */
150 FOREACH_PROC_IN_SYSTEM(p) {
152 if (p->p_state == PRS_NORMAL &&
154 p->p_ucred->cr_uid == who) {
/* low still at its PRIO_MAX + 1 sentinel means nothing matched. */
167 if (low == PRIO_MAX + 1 && error == 0)
169 td->td_retval[0] = low;
/* setpriority(): apply donice() to each process selected by which/who. */
173#ifndef _SYS_SYSPROTO_H_
190 struct proc *curp, *p;
192 int found = 0, error = 0;
199 error =
donice(td, curp, prio);
207 error =
donice(td, p, prio);
226 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
228 if (p->p_state == PRS_NORMAL &&
230 error =
donice(td, p, prio);
240 who = td->td_ucred->cr_uid;
242 FOREACH_PROC_IN_SYSTEM(p) {
244 if (p->p_state == PRS_NORMAL &&
245 p->p_ucred->cr_uid == who &&
247 error =
donice(td, p, prio);
/* Report failure (presumably ESRCH — confirm) only if nothing matched. */
259 if (found == 0 && error == 0)
/*
 * donice(): validate and apply a new nice value n to process p on behalf
 * of thread td.  The process lock must be held by the caller.
 */
268donice(
struct thread *td,
struct proc *p,
int n)
272 PROC_LOCK_ASSERT(p, MA_OWNED);
/* Lowering the nice value (raising priority) requires privilege. */
279 if (n < p->p_nice &&
priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
/* Sysctl description string for the deprecated unprivileged-idprio knob. */
288 "Allow non-root users to set an idle priority (deprecated)");
/*
 * NOTE(review): fragmentary extraction of rtprio_thread(), rtprio(), and
 * the rtprio <-> kernel-priority conversion helpers rtp_to_pri() and
 * pri_to_rtp().  Missing lines between fragments; visible statements only.
 */
293#ifndef _SYS_SYSPROTO_H_
/* RTP_SET: copyin error is saved and reported only after permission checks. */
310 cierror = copyin(uap->
rtp, &rtp,
sizeof(
struct rtprio));
/* lwpid == 0 (or the caller's own tid) selects the calling thread. */
314 if (uap->
lwpid == 0 || uap->
lwpid == td->td_tid) {
331 return (copyout(&rtp, uap->
rtp,
sizeof(
struct rtprio)));
333 if ((error =
p_cansched(td, p)) || (error = cierror))
/* Realtime and idle scheduling classes each need their own privilege. */
354 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
355 (error =
priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
357 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
359 (error =
priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
/* rtprio(): process-wide variant of the above. */
374#ifndef _SYS_SYSPROTO_H_
391 cierror = copyin(uap->
rtp, &rtp,
sizeof(
struct rtprio));
/*
 * RTP_LOOKUP on a process: start from the weakest possible value and
 * report the strongest (lowest type, then lowest prio) among its threads.
 */
421 rtp.type = RTP_PRIO_IDLE;
422 rtp.prio = RTP_PRIO_MAX;
423 FOREACH_THREAD_IN_PROC(p, tdp) {
425 if (rtp2.type < rtp.type ||
426 (rtp2.type == rtp.type &&
427 rtp2.prio < rtp.prio)) {
428 rtp.type = rtp2.type;
429 rtp.prio = rtp2.prio;
434 return (copyout(&rtp, uap->
rtp,
sizeof(
struct rtprio)));
436 if ((error =
p_cansched(td, p)) || (error = cierror))
444 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
445 (error =
priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
447 if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
449 (error =
priv_check(td, PRIV_SCHED_IDPRIO)) != 0)
/* RTP_SET on a process: apply the new priority to every thread. */
461 FOREACH_THREAD_IN_PROC(p, td) {
/*
 * rtp_to_pri(): translate a struct rtprio into a kernel user priority,
 * range-checking rtp->prio against the bounds of each scheduling class.
 */
478 u_char newpri, oldclass, oldpri;
480 switch (RTP_PRIO_BASE(rtp->type)) {
481 case RTP_PRIO_REALTIME:
482 if (rtp->prio > RTP_PRIO_MAX)
484 newpri = PRI_MIN_REALTIME + rtp->prio;
486 case RTP_PRIO_NORMAL:
487 if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
489 newpri = PRI_MIN_TIMESHARE + rtp->prio;
/* default/idle class case — guard lines not visible in this extraction. */
492 if (rtp->prio > RTP_PRIO_MAX)
494 newpri = PRI_MIN_IDLE + rtp->prio;
501 oldclass = td->td_pri_class;
503 oldpri = td->td_user_pri;
505 if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
506 td->td_pri_class != RTP_PRIO_NORMAL))
/* If the thread holds a PI umtx lock, propagate the priority change. */
508 if (TD_ON_UPILOCK(td) && oldpri != newpri) {
/*
 * pri_to_rtp(): inverse conversion — derive a class-relative rtprio
 * from the thread's base user priority and priority class.
 */
523 switch (PRI_BASE(td->td_pri_class)) {
525 rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
528 rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
531 rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
536 rtp->type = td->td_pri_class;
/*
 * NOTE(review): fragmentary extraction of the 4.3BSD-compat rlimit
 * syscalls, the sys_setrlimit() entry, and the lim_cb() CPU-limit callout.
 */
540#if defined(COMPAT_43)
541#ifndef _SYS_SYSPROTO_H_
542struct osetrlimit_args {
/* osetrlimit(): widen the old 32-bit orlimit into a modern rlimit. */
548osetrlimit(
struct thread *td,
struct osetrlimit_args *uap)
554 if ((error = copyin(uap->rlp, &olim,
sizeof(
struct orlimit))))
556 lim.rlim_cur = olim.rlim_cur;
557 lim.rlim_max = olim.rlim_max;
562#ifndef _SYS_SYSPROTO_H_
563struct ogetrlimit_args {
/* ogetrlimit(): clamp 64-bit limits to the old ABI's 31-bit range. */
569ogetrlimit(
struct thread *td,
struct ogetrlimit_args *uap)
575 if (uap->which >= RLIM_NLIMITS)
588 olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
589 olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
590 error = copyout(&olim, uap->rlp,
sizeof(olim));
/* sys_setrlimit(): copy in the new limit, then defer to kern_setrlimit(). */
595#ifndef _SYS_SYSPROTO_H_
607 if ((error = copyin(uap->
rlp, &alim,
sizeof(
struct rlimit))))
/*
 * lim_cb(): periodic callout enforcing RLIMIT_CPU.  Kills the process
 * outright once accumulated runtime passes the hard limit; otherwise
 * (presumably sends SIGXCPU — lines not visible) and re-arms itself
 * every second while the process is not exiting.
 */
621 PROC_LOCK_ASSERT(p, MA_OWNED);
622 /* (fragment) */
626 if (p->p_cpulimit == RLIM_INFINITY)
629 FOREACH_THREAD_IN_PROC(p, td) {
/* Compare tick-denominated runtime against the limit in seconds. */
633 if (p->p_rux.rux_runtime > p->p_cpulimit *
cpu_tickrate()) {
635 if (p->p_rux.rux_runtime >= rlim.rlim_max *
cpu_tickrate()) {
636 killproc(p,
"exceeded maximum CPU limit");
638 if (p->p_cpulimit < rlim.rlim_max)
643 if ((p->p_flag & P_WEXIT) == 0)
644 callout_reset_sbt(&p->p_limco, SBT_1S, 0,
/*
 * NOTE(review): fragmentary extraction of kern_proc_setrlimit() — the
 * core "set a resource limit on process p" routine — followed by the
 * sys_getrlimit() syscall entry.  Many lines are missing.
 */
659 struct plimit *newlim, *oldlim, *oldlim_td;
660 struct rlimit *alimp;
661 struct rlimit oldssiz;
664 if (which >= RLIM_NLIMITS)
/* Negative values are treated as unlimited. */
670 if (limp->rlim_cur < 0)
671 limp->rlim_cur = RLIM_INFINITY;
672 if (limp->rlim_max < 0)
673 limp->rlim_max = RLIM_INFINITY;
674 /* (fragment) */
675 oldssiz.rlim_cur = 0;
679 alimp = &oldlim->pl_rlimit[which];
/* Raising either value above the current hard limit requires privilege. */
680 if (limp->rlim_cur > alimp->rlim_max ||
681 limp->rlim_max > alimp->rlim_max)
682 if ((error =
priv_check(td, PRIV_PROC_SETRLIMIT))) {
/* Soft limit may never exceed the hard limit. */
687 if (limp->rlim_cur > limp->rlim_max)
688 limp->rlim_cur = limp->rlim_max;
690 alimp = &newlim->pl_rlimit[which];
/* RLIMIT_CPU: arm the enforcement callout when a finite limit appears. */
694 if (limp->rlim_cur != RLIM_INFINITY &&
695 p->p_cpulimit == RLIM_INFINITY)
696 callout_reset_sbt(&p->p_limco, SBT_1S, 0,
698 p->p_cpulimit = limp->rlim_cur;
/* Let the ABI (sysentvec) adjust limits for emulated binaries. */
713 if (p->p_sysent->sv_fixlimit != NULL)
714 p->p_sysent->sv_fixlimit(&oldssiz,
/* Enforce a minimum of 1 — which limit this applies to is not visible. */
730 if (limp->rlim_cur < 1)
732 if (limp->rlim_max < 1)
736 if (p->p_sysent->sv_fixlimit != NULL)
737 p->p_sysent->sv_fixlimit(limp, which);
/* Fast path: sync this thread's COW limit snapshot when it is the only one. */
742 if (td == curthread && PROC_COW_CHANGECOUNT(td, p) == 1) {
747 if (oldlim_td != NULL) {
748 MPASS(oldlim_td == oldlim);
/*
 * RLIMIT_STACK: grow or shrink the usable stack region by adjusting
 * page protections below the stack top (skipped mid-exec).
 */
754 if (which == RLIMIT_STACK &&
759 (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
765 if (limp->rlim_cur != oldssiz.rlim_cur) {
770 if (limp->rlim_cur > oldssiz.rlim_cur) {
771 prot = p->p_sysent->sv_stackprot;
772 size = limp->rlim_cur - oldssiz.rlim_cur;
773 addr = round_page(p->p_vmspace->vm_stacktop) -
777 size = oldssiz.rlim_cur - limp->rlim_cur;
778 addr = round_page(p->p_vmspace->vm_stacktop) -
782 size = round_page(size);
/* Best-effort: protection change failure is deliberately ignored. */
783 (void)vm_map_protect(&p->p_vmspace->vm_map,
785 VM_MAP_PROTECT_SET_PROT);
/* sys_getrlimit(): validate the index, fetch, and copy out the limit. */
792#ifndef _SYS_SYSPROTO_H_
805 if (uap->
which >= RLIM_NLIMITS)
808 error = copyout(&rlim, uap->
rlp,
sizeof(
struct rlimit));
817calccru(
struct proc *p,
struct timeval *up,
struct timeval *sp)
820 PROC_LOCK_ASSERT(p, MA_OWNED);
821 calcru1(p, &p->p_crux, up, sp);
/*
 * NOTE(review): fragmentary extraction of calcru(), rufetchtd(), the
 * flsll() fallback macro, and mul64_by_fraction().  Missing lines between
 * fragments; visible statements only.
 *
 * calcru(): convert a live process's accumulated runtime into user and
 * system timevals.  If the stats are for the current process, the
 * in-progress time slice is charged to the current thread first.
 */
830calcru(
struct proc *p,
struct timeval *up,
struct timeval *sp)
835 PROC_LOCK_ASSERT(p, MA_OWNED);
836 PROC_STATLOCK_ASSERT(p, MA_OWNED);
/* Charge the running slice since the last context switch to this thread. */
844 if (td->td_proc == p) {
846 runtime = u - PCPU_GET(switchtime);
847 td->td_runtime += runtime;
848 td->td_incruntime += runtime;
849 PCPU_SET(switchtime, u);
/* Fold each thread's pending increments into the process totals. */
852 FOREACH_THREAD_IN_PROC(p, td) {
853 if (td->td_incruntime == 0)
/*
 * rufetchtd(): per-thread variant — same in-progress-slice charging for
 * the current thread, then convert that thread's rusage_ext.
 */
868 PROC_STATLOCK_ASSERT(p, MA_OWNED);
869 THREAD_LOCK_ASSERT(td, MA_OWNED);
876 if (td == curthread) {
878 runtime = u - PCPU_GET(switchtime);
879 td->td_runtime += runtime;
880 td->td_incruntime += runtime;
881 PCPU_SET(switchtime, u);
885 calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
/* Fallback flsll() built from fls() on the two 32-bit halves. */
889#ifndef __HAVE_INLINE_FLSLL
890#define flsll(x) (fls((x) >> 32) != 0 ? fls((x) >> 32) + 32 : fls(x))
/*
 * mul64_by_fraction(): compute a * (b / c) without intermediate 64-bit
 * overflow (exact algorithm mostly not visible in this extraction).
 */
896 uint64_t acc, bh, bl;
920 if (a >= (uint64_t)1 << 63)
923 for (i = 0; i < 128; i++) {
928 return (acc + (a * b) / c);
942 return (acc + (a * b) / c);
/*
 * NOTE(review): fragmentary extraction of calcru1() — split total runtime
 * (tu microseconds) into user/system shares proportional to the recorded
 * user, system and interrupt tick counts, then store the results in the
 * up/sp timevals.  Missing lines between fragments.
 */
973calcru1(
struct proc *p,
struct rusage_ext *ruxp,
struct timeval *up,
977 uint64_t ut, uu,
st, su, it, tt, tu;
/* Snapshot the raw tick counters. */
979 ut = ruxp->rux_uticks;
980 st = ruxp->rux_sticks;
981 it = ruxp->rux_iticks;
/* A negative total indicates a clock anomaly; warn and recover. */
989 if ((int64_t)tu < 0) {
991 printf(
"calcru: negative runtime of %jd usec for pid %d (%s)\n",
992 (intmax_t)tu, p->p_pid, p->p_comm);
/* Common case: values small enough that plain 64-bit multiply is exact. */
997 if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
1000 su = (tu *
st) / tt;
/*
 * Enforce monotonicity: never report user/system/total times smaller
 * than what was reported previously (rux_uu/rux_su/rux_tu).
 */
1006 if (tu >= ruxp->rux_tu) {
1011 if (uu < ruxp->rux_uu)
1013 if (su < ruxp->rux_su)
1015 }
/* Small backward steps (rounding jitter) are silently tolerated. */
else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
1028 }
/* Larger backward steps are only worth warning about on bare metal. */
else if (
vm_guest == VM_GUEST_NO) {
1037 printf(
"calcru: runtime went backwards from %ju usec "
1038 "to %ju usec for pid %d (%s)\n",
1039 (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
1040 p->p_pid, p->p_comm);
/* Convert the microsecond totals into timeval sec/usec pairs. */
1047 up->tv_sec = uu / 1000000;
1048 up->tv_usec = uu % 1000000;
1049 sp->tv_sec = su / 1000000;
1050 sp->tv_usec = su % 1000000;
/*
 * NOTE(review): fragmentary extraction of sys_getrusage(),
 * kern_getrusage(), and rucollect().  Missing lines between fragments.
 */
1053#ifndef _SYS_SYSPROTO_H_
/* sys_getrusage(): copy the gathered rusage out to userspace. */
1067 error = copyout(&ru, uap->
rusage,
sizeof(
struct rusage));
/* RUSAGE_CHILDREN: reaped-children stats plus converted child CPU times. */
1086 case RUSAGE_CHILDREN:
1087 *rup = p->p_stats->p_cru;
1088 calccru(p, &rup->ru_utime, &rup->ru_stime);
/*
 * rucollect(): merge ru2 into ru — ru_maxrss takes the maximum; the
 * remaining fields are walked as an array from ru_first to ru_last
 * (the summing loop body is not visible in this extraction).
 */
1112 if (ru->ru_maxrss < ru2->ru_maxrss)
1113 ru->ru_maxrss = ru2->ru_maxrss;
1115 ip2 = &ru2->ru_first;
1116 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
1121ruadd(
struct rusage *ru,
struct rusage_ext *rux,
struct rusage *ru2,
1122 struct rusage_ext *rux2)
1125 rux->rux_runtime += rux2->rux_runtime;
1126 rux->rux_uticks += rux2->rux_uticks;
1127 rux->rux_sticks += rux2->rux_sticks;
1128 rux->rux_iticks += rux2->rux_iticks;
1129 rux->rux_uu += rux2->rux_uu;
1130 rux->rux_su += rux2->rux_su;
1131 rux->rux_tu += rux2->rux_tu;
/*
 * NOTE(review): fragmentary extraction of ruxagg_ext_locked(),
 * ruxagg_locked(), and rufetch().  Missing lines between fragments.
 *
 * ruxagg_ext_locked(): fold a thread's incremental runtime and tick
 * counters into a rusage_ext accumulator.
 */
1142 rux->rux_runtime += td->td_incruntime;
1143 rux->rux_uticks += td->td_uticks;
1144 rux->rux_sticks += td->td_sticks;
1145 rux->rux_iticks += td->td_iticks;
/* ruxagg_locked(): requires thread lock and proc stat lock; clears the
 * thread's pending increment after aggregation. */
1151 THREAD_LOCK_ASSERT(td, MA_OWNED);
1152 PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);
1156 td->td_incruntime = 0;
/* rufetch(): gather stats from all live threads of p (stat lock held). */
1180 PROC_STATLOCK_ASSERT(p, MA_OWNED);
1183 if (p->p_numthreads > 0) {
1184 FOREACH_THREAD_IN_PROC(p, td) {
/*
 * NOTE(review): fragmentary extraction of the plimit (resource-limit
 * structure) management routines: lim_alloc, lim_hold, lim_cowsync,
 * lim_fork, lim_free, lim_freen, lim_copy, and the lim_max/lim_cur/
 * lim_rlimit accessors.  Missing lines between fragments.
 *
 * lim_alloc(): allocate a plimit with an initial reference count of 1.
 */
1214 struct plimit *limp;
1216 limp =
malloc(
sizeof(
struct plimit), M_PLIMIT, M_WAITOK);
1217 refcount_init(&limp->pl_refcnt, 1);
/* lim_hold(): take an additional reference. */
1225 refcount_acquire(&limp->pl_refcnt);
/* lim_cowsync(): point the thread's COW limit at the process's current
 * plimit, returning the old one (presumably for the caller to free). */
1234 struct plimit *oldlimit;
1238 PROC_LOCK_ASSERT(p, MA_OWNED);
1240 if (td->td_limit == p->p_limit)
1243 oldlimit = td->td_limit;
1244 td->td_limit =
lim_hold(p->p_limit);
/* lim_fork(): child shares the parent's plimit; arm the RLIMIT_CPU
 * callout in the child when the parent has a finite CPU limit. */
1253 PROC_LOCK_ASSERT(p1, MA_OWNED);
1254 PROC_LOCK_ASSERT(p2, MA_OWNED);
1256 p2->p_limit =
lim_hold(p1->p_limit);
1257 callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
1258 if (p1->p_cpulimit != RLIM_INFINITY)
1259 callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
/* lim_free()/lim_freen(): drop one or n references, free on the last. */
1267 if (refcount_release(&limp->pl_refcnt))
1268 free((
void *)limp, M_PLIMIT);
1275 if (refcount_releasen(&limp->pl_refcnt, n))
1276 free((
void *)limp, M_PLIMIT);
/* lim_copy(): the destination must not be shared. */
1287 KASSERT(dst->pl_refcnt <= 1, (
"lim_copy to shared limit"));
1288 bcopy(
src->pl_rlimit, dst->pl_rlimit,
sizeof(
src->pl_rlimit));
/* lim_max()/lim_max_proc(): return the hard limit. */
1301 return (rl.rlim_max);
1310 return (rl.rlim_max);
/* (lim_cur) defined with parentheses to suppress a macro of the same name. */
1318(
lim_cur)(
struct thread *td,
int which)
1323 return (rl.rlim_cur);
1332 return (rl.rlim_cur);
/* lim_rlimit(): fetch a limit via the thread's COW snapshot and let the
 * ABI layer (sv_fixlimit) adjust it for emulated binaries. */
1342 struct proc *p = td->td_proc;
1344 MPASS(td == curthread);
1345 KASSERT(which >= 0 && which < RLIM_NLIMITS,
1346 (
"request for invalid resource limit"));
1347 *rlp = td->td_limit->pl_rlimit[which];
1348 if (p->p_sysent->sv_fixlimit != NULL)
1349 p->p_sysent->sv_fixlimit(rlp, which);
/* lim_rlimit_proc(): same, but from the process's plimit (proc locked). */
1356 PROC_LOCK_ASSERT(p, MA_OWNED);
1357 KASSERT(which >= 0 && which < RLIM_NLIMITS,
1358 (
"request for invalid resource limit"));
1359 *rlp = p->p_limit->pl_rlimit[which];
1360 if (p->p_sysent->sv_fixlimit != NULL)
1361 p->p_sysent->sv_fixlimit(rlp, which);
/*
 * NOTE(review): fragmentary extraction of the per-uid accounting
 * (uidinfo) routines: uilookup, uifind, uihold, uifree, and
 * ui_racct_foreach.  Missing lines between fragments.
 *
 * uilookup(): scan the hash chain for an existing uidinfo for uid.
 */
1377static struct uidinfo *
1380 struct uihashhead *uipp;
1381 struct uidinfo *uip;
1385 LIST_FOREACH(uip, uipp, ui_hash)
1386 if (uip->ui_uid == uid) {
/*
 * uifind(): find-or-create a referenced uidinfo.  Fast path: the
 * caller's own credential uidinfos, which need no hash lookup.
 */
1402 struct uidinfo *new_uip, *uip;
1405 cred = curthread->td_ucred;
1406 if (cred->cr_uidinfo->ui_uid == uid) {
1407 uip = cred->cr_uidinfo;
1410 }
else if (cred->cr_ruidinfo->ui_uid == uid) {
1411 uip = cred->cr_ruidinfo;
/* Slow path: preallocate outside the lock, then re-check for a racing
 * insertion; the loser's allocation is destroyed. */
1422 new_uip =
malloc(
sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
1423 racct_create(&new_uip->ui_racct);
1424 refcount_init(&new_uip->ui_ref, 1);
1425 new_uip->ui_uid = uid;
1433 if ((uip =
uilookup(uid)) == NULL) {
1434 LIST_INSERT_HEAD(
UIHASH(uid), new_uip, ui_hash);
1439 racct_destroy(&new_uip->ui_racct);
1440 free(new_uip, M_UIDINFO);
/* uihold(): take an additional reference. */
1452 refcount_acquire(&uip->ui_ref);
/*
 * uifree(): lock-free fast path when ours is not the last reference;
 * otherwise unhash and free, warning about any counters that leaked.
 */
1474 if (refcount_release_if_not_last(&uip->ui_ref))
/* Re-check under the lock: someone may have found the entry meanwhile. */
1478 if (refcount_release(&uip->ui_ref) == 0) {
1483 racct_destroy(&uip->ui_racct);
1484 LIST_REMOVE(uip, ui_hash);
1487 if (uip->ui_sbsize != 0)
1488 printf(
"freeing uidinfo: uid = %d, sbsize = %ld\n",
1489 uip->ui_uid, uip->ui_sbsize);
1490 if (uip->ui_proccnt != 0)
1491 printf(
"freeing uidinfo: uid = %d, proccnt = %ld\n",
1492 uip->ui_uid, uip->ui_proccnt);
1493 if (uip->ui_vmsize != 0)
1494 printf(
"freeing uidinfo: uid = %d, swapuse = %lld\n",
1495 uip->ui_uid, (
unsigned long long)uip->ui_vmsize);
1496 free(uip, M_UIDINFO);
/*
 * ui_racct_foreach(): invoke callback on every uidinfo's racct across
 * all hash buckets; pre/post hooks bracket the traversal.
 */
1501ui_racct_foreach(
void (*
callback)(
struct racct *racct,
1502 void *arg2,
void *arg3),
void (*pre)(
void),
void (*post)(
void),
1503 void *arg2,
void *arg3)
1505 struct uidinfo *uip;
1506 struct uihashhead *uih;
/* Walk the bucket array from the top down. */
1511 for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
1512 LIST_FOREACH(uip, uih, ui_hash) {
1513 (
callback)(uip->ui_racct, arg2, arg3);
/*
 * NOTE(review): fragmentary extraction of chglimit() and its per-resource
 * wrappers (chgproccnt, chgsbsize, chgptscnt, chgkqcnt, chgumtxcnt).
 *
 * chglimit(): atomically adjust a per-uid counter by diff; on an increase
 * that would exceed max (when max != 0), roll the addition back.  A
 * counter observed below zero is reported by name as an accounting bug.
 */
1523chglimit(
struct uidinfo *uip,
long *limit,
int diff, rlim_t max,
const char *
name)
/* fetchadd returns the old value; add diff again to get the new one. */
1528 new = atomic_fetchadd_long(limit, (
long)diff) + diff;
1529 if (diff > 0 && max != 0) {
1530 if (new < 0 || new > max) {
/* Over the limit (or wrapped): undo the increment. */
1531 atomic_subtract_long(limit, (
long)diff);
1535 printf(
"negative %s for uid = %d\n",
name, uip->ui_uid);
/* chgproccnt(): per-uid process count. */
1547 return (
chglimit(uip, &uip->ui_proccnt, diff, max,
"proccnt"));
/* chgsbsize(): per-uid socket buffer accounting, tracked via *hiwat. */
1554chgsbsize(
struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
/* Growing with no limit configured is handled specially. */
1559 if (diff > 0 && max == 0) {
1562 rv =
chglimit(uip, &uip->ui_sbsize, diff, max,
"sbsize");
/* chgptscnt(): per-uid pseudo-terminal count. */
1577 return (
chglimit(uip, &uip->ui_ptscnt, diff, max,
"ptscnt"));
/* chgkqcnt(): per-uid kqueue count. */
1584 return (
chglimit(uip, &uip->ui_kqcnt, diff, max,
"kqcnt"));
/* chgumtxcnt(): per-uid umtx shared-object count. */
1591 return (
chglimit(uip, &uip->ui_umtxcnt, diff, max,
"umtxcnt"));
static struct bt_table st
void *() malloc(size_t size, struct malloc_type *mtp, int flags)
void free(void *addr, struct malloc_type *mtp)
int priv_check(struct thread *td, int priv)
struct sx __exclusive_cache_line proctree_lock
struct proc * pfind(pid_t pid)
struct sx __exclusive_cache_line allproc_lock
struct pgrp * pgfind(pid_t pgid)
int p_cansched(struct thread *td, struct proc *p)
int p_cansee(struct thread *td, struct proc *p)
int kern_getpriority(struct thread *td, int which, int who)
void rucollect(struct rusage *ru, struct rusage *ru2)
void ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2, struct rusage_ext *rux2)
void lim_copy(struct plimit *dst, struct plimit *src)
static LIST_HEAD(uihashhead, uidinfo)
static struct rwlock uihashtbl_lock
void lim_free(struct plimit *limp)
int sys_getpriority(struct thread *td, struct getpriority_args *uap)
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW, &unprivileged_idprio, 0, "Allow non-root users to set an idle priority (deprecated)")
void rufetch(struct proc *p, struct rusage *ru)
struct plimit * lim_hold(struct plimit *limp)
static int chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max, const char *name)
int chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
static struct uidinfo * uilookup(uid_t uid)
int kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which, struct rlimit *limp)
struct plimit * lim_alloc()
void uifree(struct uidinfo *uip)
void calccru(struct proc *p, struct timeval *up, struct timeval *sp)
rlim_t lim_cur_proc(struct proc *p, int which)
int chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
rlim_t() lim_cur(struct thread *td, int which)
struct plimit * lim_cowsync(void)
void lim_fork(struct proc *p1, struct proc *p2)
static int donice(struct thread *td, struct proc *p, int n)
static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures")
int chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
rlim_t lim_max(struct thread *td, int which)
int chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
int sys_setrlimit(struct thread *td, struct setrlimit_args *uap)
void uihold(struct uidinfo *uip)
int kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
struct uidinfo * uifind(uid_t uid)
int sys_getrusage(struct thread *td, struct getrusage_args *uap)
void calcru(struct proc *p, struct timeval *up, struct timeval *sp)
void ruxagg_locked(struct proc *p, struct thread *td)
int sys_setpriority(struct thread *td, struct setpriority_args *uap)
void lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
int rtp_to_pri(struct rtprio *rtp, struct thread *td)
void lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
static int unprivileged_idprio
static void ruxagg_ext_locked(struct rusage_ext *rux, struct thread *td)
int kern_getrusage(struct thread *td, int who, struct rusage *rup)
static void calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up, struct timeval *sp)
static uint64_t mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
rlim_t lim_max_proc(struct proc *p, int which)
void lim_freen(struct plimit *limp, int n)
static void lim_cb(void *arg)
int chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
int sys_getrlimit(struct thread *td, struct getrlimit_args *uap)
void rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up, struct timeval *sp)
int sys_rtprio(struct thread *td, struct rtprio_args *uap)
void ruxagg(struct proc *p, struct thread *td)
void rufetchtd(struct thread *td, struct rusage *ru)
int sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
void pri_to_rtp(struct thread *td, struct rtprio *rtp)
int kern_setpriority(struct thread *td, int which, int who, int prio)
void killproc(struct proc *p, const char *why)
void kern_psignal(struct proc *p, int sig)
uint64_t cpu_tickrate(void)
uint64_t cputick2usec(uint64_t tick)
void thread_cow_synced(struct thread *td)
struct thread * tdfind(lwpid_t tid, pid_t pid)
void umtx_pi_adjust(struct thread *td, u_char oldpri)
linker_function_name_callback_t callback
struct intr_irqsrc ** src
void sched_class(struct thread *td, int class)
void sched_prio(struct thread *td, u_char prio)
void sched_nice(struct proc *p, int nice)
void sched_user_prio(struct thread *td, u_char prio)
void * hashinit(int elements, struct malloc_type *type, u_long *hashmask)
int printf(const char *fmt,...)