#include "opt_turnstile_profiling.h"

#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/lockmgr.h>
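
/*
 * Contested locks are hashed into a table of turnstile chains; each
 * chain holds the active turnstiles for the locks that hash to it.
 */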
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
	struct lock_object *ts_lockobj;		/* Lock this turnstile references. */
	struct thread	*ts_owner;		/* Thread that owns the lock. */
#ifdef TURNSTILE_PROFILING
#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "turnstile profiling");
static SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0,
    "maximum depth achieved of a single chain");
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	turnstile_dtor(void *mem, int size, void *arg);
	mtx_unlock_spin(&ts->ts_lock);
	if (td->td_lock != &top->ts_lock)
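
/*
 * propagate_priority() walks the chain of blocked threads: it lends the
 * blocked thread's priority to each lock owner in turn until it reaches
 * an owner that is not blocked or that already runs at that priority.
 */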
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	pri = td->td_priority;
	top = ts = td->td_blocked;
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		if (td->td_lock != &ts->ts_lock) {
			thread_lock_flags(td, MTX_DUPOK);
		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_IS_SLEEPING(td)) {
			printf(
		"Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
			    td->td_tid, td->td_proc->p_pid);
			panic("sleeping thread");
		}
		if (td->td_priority <= pri) {
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
		KASSERT(td != curthread, ("Deadlock detected"));
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_name, TD_GET_STATE(td),
		    ts->ts_lockobj->lo_name));
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
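
/*
 * turnstile_adjust_thread() requeues a thread on the turnstile it is
 * blocked on after its priority has changed, keeping each wait queue
 * sorted by priority.
 */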
	struct thread *td1, *td2;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_LOCK(td));
	if (td->td_turnstile != NULL)
	THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);
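
	/*
	 * Only requeue if the thread is no longer in priority order with
	 * its neighbors on the wait queue.
	 */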
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {
		queue = td->td_tsqueue;
		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);

		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}
		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
366 "turnstile_adjust_thread: td %d put at tail on [%p] %s",
367 td->td_tid,
ts->ts_lockobj,
ts->ts_lockobj->lo_name);
370 "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
371 td->td_tid, td1->td_tid,
ts->ts_lockobj,
372 ts->ts_lockobj->lo_name);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif
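
/*
 * turnstile_adjust() is called when a blocked thread's priority changes:
 * it requeues the thread and, if the thread is now at the head of its
 * queue with a raised priority, continues propagating that priority.
 */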
	MPASS(TD_ON_LOCK(td));
	THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
	    td->td_tsqueue == TS_SHARED_QUEUE);
	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
	    td->td_priority < oldpri) {
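
/*
 * turnstile_setowner() records the owning thread and links the turnstile
 * into the owner's list of contested locks.
 */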
	MPASS(ts->ts_owner == NULL);

	MPASS(owner->td_proc->p_magic == P_MAGIC);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
static void
turnstile_dtor(void *mem, int size, void *arg)
{
	struct turnstile *ts;

	ts = mem;
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
}
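
/*
 * UMA zone init hook: set up the queues and per-turnstile spin lock of a
 * newly allocated turnstile.
 */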
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN);
	mtx_destroy(&ts->ts_lock);
	mtx_lock_spin(&tc->tc_lock);
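
/*
 * turnstile_trywait() looks up the turnstile for a lock in its chain; if
 * the lock is not already contested, the caller's spare turnstile is
 * claimed for it instead.
 */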
	mtx_lock_spin(&tc->tc_lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}

	ts = curthread->td_turnstile;
	mtx_lock_spin(&ts->ts_lock);
	KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
	ts->ts_lockobj = lock;
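
/*
 * turnstile_lock() pins down the chain and turnstile locks for the lock
 * object the turnstile currently references, failing if the turnstile
 * has been recycled in the meantime.
 */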
	struct lock_object *lock;

	if ((lock = ts->ts_lockobj) == NULL)
		return (false);
	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
	mtx_lock_spin(&ts->ts_lock);
	if (__predict_false(lock != ts->ts_lockobj)) {
		mtx_unlock_spin(&tc->tc_lock);
		mtx_unlock_spin(&ts->ts_lock);
		return (false);
	}
	mtx_assert(&ts->ts_lock, MA_OWNED);
	mtx_unlock_spin(&ts->ts_lock);
	if (ts == curthread->td_turnstile)
		ts->ts_lockobj = NULL;
	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
	MPASS(ts->ts_lockobj == NULL);
	struct lock_object *lock;

	mtx_assert(&ts->ts_lock, MA_OWNED);

	mtx_unlock_spin(&ts->ts_lock);
	lock = ts->ts_lockobj;
	if (ts == curthread->td_turnstile)
		ts->ts_lockobj = NULL;
	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}

	mtx_unlock_spin(&tc->tc_lock);
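
/*
 * Return the higher-priority thread of the two queue heads; an exclusive
 * waiter is preferred when priorities tie.
 */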
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
	struct thread *std, *xtd;

	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
		return (std);
	return (xtd);
}
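
/*
 * turnstile_claim() takes ownership of a turnstile on behalf of
 * curthread and lends it the top waiter's priority when that is higher.
 */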
	struct thread *td, *owner;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts != curthread->td_turnstile);

	MPASS(td->td_proc->p_magic == P_MAGIC);
	THREAD_LOCKPTR_BLOCKED_ASSERT(td, &ts->ts_lock);

	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	thread_unlock(owner);
	mtx_unlock_spin(&ts->ts_lock);
	mtx_unlock_spin(&tc->tc_lock);
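
/*
 * turnstile_wait() queues the current thread on the turnstile for the
 * given lock, propagates its priority to the owner, and switches away
 * until the lock is handed over.
 */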
	struct thread *td, *td1;
	struct lock_object *lock;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	if (ts == td->td_turnstile) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
		    ("thread's turnstile has exclusive waiters"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
		    ("thread's turnstile has shared waiters"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		MPASS(ts->ts_lockobj != NULL);
		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		MPASS(owner == ts->ts_owner);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
	}
	td->td_turnstile = NULL;
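
	/* Save who we are blocked on and switch. */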
	lock = ts->ts_lockobj;
	td->td_tsqueue = queue;
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	td->td_blktick = ticks;
	TD_SET_LOCK(td);
	mtx_unlock_spin(&tc->tc_lock);
	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);
	SDT_PROBE0(sched, , , sleep);

	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	mi_switch(SW_VOL | SWT_TURNSTILE);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);
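
/*
 * turnstile_signal() moves the highest-priority waiter on the given
 * queue to the pending list and hands it a spare turnstile; it reports
 * whether that emptied both wait queues.
 */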
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
	if (empty) {
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_assert(&tc->tc_lock, MA_OWNED);
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;
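
/*
 * turnstile_broadcast() moves every waiter on the given queue to the
 * pending list and gives each one a turnstile from the free list.
 */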
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);

	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
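
/*
 * turnstile_calc_unlend_prio_locked() scans the turnstiles the thread
 * still owns and returns the highest waiter priority found, i.e. the
 * priority the thread must retain after releasing this lock.
 */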
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	LIST_FOREACH(nts, &td->td_contested, ts_link) {
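
/*
 * turnstile_unpend() wakes up all threads on the pending list, dropping
 * any priority lent to the releasing thread first.
 */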
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
		ts->ts_lockobj = NULL;
	if (ts->ts_owner != NULL) {
		ts->ts_owner = NULL;
		LIST_REMOVE(ts, ts_link);
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		MPASS(TD_ON_LOCK(td));
		TD_CLR_LOCK(td);
		MPASS(TD_CAN_RUN(td));
		td->td_blocked = NULL;
		td->td_lockname = NULL;
		td->td_blktick = 0;
#ifdef INVARIANTS
		td->td_tsqueue = 0xff;
#endif
	mtx_unlock_spin(&ts->ts_lock);
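
/*
 * turnstile_disown() gives up ownership of a turnstile that still has
 * waiters without waking any of them.
 */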
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread);
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
	    !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));

	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);

	mtx_unlock_spin(&ts->ts_lock);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	return (TAILQ_FIRST(&ts->ts_blocked[queue]));

	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
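
/*
 * The remaining routines are DDB helpers backing the "show turnstile",
 * "show lockchain", "show allchains", and "show locktree" commands.
 */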
static void
print_thread(struct thread *td, const char *prefix)
{

	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
}
static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
	struct thread *td;

	db_printf("%s:\n", header);
	if (TAILQ_EMPTY(queue)) {
		db_printf("%sempty\n", prefix);
		return;
	}
	TAILQ_FOREACH(td, queue, td_lockq) {
		print_thread(td, prefix);
	}
}
DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
	struct lock_object *lock;

	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			goto found;

	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
	return;
found:
	lock = ts->ts_lockobj;
	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
	    lock->lo_name);
	if (ts->ts_owner)
		print_thread(ts->ts_owner, "Lock Owner: ");
	else
		db_printf("Lock Owner: none\n");
	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
	    "\t");
	print_queue(&ts->ts_pending, "Pending Threads", "\t");
static void
print_lockchain(struct thread *td, const char *prefix)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile *ts;
	struct thread *owner;
	while (!db_pager_quit) {
		if (td == (void *)LK_KERNPROC) {
			db_printf("%sdisowned (LK_KERNPROC)\n", prefix);
			return;
		}
		db_printf("%sthread %d (pid %d, %s) is ", prefix, td->td_tid,
		    td->td_proc->p_pid, td->td_name);
		switch (TD_GET_STATE(td)) {
		case TDS_INACTIVE:
			db_printf("inactive\n");
			return;
		case TDS_CAN_RUN:
			db_printf("runnable\n");
			return;
		case TDS_RUNQ:
			db_printf("on a run queue\n");
			return;
		case TDS_RUNNING:
			db_printf("running on CPU %d\n", td->td_oncpu);
			return;
		case TDS_INHIBITED:
			if (TD_ON_LOCK(td)) {
				ts = td->td_blocked;
				lock = ts->ts_lockobj;
				class = LOCK_CLASS(lock);
				db_printf("blocked on lock %p (%s) \"%s\"\n",
				    lock, class->lc_name, lock->lo_name);
				if (ts->ts_owner == NULL)
					return;
				td = ts->ts_owner;
				break;
			} else if (TD_ON_SLEEPQ(td)) {
				if (!lockmgr_chain(td, &owner) &&
				    !sx_chain(td, &owner)) {
					db_printf("sleeping on %p \"%s\"\n",
					    td->td_wchan, td->td_wmesg);
					return;
				}
				td = owner;
				break;
			}
			db_printf("inhibited: %s\n", KTDSTATE(td));
			return;
		default:
			db_printf("??? (%#x)\n", TD_GET_STATE(td));
			return;
		}
DB_SHOW_COMMAND(lockchain, db_show_lockchain)
{
	struct thread *td;

	if (have_addr)
		td = db_lookup_thread(addr, true);
	else
		td = kdb_thread;
	print_lockchain(td, "");
}
DB_SHOW_ALIAS(sleepchain, db_show_lockchain);
DB_SHOW_ALL_COMMAND(chains, db_show_allchains)
{
	struct thread *td;
	struct proc *p;
	int i;

	i = 1;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			if ((TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested))
			    || (TD_IS_INHIBITED(td) && TD_ON_SLEEPQ(td))) {
				db_printf("chain %d:\n", i++);
				print_lockchain(td, " ");
			}
		}
	}
}
DB_SHOW_ALIAS(allchains, db_show_allchains);
static void	print_waiters(struct turnstile *ts, int indent);
static void
print_waiter(struct thread *td, int indent)
{
	struct turnstile *ts;
	int i;

	for (i = 0; i < indent; i++)
		db_printf(" ");
	print_thread(td, "thread ");
	LIST_FOREACH(ts, &td->td_contested, ts_link)
		print_waiters(ts, indent + 1);
}
	struct lock_object *lock;
	struct lock_class *class;
	struct thread *td;
	int i;

	lock = ts->ts_lockobj;
	class = LOCK_CLASS(lock);
	for (i = 0; i < indent; i++)
		db_printf(" ");
	db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
		print_waiter(td, indent + 1);
DB_SHOW_COMMAND(locktree, db_show_locktree)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile_chain *tc;
	struct turnstile *ts;

	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			break;
	if (ts == NULL) {
		class = LOCK_CLASS(lock);
		db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
		    lock->lo_name);
	} else
		print_waiters(ts, 0);
}