#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#include <sys/lock_profile.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <ddb/ddb.h>

#include <fs/devfs/devfs_int.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
#define	ADAPTIVE_MUTEXES
#endif
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , lock, failed);
#endif
#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))

#define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
#define	mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
static void	assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_mtx(const struct lock_object *lock);
#endif
static void	lock_mtx(struct lock_object *lock, uintptr_t how);
static void	lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_mtx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_mtx(struct lock_object *lock);
static uintptr_t unlock_spin(struct lock_object *lock);
static void	_mtx_lock_indefinite_check(struct mtx *,
		    struct lock_delay_arg *);
struct lock_class lock_class_mtx_sleep = {
	.lc_name = "sleep mutex",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_mtx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
	.lc_unlock = unlock_mtx,
};
struct lock_class lock_class_mtx_spin = {
	.lc_name = "spin mutex",
	.lc_flags = LC_SPINLOCK | LC_RECURSABLE,
	.lc_assert = assert_mtx,
#ifdef DDB
	.lc_ddb_show = db_show_mtx,
#endif
	.lc_lock = lock_spin,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_mtx,
#endif
	.lc_unlock = unlock_spin,
};
#ifdef ADAPTIVE_MUTEXES
#ifdef MUTEX_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx debugging");

static struct lock_delay_config __read_frequently mtx_delay;

SYSCTL_U16(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base,
    0, "");
SYSCTL_U16(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay);
#else
#define	mtx_delay	locks_delay
#endif
#endif
#ifdef MUTEX_SPIN_CUSTOM_BACKOFF
static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "mtx spin debugging");

static struct lock_delay_config __read_frequently mtx_spin_delay;

SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW,
    &mtx_spin_delay.base, 0, "");
SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW,
    &mtx_spin_delay.max, 0, "");

LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay);
#else
#define	mtx_spin_delay	locks_delay
#endif
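/*
 * Illustrative only (not part of the original file): when the kernel is
 * built with MUTEX_CUSTOM_BACKOFF / MUTEX_SPIN_CUSTOM_BACKOFF, the backoff
 * parameters above are exposed as sysctls and can be inspected or tuned at
 * run time, e.g.:
 *
 *	# sysctl debug.mtx.delay_base
 *	# sysctl debug.mtx_spin.delay_max=1024
 *
 * (1024 is an arbitrary example value.)  Without these options, mutexes
 * share the system-wide locks_delay configuration.
 */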
/*
 * System-wide mutexes
 */
struct mtx blocked_lock;
struct mtx __exclusive_cache_line Giant;

static void
assert_mtx(const struct lock_object *lock, int what)
{

	/*
	 * Treat LA_LOCKED as if LA_XLOCKED was asserted; a mutex can not
	 * be held shared.
	 */
	if (what & LA_LOCKED) {
		what &= ~LA_LOCKED;
		what |= LA_XLOCKED;
	}
	mtx_assert((const struct mtx *)lock, what);
}
static void
lock_mtx(struct lock_object *lock, uintptr_t how)
{

	mtx_lock((struct mtx *)lock);
}
static void
lock_spin(struct lock_object *lock, uintptr_t how)
{

	mtx_lock_spin((struct mtx *)lock);
}
static uintptr_t
unlock_mtx(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(m);
	return (0);
}
static uintptr_t
unlock_spin(struct lock_object *lock)
{
	struct mtx *m;

	m = (struct mtx *)lock;
	mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock_spin(m);
	return (0);
}
#ifdef KDTRACE_HOOKS
static int
owner_mtx(const struct lock_object *lock, struct thread **owner)
{
	const struct mtx *m;
	uintptr_t x;

	m = (const struct mtx *)lock;
	x = m->mtx_lock;
	*owner = (struct thread *)(x & ~MTX_FLAGMASK);
	return (*owner != NULL);
}
#endif
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
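/*
 * Illustrative only: the canonical consumer-side pattern for a sleep mutex
 * ("sc" and its fields are hypothetical):
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_flags |= FOO_BUSY;
 *	mtx_unlock(&sc->sc_mtx);
 *
 * mtx_lock()/mtx_unlock() are macros from sys/mutex.h that resolve to the
 * __mtx_lock_flags()/__mtx_unlock_flags() entry points in this file.
 */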
void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock(m, curthread, opts, file, line);
	TD_LOCKS_DEC(curthread);
}
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}
void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
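/*
 * Illustrative only: spin mutexes disable interrupts via spinlock_enter()
 * and are meant for code that can not sleep, such as state shared with an
 * interrupt filter (names are hypothetical):
 *
 *	mtx_lock_spin(&sc->sc_intr_mtx);
 *	sc->sc_pending++;
 *	mtx_unlock_spin(&sc->sc_intr_mtx);
 */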
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
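/*
 * Illustrative only: a common lock-order-reversal avoidance pattern built
 * on mtx_trylock(), assuming the established order is b_mtx before a_mtx
 * (both locks hypothetical):
 *
 *	mtx_lock(&a_mtx);
 *	if (!mtx_trylock(&b_mtx)) {
 *		mtx_unlock(&a_mtx);
 *		mtx_lock(&b_mtx);
 *		mtx_lock(&a_mtx);
 *	}
 *
 * On failure the locks are re-acquired in the safe order instead of
 * risking a deadlock.
 */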
#if LOCK_DEBUG > 0
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct thread *td;
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;
	struct thread *owner;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	td = curthread;
	tid = (uintptr_t)td;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED_TD(td))
		return;

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(lv_mtx_owner(v) == td)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
#if LOCK_DEBUG > 0
		opts &= ~MTX_RECURSE;
#endif
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}
#if LOCK_DEBUG > 0
	opts &= ~MTX_RECURSE;
#endif

#if defined(ADAPTIVE_MUTEXES)
	lock_delay_arg_init(&lda, &mtx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, false,
	    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&m->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, m, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid), "spinning",
			    "lockname:\"%s\"", m->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = MTX_READ_VALUE(m);
				owner = lv_mtx_owner(v);
			} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname((struct thread *)tid), "running");
			continue;
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = MTX_READ_VALUE(m);
retry_turnstile:

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The owner may have started running while we were waiting
		 * on the turnstile chain lock; drop the turnstile and retry.
		 */
		owner = lv_mtx_owner(v);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
			goto retry_turnstile;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&m->lock_object);
#endif
#ifndef ADAPTIVE_MUTEXES
		owner = mtx_owner(m);
#endif
		MPASS(owner == mtx_owner(m));
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&m->lock_object);
		sleep_cnt++;
#endif
		v = MTX_READ_VALUE(m);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&m->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping.
	 */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
	    waittime, file, line);
}
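/*
 * Illustrative only: the adaptive__block and adaptive__spin events recorded
 * above surface as lockstat DTrace probes; an assumed one-liner to attribute
 * contention by stack might look like:
 *
 *	# dtrace -n 'lockstat:::adaptive-block { @[stack()] = sum(arg1); }'
 *
 * (The use of arg1 as the blocked time is an assumption about the probe's
 * argument layout.)
 */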
#if LOCK_DEBUG > 0
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 0;
#endif

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(adaptive__acquire)) {
		while (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				goto out_lockstat;
		}
		doing_lockprof = 1;
		spin_time -= lockstat_nsecs(&m->lock_object);
	}
#endif
#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#endif

	if (__predict_false(v == MTX_UNOWNED))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v == tid)) {
		m->mtx_recurse++;
		return;
	}

	if (SCHEDULER_STOPPED())
		return;

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&m->lock_object, true,
	    &contested, &waittime);

	for (;;) {
		if (v == MTX_UNOWNED) {
			if (_mtx_obtain_lock_fetch(m, &v, tid))
				break;
			continue;
		}
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		do {
			if (__predict_true(lda.spin_cnt < 10000000)) {
				lock_delay(&lda);
			} else {
				_mtx_lock_indefinite_check(m, &lda);
			}
			v = MTX_READ_VALUE(m);
		} while (v != MTX_UNOWNED);
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
	    "running");

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(spin__spin, m, spin_time);
out_lockstat:
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
	    contested, waittime, file, line);
}
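/*
 * Illustrative only: time spent in the spin loop above is published via the
 * spin__spin lockstat probe, observable with e.g.:
 *
 *	# dtrace -n 'lockstat:::spin-spin { @ = quantize(arg1); }'
 *
 * (Treating arg1 as the spin duration is an assumption about the probe's
 * argument layout.)
 */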
#ifdef INVARIANTS
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
	    ("thread_lock: got a recursive mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
#else
#define	thread_lock_validate(m, opts, file, line)	do { } while (0)
#endif
#ifndef LOCK_PROFILING
#if LOCK_DEBUG > 0
void
_thread_lock(struct thread *td, int opts, const char *file, int line)
#else
void
_thread_lock(struct thread *td)
#endif
{
	struct mtx *m;
	uintptr_t tid;

	tid = (uintptr_t)curthread;

	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire)))
		goto slowpath_noirq;
	spinlock_enter();
	m = td->td_lock;
	thread_lock_validate(m, 0, file, line);
	if (__predict_false(m == &blocked_lock))
		goto slowpath_unlocked;
	if (__predict_false(!_mtx_obtain_lock(m, tid)))
		goto slowpath_unlocked;
	if (__predict_true(m == td->td_lock)) {
		WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
		return;
	}
	_mtx_release_lock_quick(m);
slowpath_unlocked:
	spinlock_exit();
slowpath_noirq:
	thread_lock_flags_(td, opts, file, line);
}
#endif
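/*
 * Illustrative only: consumers do not call these entry points directly;
 * they use the thread_lock()/thread_unlock() macros, e.g.:
 *
 *	thread_lock(td);
 *	... inspect or modify scheduler state of td ...
 *	thread_unlock(td);
 */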
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	struct lock_delay_arg lda;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	int doing_lockprof = 1;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif

#ifdef LOCK_PROFILING
	doing_lockprof = 1;
#elif defined(KDTRACE_HOOKS)
	doing_lockprof = lockstat_enabled;
#endif
#ifdef KDTRACE_HOOKS
	if (__predict_false(doing_lockprof))
		spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	spinlock_enter();

	for (;;) {
retry:
		m = td->td_lock;
		thread_lock_validate(m, opts, file, line);
		v = MTX_READ_VALUE(m);
		for (;;) {
			if (v == MTX_UNOWNED) {
				if (_mtx_obtain_lock_fetch(m, &v, tid))
					break;
				continue;
			}
			MPASS(v != tid);
			lock_profile_obtain_lock_failed(&m->lock_object, true,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			do {
				if (__predict_true(lda.spin_cnt < 10000000)) {
					lock_delay(&lda);
				} else {
					_mtx_lock_indefinite_check(m, &lda);
				}
				if (m != td->td_lock) {
					spinlock_enter();
					goto retry;
				}
				v = MTX_READ_VALUE(m);
			} while (v != MTX_UNOWNED);
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_mtx_release_lock_quick(m);
	}
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);

#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return;
#endif
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
	    waittime, file, line);
	if (lda.spin_cnt != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
}
struct mtx *
thread_lock_block(struct thread *td)
{
	struct mtx *lock;

	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = &blocked_lock;

	return (lock);
}

void
thread_lock_unblock(struct thread *td, struct mtx *new)
{

	mtx_assert(new, MA_OWNED);
	KASSERT(td->td_lock == &blocked_lock,
	    ("thread %p lock %p not blocked_lock %p",
	    td, td->td_lock, &blocked_lock));
	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
}

void
thread_lock_block_wait(struct thread *td)
{

	while (atomic_load_ptr(&td->td_lock) == &blocked_lock)
		cpu_spinwait();

	/* Acquire fence to be certain that all thread state is visible. */
	atomic_thread_fence_acq();
}

void
thread_lock_set(struct thread *td, struct mtx *new)
{
	struct mtx *lock;

	mtx_assert(new, MA_OWNED);
	lock = td->td_lock;
	mtx_assert(lock, MA_OWNED);
	td->td_lock = new;
	mtx_unlock_spin(lock);
}
#if LOCK_DEBUG > 0
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
    const char *file, int line)
#else
void
__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v)
#endif
{
	struct mtx *m;
	struct turnstile *ts;
	uintptr_t tid;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	m = mtxlock2mtx(c);

	if (__predict_false(v == tid))
		v = MTX_READ_VALUE(m);

	if (__predict_false(v & MTX_RECURSED)) {
		if (--(m->mtx_recurse) == 0)
			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
	if (v == tid && _mtx_release_lock(m, tid))
		return;

	/*
	 * We have to lock the chain before the turnstile so this turnstile
	 * can be removed from the hash list if it is empty.
	 */
	turnstile_chain_lock(&m->lock_object);
	_mtx_release_lock_quick(m);
	ts = turnstile_lookup(&m->lock_object);
	MPASS(ts != NULL);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
	turnstile_unpend(ts);
	turnstile_chain_unlock(&m->lock_object);
}
#ifdef INVARIANT_SUPPORT
void
__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct mtx *m;

	if (KERNEL_PANICKED() || dumping || SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	switch (what) {
	case MA_OWNED:
	case MA_OWNED | MA_RECURSED:
	case MA_OWNED | MA_NOTRECURSED:
		if (!mtx_owned(m))
			panic("mutex %s not owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		if (mtx_recursed(m)) {
			if ((what & MA_NOTRECURSED) != 0)
				panic("mutex %s recursed at %s:%d",
				    m->lock_object.lo_name, file, line);
		} else if ((what & MA_RECURSED) != 0) {
			panic("mutex %s unrecursed at %s:%d",
			    m->lock_object.lo_name, file, line);
		}
		break;
	case MA_NOTOWNED:
		if (mtx_owned(m))
			panic("mutex %s owned at %s:%d",
			    m->lock_object.lo_name, file, line);
		break;
	default:
		panic("unknown mtx_assert at %s:%d", file, line);
	}
}
#endif
void
mtx_sysinit(void *arg)
{
	struct mtx_args *margs = arg;

	mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL,
	    margs->ma_opts);
}
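/*
 * Illustrative only: mtx_sysinit() is normally reached via the MTX_SYSINIT()
 * macro, which queues the initialization for boot time ("foo" is
 * hypothetical):
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
 */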
void
_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
{
	struct mtx *m;
	struct lock_class *class;
	int flags;

	m = mtxlock2mtx(c);

	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
	    &m->mtx_lock));

	/* Determine lock class and lock flags. */
	if (opts & MTX_SPIN)
		class = &lock_class_mtx_spin;
	else
		class = &lock_class_mtx_sleep;
	flags = 0;
	if (opts & MTX_QUIET)
		flags |= LO_QUIET;
	if (opts & MTX_RECURSE)
		flags |= LO_RECURSABLE;
	if ((opts & MTX_NOWITNESS) == 0)
		flags |= LO_WITNESS;
	if (opts & MTX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & MTX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (opts & MTX_NEW)
		flags |= LO_NEW;

	/* Initialize mutex. */
	lock_init(&m->lock_object, class, name, type, flags);

	m->mtx_lock = MTX_UNOWNED;
	m->mtx_recurse = 0;
}
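/*
 * Illustrative only: typical dynamic initialization and teardown in a
 * driver ("sc" is a hypothetical softc):
 *
 *	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 */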
void
_mtx_destroy(volatile uintptr_t *c)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	if (!mtx_owned(m))
		MPASS(mtx_unowned(m));
	else {
		MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);

		/* Perform the non-mtx related part of mtx_unlock_spin(). */
		if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
			lock_profile_release_lock(&m->lock_object, true);
			spinlock_exit();
		} else {
			TD_LOCKS_DEC(curthread);
			lock_profile_release_lock(&m->lock_object, false);
		}

		/* Tell witness this isn't locked to make it happy. */
		WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
		    __LINE__);
	}

	m->mtx_lock = MTX_DESTROYED;
	lock_destroy(&m->lock_object);
}
void
mutex_init(void)
{

	/* Setup turnstiles so that sleep mutexes work. */
	init_turnstiles();

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE);
	mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN);
	blocked_lock.mtx_lock = 0xdeadc0de;	/* Always blocked. */
	mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN);
	mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN);
	mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN);
	mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN);
	mtx_init(&devmtx, "cdev", NULL, MTX_DEF);
	mtx_lock(&Giant);
}
static void __noinline
_mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
{
	struct thread *td;

	ldap->spin_cnt++;
	if (ldap->spin_cnt < 60000000 || kdb_active || KERNEL_PANICKED())
		cpu_lock_delay();
	else {
		td = mtx_owner(m);

		/* If the mutex is unlocked, try again. */
		if (td == NULL)
			return;

		printf("spin lock %p (%s) held by %p (tid %d) too long\n",
		    m, m->lock_object.lo_name, td, td->td_tid);
#ifdef WITNESS
		witness_display_spinlock(&m->lock_object, td, printf);
#endif
		panic("spin lock held too long");
	}
	cpu_spinwait();
}
void
mtx_spin_wait_unlocked(struct mtx *m)
{
	struct lock_delay_arg lda;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_sleep,
	    ("%s() of sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)",
	    __func__, m, m->lock_object.lo_name));

	lda.spin_cnt = 0;

	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
		if (__predict_true(lda.spin_cnt < 10000000)) {
			cpu_spinwait();
			lda.spin_cnt++;
		} else {
			_mtx_lock_indefinite_check(m, &lda);
		}
	}
}
void
mtx_wait_unlocked(struct mtx *m)
{
	struct thread *owner;
	uintptr_t v;

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("%s() of destroyed mutex %p", __func__, m));
	KASSERT(LOCK_CLASS(&m->lock_object) != &lock_class_mtx_spin,
	    ("%s() not a sleep mutex %p (%s)", __func__, m,
	    m->lock_object.lo_name));
	KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)",
	    __func__, m, m->lock_object.lo_name));

	for (;;) {
		v = atomic_load_acq_ptr(&m->mtx_lock);
		if (v == MTX_UNOWNED) {
			break;
		}
		owner = lv_mtx_owner(v);
		if (!TD_IS_RUNNING(owner)) {
			mtx_lock(m);
			mtx_unlock(m);
		} else {
			cpu_spinwait();
		}
	}
}
#ifdef DDB
static void
db_show_mtx(const struct lock_object *lock)
{
	struct thread *td;
	const struct mtx *m;

	m = (const struct mtx *)lock;

	db_printf(" flags: {");
	if (LOCK_CLASS(lock) == &lock_class_mtx_spin)
		db_printf("SPIN");
	else
		db_printf("DEF");
	if (m->lock_object.lo_flags & LO_RECURSABLE)
		db_printf(", RECURSE");
	if (m->lock_object.lo_flags & LO_DUPOK)
		db_printf(", DUPOK");
	db_printf("}\n");
	db_printf(" state: {");
	if (mtx_unowned(m))
		db_printf("UNOWNED");
	else if (mtx_destroyed(m))
		db_printf("DESTROYED");
	else {
		db_printf("OWNED");
		if (m->mtx_lock & MTX_CONTESTED)
			db_printf(", CONTESTED");
		if (m->mtx_lock & MTX_RECURSED)
			db_printf(", RECURSED");
	}
	db_printf("}\n");
	if (!mtx_unowned(m) && !mtx_destroyed(m)) {
		td = mtx_owner(m);
		db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (mtx_recursed(m))
			db_printf(" recursed: %d\n", m->mtx_recurse);
	}
}
#endif