#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>
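/*
 * On SMP kernels, adaptive mode lets a thread busy-wait for a rwlock
 * whose owner is currently running on another CPU instead of blocking
 * immediately; building with NO_ADAPTIVE_RWLOCKS disables this.
 */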
#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define ADAPTIVE_RWLOCKS
#endif
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif
#define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock))
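/*
 * Callbacks handed to the generic lock framework via lock_class_rw
 * below; they let lock-type-agnostic consumers (DDB, WITNESS, the
 * sleepqueue code) assert, acquire, release, and query ownership of
 * an rwlock.
 */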
static void db_show_rwlock(const struct lock_object *lock);
static void assert_rw(const struct lock_object *lock, int what);
static void lock_rw(struct lock_object *lock, uintptr_t how);
static int owner_rw(const struct lock_object *lock, struct thread **owner);
static uintptr_t unlock_rw(struct lock_object *lock);
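/*
 * lock_class_rw describes rwlocks to the common lock code: sleep
 * locks that may be recursed (when initialized with RW_RECURSE) and
 * upgraded from read to write ownership.
 */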
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_ddb_show = db_show_rwlock,
#ifdef ADAPTIVE_RWLOCKS
#ifdef RWLOCK_CUSTOM_BACKOFF
static u_short __read_frequently rowner_retries;
static u_short __read_frequently rowner_loops;
SYSCTL_NODE(_debug, OID_AUTO, rwlock,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "rwlock debugging");
SYSCTL_U16(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_U16(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
static struct lock_delay_config __read_frequently rw_delay;

SYSCTL_U16(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
    0, "");
SYSCTL_U16(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");
static void
rw_lock_delay_init(void *arg __unused)
{
        lock_delay_default_init(&rw_delay);
        rowner_retries = 10;
        rowner_loops = max(10000, rw_delay.max);
}
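/*
 * Without RWLOCK_CUSTOM_BACKOFF, rwlocks share the system-wide
 * locks_delay backoff state and its tunables instead of keeping
 * private copies.
 */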
#else
#define rw_delay locks_delay
#define rowner_retries locks_delay_retries
#define rowner_loops locks_delay_loops
#endif
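/*
 * The owner of a write-locked rwlock is encoded directly in the lock
 * word, so rw_wowner() simply masks it out; for a read-locked (or
 * unlocked) lock it yields NULL, since only a count of readers is
 * kept, not their identities.
 */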
#define lv_rw_wowner(v)                                                 \
        ((v) & RW_LOCK_READ ? NULL :                                    \
        (struct thread *)RW_OWNER((v)))
#define rw_wowner(rw) lv_rw_wowner(RW_READ_VALUE(rw))
#define rw_recursed(rw) ((rw)->rw_recurse != 0)
#define rw_wlocked(rw) (rw_wowner((rw)) == curthread)
#define rw_owner(rw) rw_wowner(rw)
#define __rw_assert(c, what, file, line)
        rw_assert((const struct rwlock *)lock, what);
lock_rw(struct lock_object *lock, uintptr_t how)
        rw = (struct rwlock *)lock;
        rw = (struct rwlock *)lock;
        rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
        if (rw->rw_lock & RW_LOCK_READ) {
owner_rw(const struct lock_object *lock, struct thread **owner)
        const struct rwlock *rw = (const struct rwlock *)lock;
        uintptr_t x = rw->rw_lock;

        *owner = rw_wowner(rw);
        return ((x & RW_LOCK_READ) != 0 ? (RW_READERS(x) != 0) :
            (*owner != NULL));
        MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
            RW_RECURSE | RW_NEW)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
            ("%s: rw_lock not aligned for %s: %p", __func__, name,
            &rw->rw_lock));
        flags = LO_UPGRADABLE;
        if (opts & RW_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & RW_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & RW_RECURSE)
                flags |= LO_RECURSABLE;

        rw->rw_lock = RW_UNLOCKED;
        KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
        KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
        rw->rw_lock = RW_DESTROYED;
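/*
 * Typical consumer life cycle, as a sketch; "example_softc" and
 * "sc_lock" are hypothetical names, not part of this file:
 *
 *      struct example_softc {
 *              struct rwlock   sc_lock;
 *      };
 *
 *      rw_init(&sc->sc_lock, "example sc lock");
 *      rw_rlock(&sc->sc_lock);         shared, read-only access
 *      rw_runlock(&sc->sc_lock);
 *      rw_wlock(&sc->sc_lock);         exclusive access
 *      rw_wunlock(&sc->sc_lock);
 *      rw_destroy(&sc->sc_lock);       lock must be unlocked here
 */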
        struct rw_args *args;

        args = arg;
        rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
            args->ra_flags);
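/*
 * Write-lock entry point.  The fast path is a single atomic
 * compare-and-set of the lock word from RW_UNLOCKED to the curthread
 * pointer; on failure _rw_wlock_hard() takes over.
 */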
        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
            !TD_IS_IDLETHREAD(curthread),
            ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
            curthread, rw->lock_object.lo_name, file, line));
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);

        tid = (uintptr_t)curthread;
        if (!_rw_write_lock_fetch(rw, &v, tid))
                _rw_wlock_hard(rw, v, file, line);
        else
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
                    0, 0, file, line, LOCKSTAT_WRITER);

        LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
        WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        TD_LOCKS_INC(curthread);
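/*
 * Non-blocking write-lock attempt: one compare-and-set try plus a
 * recursion check, never sleeping; the return value reports whether
 * the lock was obtained.
 */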
        if (SCHEDULER_STOPPED_TD(td))
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
            ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
            curthread, rw->lock_object.lo_name, file, line));
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

        if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
        if (v == RW_UNLOCKED)
        if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
                atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);

        LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
                    rw, 0, 0, file, line, LOCKSTAT_WRITER);
                TD_LOCKS_INC(curthread);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
        WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
            line);
#if LOCK_DEBUG > 0
        _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
#else
        __rw_wunlock(rw, curthread, file, line);
#endif
        TD_LOCKS_DEC(curthread);
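/*
 * __rw_can_read() encodes the read-admission policy: admit a new
 * reader if the lock is read-held with no writer waiting or spinning,
 * or, off the fast path, if this thread already holds read locks
 * (blocking it behind a writer could deadlock a recursive read).
 */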
static bool __always_inline
__rw_can_read(struct thread *td, uintptr_t v, bool fp)
{
        if ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER))
            == RW_LOCK_READ)
                return (true);
        if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ))
                return (true);
        return (false);
}
static bool __always_inline
__rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
                if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
                    *vp + RW_ONE_READER)) {
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR4(KTR_LOCK,
                                    "%s: %p succeed %p -> %p", __func__,
                                    rw, (void *)*vp,
                                    (void *)(*vp + RW_ONE_READER));
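/*
 * Read-lock slow path: adaptively spin while a write owner is running
 * on another CPU, optionally wait for a spinning writer to get out of
 * the way, and otherwise block on the turnstile's shared queue.
 */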
static void __noinline
__rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
        struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
        uint64_t waittime = 0;
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        int doing_lockprof = 0;

        if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
        if (SCHEDULER_STOPPED())
                return;

#if defined(ADAPTIVE_RWLOCKS)
        lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init_noadapt(&lda);
#endif

        PMC_SOFT_CALL( , , lock, failed);
        lock_profile_obtain_lock_failed(&rw->lock_object, false,
            &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
                if ((v & RW_LOCK_READ) == 0) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, rw, owner);
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", rw->lock_object.lo_name);
                                do {
                                        lock_delay(&lda);
                                        v = RW_READ_VALUE(rw);
                                        owner = lv_rw_wowner(v);
                                } while (owner != NULL && TD_IS_RUNNING(owner));
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                if ((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) {
                        v = RW_READ_VALUE(rw);
                if (spintries < rowner_retries) {
                        spintries++;
                        KTR_STATE1(KTR_SCHED, "thread",
                            sched_tdname(curthread), "spinning",
                            "lockname:\"%s\"", rw->lock_object.lo_name);
                        for (i = 0; i < rowner_loops; i += n) {
                                v = RW_READ_VALUE(rw);
                                if (!(v & RW_LOCK_READ))
                                        break;
                        lda.spin_cnt += rowner_loops - i;
                        KTR_STATE0(KTR_SCHED, "thread",
                            sched_tdname(curthread), "running");
                        if (i < rowner_loops)
                v = RW_READ_VALUE(rw);
                if (((v & RW_LOCK_WRITE_SPINNER) && RW_READERS(v) == 0) ||
#ifdef ADAPTIVE_RWLOCKS
                if (TD_IS_RUNNING(owner)) {
                if (!(v & RW_LOCK_READ_WAITERS)) {
                        if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
                            v | RW_LOCK_READ_WAITERS))
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set read waiters flag",
                                    __func__, rw);

                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);

                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
                v = RW_READ_VALUE(rw);
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!doing_lockprof))
                return;
#endif
        LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
            LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
            (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
                    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
                    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
            waittime, file, line, LOCKSTAT_READER);
        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
            !TD_IS_IDLETHREAD(td),
            ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
            td, rw->lock_object.lo_name, file, line));
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
        KASSERT(rw_wowner(rw) != td,
            ("rw_rlock: wlock already held for %s @ %s:%d",
            rw->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
        v = RW_READ_VALUE(rw);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) ||
        lock_profile_obtain_lock_success(&rw->lock_object, false, 0, 0,
            file, line);

        LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&rw->lock_object, 0, file, line);
        TD_LOCKS_INC(curthread);
        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
            curthread, rw->lock_object.lo_name, file, line));

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
        if (!(x & RW_LOCK_READ))
                break;
        if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
                LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
                    line);
                WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
                    rw, 0, 0, file, line, LOCKSTAT_READER);
                TD_LOCKS_INC(curthread);
                curthread->td_rw_rlocks++;

        LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
static bool __always_inline
__rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
                if (RW_READERS(*vp) > 1 || !(*vp & RW_LOCK_WAITERS)) {
                        if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
                            *vp - RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, rw, (void *)*vp,
                                            (void *)(*vp - RW_ONE_READER));
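/*
 * Slow path for dropping the last read lock when waiters are present:
 * clear the reader count and wake one waiter queue, preferring the
 * exclusive (writer) queue when both readers and writers are queued.
 */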
static void __noinline
__rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
    LOCK_FILE_LINE_ARG_DEF)
        uintptr_t setv, queue;

        if (SCHEDULER_STOPPED())
                return;

        v = RW_READ_VALUE(rw);
        MPASS(v & RW_LOCK_WAITERS);

        queue = TS_SHARED_QUEUE;
        if (v & RW_LOCK_WRITE_WAITERS) {
                queue = TS_EXCLUSIVE_QUEUE;
                setv |= (v & RW_LOCK_READ_WAITERS);
        }
        setv |= (v & RW_LOCK_WRITE_SPINNER);
        if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv))
        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
                    __func__, rw);

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
        WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

        v = RW_READ_VALUE(rw);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||

        lock_profile_release_lock(&rw->lock_object, false);
        TD_LOCKS_DEC(curthread);
#ifdef ADAPTIVE_RWLOCKS
static inline void
rw_drop_critical(uintptr_t v, bool *in_critical, int *extra_work)
        if (v & RW_LOCK_WRITE_SPINNER)
                return;
        *in_critical = false;
#else
#define rw_drop_critical(v, in_critical, extra_work) do { } while (0)
#endif
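/*
 * Write-lock slow path: handle recursion, adaptively spin while the
 * owner (or the current set of readers) is running, then set the
 * waiter bits and block on the turnstile's exclusive queue.
 */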
        struct thread *owner;
#ifdef ADAPTIVE_RWLOCKS
        enum { READERS, WRITER } sleep_reason = READERS;
        bool in_critical = false;
        uint64_t waittime = 0;
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        int doing_lockprof = 0;

        tid = (uintptr_t)curthread;
        if (LOCKSTAT_PROFILE_ENABLED(rw__acquire)) {
                while (v == RW_UNLOCKED) {
                        if (_rw_write_lock_fetch(rw, &v, tid))
        if (SCHEDULER_STOPPED())
                return;

        if (__predict_false(v == RW_UNLOCKED))
                v = RW_READ_VALUE(rw);

        if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
                KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
                    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
                    __func__, rw->lock_object.lo_name, file, line));
                rw->rw_recurse++;
                atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
                return;
        }

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
#if defined(ADAPTIVE_RWLOCKS)
        lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init_noadapt(&lda);
#endif

        PMC_SOFT_CALL( , , lock, failed);
        lock_profile_obtain_lock_failed(&rw->lock_object, false,
            &contested, &waittime);
                if (v == RW_UNLOCKED) {
                        if (_rw_write_lock_fetch(rw, &v, tid))
#ifdef ADAPTIVE_RWLOCKS
                if (v == (RW_LOCK_READ | RW_LOCK_WRITE_SPINNER)) {
                        if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
                if (!(v & RW_LOCK_READ)) {
                        sleep_reason = WRITER;
                        owner = lv_rw_wowner(v);
                        if (!TD_IS_RUNNING(owner))
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, rw, owner);
                        KTR_STATE1(KTR_SCHED, "thread",
                            sched_tdname(curthread), "spinning",
                            "lockname:\"%s\"", rw->lock_object.lo_name);
                        do {
                                lock_delay(&lda);
                                v = RW_READ_VALUE(rw);
                                owner = lv_rw_wowner(v);
                        } while (owner != NULL && TD_IS_RUNNING(owner));
                        KTR_STATE0(KTR_SCHED, "thread",
                            sched_tdname(curthread), "running");
                } else if (RW_READERS(v) > 0) {
                        sleep_reason = READERS;
                        if (spintries == rowner_retries)
                        if (!(v & RW_LOCK_WRITE_SPINNER)) {
                                if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
                                    v | RW_LOCK_WRITE_SPINNER)) {
                                        in_critical = false;
                        KTR_STATE1(KTR_SCHED, "thread",
                            sched_tdname(curthread), "spinning",
                            "lockname:\"%s\"", rw->lock_object.lo_name);
                        for (i = 0; i < rowner_loops; i += n) {
                                v = RW_READ_VALUE(rw);
                                if (!(v & RW_LOCK_WRITE_SPINNER))
                                        break;
                                if (!(v & RW_LOCK_READ))
                                        break;
                        KTR_STATE0(KTR_SCHED, "thread",
                            sched_tdname(curthread), "running");
                        if (i < rowner_loops)
#ifdef ADAPTIVE_RWLOCKS
                if (owner != NULL) {
                        if (TD_IS_RUNNING(owner)) {
                } else if (RW_READERS(v) > 0 && sleep_reason == WRITER) {

                setv = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                if ((v & ~setv) == RW_UNLOCKED) {
                        setv &= ~RW_LOCK_WRITE_SPINNER;
                        if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | setv)) {
#ifdef ADAPTIVE_RWLOCKS
                if ((v & RW_LOCK_WRITE_SPINNER) ||
                    !(v & RW_LOCK_WRITE_WAITERS)) {
                        setv = v & ~RW_LOCK_WRITE_SPINNER;
                        setv |= RW_LOCK_WRITE_WAITERS;
                        if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, setv))
                        in_critical = false;

                if (!(v & RW_LOCK_WRITE_WAITERS)) {
                        if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
                            v | RW_LOCK_WRITE_WAITERS))
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set write waiters flag",
                                    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);

                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
                v = RW_READ_VALUE(rw);
        if (__predict_true(!extra_work))
                return;
#ifdef ADAPTIVE_RWLOCKS
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!doing_lockprof))
                return;
#endif
        LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
            LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
            (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
                    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
                    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
            waittime, file, line, LOCKSTAT_WRITER);
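/*
 * Release a contested write lock: pick the queue to wake (writers are
 * preferred over readers), install the surviving waiter bits as the
 * new lock value, and hand off via the turnstile.
 */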
        uintptr_t tid, setv;

        tid = (uintptr_t)curthread;
        if (SCHEDULER_STOPPED())
                return;

        if (__predict_false(v == tid))
                v = RW_READ_VALUE(rw);

        if (v & RW_LOCK_WRITER_RECURSED) {
                if (--(rw->rw_recurse) == 0)
                        atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
                return;
        }

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
        if (v == tid && _rw_write_unlock(rw, tid))
                return;

        KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
            ("%s: neither of the waiter flags are set", __func__));

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
        v = RW_READ_VALUE(rw);
        queue = TS_SHARED_QUEUE;
        if (v & RW_LOCK_WRITE_WAITERS) {
                queue = TS_EXCLUSIVE_QUEUE;
                setv |= (v & RW_LOCK_READ_WAITERS);
        }
        atomic_store_rel_ptr(&rw->rw_lock, setv);

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
                    queue == TS_SHARED_QUEUE ? "read" : "write");
        uintptr_t v, setv, tid;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
        __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line);

        tid = (uintptr_t)curthread;
        v = RW_READ_VALUE(rw);
        if (RW_READERS(v) > 1)
        if (!(v & RW_LOCK_WAITERS)) {
                success = atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid);
        v = RW_READ_VALUE(rw);
        if (RW_READERS(v) > 1) {
        setv = tid | (v & RW_LOCK_WAITERS);
        success = atomic_fcmpset_ptr(&rw->rw_lock, &v, setv);
        if (v & RW_LOCK_WAITERS)

        LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
        if (success) {
                curthread->td_rw_rlocks--;
                WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(rw__upgrade, rw);
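/*
 * rw_downgrade() turns an unrecursed write lock into a single read
 * lock without ever fully releasing it.  Write-waiter bits are
 * preserved so queued writers are not lost; queued readers may be
 * woken, since they can now share the lock.
 */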
        if (SCHEDULER_STOPPED())
                return;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
        __rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line);
        if (rw_recursed(rw))
                panic("downgrade of a recursed lock");

        WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

        tid = (uintptr_t)curthread;
        if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))

        v = rw->rw_lock & RW_LOCK_WAITERS;
        rwait = v & RW_LOCK_READ_WAITERS;
        wwait = v & RW_LOCK_WRITE_WAITERS;
        MPASS(rwait | wwait);

        v &= ~RW_LOCK_READ_WAITERS;
        atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);

        if (rwait && !wwait) {

        curthread->td_rw_rlocks++;
        LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(rw__downgrade, rw);
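/*
 * INVARIANT_SUPPORT-only checker behind the rw_assert() macro; `what'
 * is a bitmask of RA_* flags naming the state the caller expects the
 * lock to be in.
 */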
#ifdef INVARIANT_SUPPORT
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
        const struct rwlock *rw;

        if (SCHEDULER_STOPPED())
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
        case RA_RLOCKED | RA_RECURSED:
        case RA_RLOCKED | RA_NOTRECURSED:
                if (rw->rw_lock == RW_UNLOCKED ||
                    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
                    rw_wowner(rw) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rw->lock_object.lo_name, (what & RA_RLOCKED) ?
                            "read " : "", file, line);

                if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
                        if (rw_recursed(rw)) {
                                if (what & RA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            rw->lock_object.lo_name, file,
                                            line);
                        } else if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
        case RA_WLOCKED:
        case RA_WLOCKED | RA_RECURSED:
        case RA_WLOCKED | RA_NOTRECURSED:
                if (rw_wowner(rw) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                if (rw_recursed(rw)) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                if (rw_wowner(rw) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
        default:
                panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
                    line);
db_show_rwlock(const struct lock_object *lock)
        const struct rwlock *rw;
        struct thread *td;

        rw = (const struct rwlock *)lock;

        db_printf(" state: ");
        if (rw->rw_lock == RW_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (rw->rw_lock == RW_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (rw->rw_lock & RW_LOCK_READ)
                db_printf("RLOCK: %ju locks\n",
                    (uintmax_t)(RW_READERS(rw->rw_lock)));
        else {
                td = rw_wowner(rw);
                db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (rw_recursed(rw))
                        db_printf(" recursed: %u\n", rw->rw_recurse);
        }
        db_printf(" waiters: ");
        switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
        case RW_LOCK_READ_WAITERS:
                db_printf("readers\n");
                break;
        case RW_LOCK_WRITE_WAITERS:
                db_printf("writers\n");
                break;
        case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
                db_printf("readers and writers\n");
                break;
        default:
                db_printf("none\n");
                break;
        }