#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/kernel.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif
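/*
 * Each sx lock sleeps waiters on one of two sleepqueue indices: one for
 * threads blocked waiting for an exclusive lock and one for threads
 * blocked waiting for a shared lock.
 */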
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
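/*
 * GIANT_SAVE() fully releases Giant (recording the recursion depth in
 * _giantcnt) before this thread sleeps on an sx lock, and
 * GIANT_RESTORE() reacquires it to the same depth afterwards.
 */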
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE(work) do {						\
	if (__predict_false(mtx_owned(&Giant))) {			\
		work++;							\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)
static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);
struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};
#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif
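/*
 * Tunables for adaptive spinning and backoff.  With SX_CUSTOM_BACKOFF
 * the sx code keeps its own delay configuration under debug.sx.*;
 * otherwise it shares the system-wide locks_delay settings.
 */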
#ifdef ADAPTIVE_SX
#ifdef SX_CUSTOM_BACKOFF
static u_short __read_frequently asx_retries;
static u_short __read_frequently asx_loops;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "sxlock debugging");
SYSCTL_U16(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_U16(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_U16(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_U16(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_lock_delay_init(void *arg __unused)
{

	lock_delay_default_init(&sx_delay);
	asx_retries = 10;
	asx_loops = max(10000, sx_delay.max);
}
LOCK_DELAY_SYSINIT(sx_lock_delay_init);
#else
#define	sx_delay	locks_delay
#define	asx_retries	locks_delay_retries
#define	asx_loops	locks_delay_loops
#endif
#endif
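/*
 * lock_class callbacks: these adapt the generic lock_object interface
 * (used by e.g. condition variables and the debugger) to sx locks.
 * For lock_sx()/unlock_sx() the "how" value encodes the lock state:
 * non-zero denotes a shared lock.
 */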
static void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

static void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

static uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
static int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif
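/*
 * sx_sysinit() is the SYSINIT hook used by the SX_SYSINIT() macro to
 * initialize statically declared sx locks at boot.
 */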
void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}
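/*
 * Initialize an sx lock, translating the caller's SX_* option flags
 * into the corresponding LO_* flags on the embedded lock object.
 */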
void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}
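/*
 * Destroy an sx lock.  The lock must be unlocked and unrecursed;
 * marking it SX_LOCK_DESTROYED catches use-after-destroy bugs.
 */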
void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}
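/*
 * Typical life cycle of an sx lock (a sketch; "foo_sx" and "foo" are
 * hypothetical names, not part of this file):
 *
 *	static struct sx foo_sx;
 *
 *	sx_init_flags(&foo_sx, "foo", SX_RECURSE);
 *	sx_xlock(&foo_sx);		exclusive section
 *	sx_downgrade(&foo_sx);		keep reading, admit other readers
 *	sx_sunlock(&foo_sx);
 *	sx_destroy(&foo_sx);
 */

/*
 * Try to acquire a shared lock without blocking: loop bumping the
 * sharer count as long as the lock is not owned exclusively.
 */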
int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_sx_slocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}
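/*
 * Try to acquire an exclusive lock without blocking.  An already owned
 * recursable lock may still be acquired recursively here.
 */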
int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}
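/*
 * Release an exclusive lock, diverting to _sx_xunlock_hard() when the
 * lock is recursed or has waiters to wake.
 */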
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}
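/*
 * Try to upgrade from a shared lock to an exclusive lock without
 * blocking.  This can only succeed while we are the sole sharer, and
 * any waiter bits must be preserved across the transition.
 */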
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x, waiters;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	success = 0;
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (SX_SHARERS(x) > 1)
			break;
		waiters = (x & SX_LOCK_WAITERS);
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
		    (uintptr_t)curthread | waiters)) {
			success = 1;
			break;
		}
	}
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_sx_slocks--;
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}
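/*
 * Downgrade an unrecursed exclusive lock into a single shared lock,
 * waking up any shared waiters.  SX_LOCK_EXCLUSIVE_WAITERS is
 * preserved so exclusive waiters are woken on the final unlock.
 */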
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Fast path: switch to a single sharer while preserving the
	 * exclusive waiters flag, provided there are no shared waiters.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS)))
		goto out;

	/*
	 * Otherwise lock the sleep queue so the waiter bits are stable
	 * and wake up any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	if (wakeup_swapper)
		kick_proc0();

out:
	curthread->td_sx_slocks++;
	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);
}
#ifdef ADAPTIVE_SX
static inline void
sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
{

	if (x & SX_LOCK_WRITE_SPINNER)
		return;
	if (*in_critical) {
		critical_exit();
		*in_critical = false;
		(*extra_work)--;
	}
}
#else
#define	sx_drop_critical(x, in_critical, extra_work) do { } while (0)
#endif
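/*
 * _sx_xlock_hard() is the contested path for sx_xlock().  It handles
 * recursive acquires, adaptive spinning while the owner is running on
 * another CPU, and finally blocking on the exclusive sleep queue.
 */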
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	uintptr_t tid, setx;
#ifdef ADAPTIVE_SX
	struct thread *owner;
	u_int i, n, spintries = 0;
	enum { READERS, WRITER } sleep_reason = READERS;
	bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
	int doing_lockprof = 0;
#endif
	int error = 0;
	int extra_work = 0;

	tid = (uintptr_t)curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		while (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				goto out_lockstat;
		}
		extra_work = 1;
		doing_lockprof = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	doing_lockprof = 1;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
	    &waittime);

	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
		if (x == (SX_LOCK_SHARED | SX_LOCK_WRITE_SPINNER)) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is running
		 * on another CPU, spin until the owner stops running or
		 * the state of the lock changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			sx_drop_critical(x, &in_critical, &extra_work);
			sleep_reason = WRITER;
			owner = lv_sx_owner(x);
			if (!TD_IS_RUNNING(owner))
				goto sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, sx, owner);
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname(curthread), "spinning",
			    "lockname:\"%s\"", sx->lock_object.lo_name);
			do {
				lock_delay(&lda);
				x = SX_READ_VALUE(sx);
				owner = lv_sx_owner(x);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname(curthread), "running");
			continue;
		} else if (SX_SHARERS(x) > 0) {
			sleep_reason = READERS;
			if (spintries == asx_retries)
				goto sleepq;
			if (!(x & SX_LOCK_WRITE_SPINNER)) {
				/* ... */
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_WRITE_SPINNER)) {
					/* ... */
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread",
			    sched_tdname(curthread), "spinning",
			    "lockname:\"%s\"", sx->lock_object.lo_name);
			n = SX_SHARERS(x);
			for (i = 0; i < asx_loops; i += n) {
				/* ... */
				x = SX_READ_VALUE(sx);
				if (!(x & SX_LOCK_WRITE_SPINNER))
					break;
				if (!(x & SX_LOCK_SHARED))
					break;
				n = SX_SHARERS(x);
				if (n == 0)
					break;
			}
			KTR_STATE0(KTR_SCHED, "thread",
			    sched_tdname(curthread), "running");
			if (i < asx_loops)
				continue;
		}
sleepq:
#endif
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				sx_drop_critical(x, &in_critical,
				    &extra_work);
				continue;
			}
		} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
			sleepq_release(&sx->lock_object);
			sx_drop_critical(x, &in_critical, &extra_work);
			continue;
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.
		 */
		setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
		if ((x & ~setx) == SX_LOCK_SHARED) {
			setx &= ~SX_LOCK_WRITE_SPINNER;
			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
				goto retry_sleepq;
			sleepq_release(&sx->lock_object);
			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
			    __func__, sx);
			break;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If we set SX_LOCK_WRITE_SPINNER, clear it before
		 * going to sleep: when the bit is set there must be a
		 * writer ready to grab the lock.
		 */
		if (in_critical) {
			if ((x & SX_LOCK_WRITE_SPINNER) ||
			    !((x & SX_LOCK_EXCLUSIVE_WAITERS))) {
				setx = x & ~SX_LOCK_WRITE_SPINNER;
				setx |= SX_LOCK_EXCLUSIVE_WAITERS;
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    setx))
					goto retry_sleepq;
			}
			critical_exit();
			in_critical = false;
		} else {
#endif
			/*
			 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.
			 * If we fail to set it, drop the sleep queue
			 * chain lock and loop back.
			 */
			if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
				if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
				    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
					goto retry_sleepq;
				}
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK,
					    "%s: %p set excl waiters flag",
					    __func__, sx);
			}
#ifdef ADAPTIVE_SX
		}
#endif

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
	if (__predict_true(!extra_work))
		return (error);
#ifdef ADAPTIVE_SX
	if (in_critical)
		critical_exit();
#endif
	GIANT_RESTORE();
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!doing_lockprof))
		return (error);
out_lockstat:
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	return (error);
}
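/*
 * _sx_xunlock_hard() is the contested path for sx_xunlock(): it
 * unrecurses a recursed lock, and otherwise releases the lock and
 * wakes up one queue of waiters, preferring exclusive waiters over
 * shared ones.
 */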
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid, setx;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;

	if (__predict_false(x == tid))
		x = SX_READ_VALUE(sx);

	MPASS(!(x & SX_LOCK_SHARED));

	if (__predict_false(x & SX_LOCK_RECURSED)) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

	/* Exclusive waiters take precedence if any are present. */
	setx = SX_LOCK_UNLOCKED;
	queue = SQ_SHARED_QUEUE;
	if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
		queue = SQ_EXCLUSIVE_QUEUE;
		setx |= (x & SX_LOCK_SHARED_WAITERS);
	}
	atomic_store_rel_ptr(&sx->sx_lock, setx);

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}
static bool __always_inline
__sx_can_read(struct thread *td, uintptr_t x, bool fp)
{

	if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS |
	    SX_LOCK_WRITE_SPINNER)) == SX_LOCK_SHARED)
		return (true);
	if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
		return (true);
	return (false);
}

static bool __always_inline
__sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.
	 */
	while (__sx_can_read(td, *xp, fp)) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			td->td_sx_slocks++;
			return (true);
		}
	}
	return (false);
}
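/*
 * _sx_slock_hard() is the contested path for sx_slock(): it spins
 * adaptively while a writer owns the lock and is running, and
 * otherwise blocks on the shared sleep queue.
 */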
static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	struct thread *td;
#ifdef ADAPTIVE_SX
	struct thread *owner;
	u_int i, n, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state = 0;
#endif
	int extra_work = 0;

	td = curthread;

#ifdef KDTRACE_HOOKS
	if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			goto out_lockstat;
		extra_work = 1;
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif
#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#endif

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
	    &waittime);

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((x & SX_LOCK_SHARED) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else {
			if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
				/* ... */
				x = SX_READ_VALUE(sx);
				continue;
			}
			if (spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				/* ... */
				for (i = 0; i < asx_loops; i += n) {
					/* ... */
					x = SX_READ_VALUE(sx);
					if (!(x & SX_LOCK_SHARED))
						break;
					/* ... */
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				/* ... */
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
		if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
		    __sx_can_read(td, x, false)) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, drop the
		 * sleep queue lock and spin instead.
		 */
		if (!(x & SX_LOCK_SHARED)) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it, drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_SHARED_WAITERS))
				goto retry_sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
out_lockstat:
#endif
	GIANT_RESTORE();
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	return (error);
}
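/*
 * The shared-lock fast path: try an inline sharer-count bump and fall
 * back to _sx_slock_hard() on contention, or whenever lockstat probes
 * are enabled so acquisition events get recorded.
 */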
int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_obtain_lock_success(&sx->lock_object, false, 0, 0,
		    file, line);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}
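/*
 * Release one shared count without blocking.  This fails when we are
 * the last sharer and waiter bits are set, in which case the caller
 * must take the slow path and wake waiters.
 */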
static bool __always_inline
_sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
{

	for (;;) {
		if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				td->td_sx_slocks--;
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}
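/*
 * _sx_sunlock_hard() handles the final shared release when waiters are
 * present: it clears the lock under the sleepqueue lock and wakes the
 * exclusive queue if it has waiters, else the shared queue.
 */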
static void __noinline
_sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
    LOCK_FILE_LINE_ARG_DEF)
{
	int wakeup_swapper = 0;
	uintptr_t setx, queue;

	if (SCHEDULER_STOPPED())
		return;

	if (_sx_sunlock_try(sx, td, &x))
		goto out_lockstat;

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	for (;;) {
		if (_sx_sunlock_try(sx, td, &x))
			break;

		/*
		 * Release the lock and wake up the appropriate queue,
		 * preferring exclusive waiters.  The lock state may
		 * have changed, so retry on a failed update.
		 */
		setx = SX_LOCK_UNLOCKED;
		queue = SQ_SHARED_QUEUE;
		if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			setx |= (x & SX_LOCK_SHARED_WAITERS);
		}
		setx |= (x & SX_LOCK_WRITE_SPINNER);
		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
			continue;
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK,
			    "%s: %p waking up all threads on exclusive queue",
			    __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, queue);
		td->td_sx_slocks--;
		break;
	}
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}
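/*
 * Release a shared lock: inline fast path with a fallback to
 * _sx_sunlock_hard() when waiters must be woken or lockstat probes
 * are enabled.
 */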
void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	td = curthread;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, td, &x)))
		_sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
	else
		lock_profile_release_lock(&sx->lock_object, false);

	TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}
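/*
 * sx_assert() backend for INVARIANT_SUPPORT kernels.  Without WITNESS
 * it can only verify that *some* thread holds a shared lock, not that
 * the calling thread does.
 */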
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock, fail.  We can't
		 * reliably check whether we hold a shared lock or not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */
#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch(sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	const struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock: first
	 * check the lock class, then compare the lock name against the
	 * wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif