#include <sys/kernel.h>
#include <sys/rmlock.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>
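/*
 * Read-mostly (rm) locks make read acquisition nearly free by keeping all
 * read-side state in a per-CPU tracker queue, while writers must IPI every
 * CPU that may hold a read token.  A minimal usage sketch, assuming a
 * hypothetical consumer lock `example_lock` (rmlock(9) is the
 * authoritative reference for the KPI):
 *
 *	static struct rmlock example_lock;
 *	struct rm_priotracker tracker;
 *
 *	rm_init(&example_lock, "example");
 *	rm_rlock(&example_lock, &tracker);
 *	... read-side section; no sleeping unless RM_SLEEPABLE ...
 *	rm_runlock(&example_lock, &tracker);
 *
 *	rm_wlock(&example_lock);
 *	... write-side section ...
 *	rm_wunlock(&example_lock);
 */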
/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif
static void	assert_rm(const struct lock_object *lock, int what);
static void	db_show_rm(const struct lock_object *lock);
static void	lock_rm(struct lock_object *lock, uintptr_t how);
static int	owner_rm(const struct lock_object *lock, struct thread **owner);
static uintptr_t unlock_rm(struct lock_object *lock);

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
	.lc_ddb_show = db_show_rm,
	.lc_lock = lock_rm,
	.lc_owner = owner_rm,
	.lc_unlock = unlock_rm,
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
	.lc_ddb_show = db_show_rm,
	.lc_lock = lock_rm,
	.lc_owner = owner_rm,
	.lc_unlock = unlock_rm,
};
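/*
 * The lc_lock/lc_unlock methods encode the read-side tracker in the opaque
 * `how` value: unlock_rm() hands back the tracker address (or 0 when the
 * lock was write-locked) and lock_rm() casts it back to reacquire in the
 * same mode on behalf of sleep primitives that must drop the lock.
 */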
static void
assert_rm(const struct lock_object *lock, int what)
{

	rm_assert((const struct rmlock *)lock, what);
}

static void
lock_rm(struct lock_object *lock, uintptr_t how)
{
	struct rmlock *rm;
	struct rm_priotracker *tracker;

	rm = (struct rmlock *)lock;
	if (how == 0)
		rm_wlock(rm);
	else {
		tracker = (struct rm_priotracker *)how;
		rm_rlock(rm, tracker);
	}
}
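/*
 * unlock_rm() has to discover which tracker on the current CPU's queue
 * belongs to curthread; the RA_LOCKED | RA_NOTRECURSED assertion guarantees
 * the match is unique.  The scan runs inside a critical section so the
 * queue cannot change underneath us.
 */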
static uintptr_t
unlock_rm(struct lock_object *lock)
{
	struct thread *td;
	struct pcpu *pc;
	struct rmlock *rm;
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	uintptr_t how;

	rm = (struct rmlock *)lock;
	tracker = NULL;
	how = 0;
	rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
	if (rm_wowned(rm))
		rm_wunlock(rm);
	else {
		critical_enter();
		td = curthread;
		pc = get_pcpu();
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tracker = (struct rm_priotracker *)queue;
			if ((tracker->rmp_rmlock == rm) &&
			    (tracker->rmp_thread == td)) {
				how = (uintptr_t)tracker;
				break;
			}
		}
		KASSERT(tracker != NULL,
		    ("rm_priotracker is non-NULL when lock held in read mode"));
		rm_runlock(rm, tracker);
		critical_exit();
	}
	return (how);
}
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
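/*
 * Trackers are kept on a per-CPU doubly linked queue rooted at
 * pc->pc_rm_queue.  Manipulation happens pinned or in a critical section,
 * so plain pointer stores suffice; rmq_prev exists only for O(1) removal
 * and is never followed during interrupt-time forward traversal.
 */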
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers. */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}
static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}
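/*
 * IPI handler run on each CPU whose read token a writer is revoking: every
 * tracker for this lock that is not already queued gets flagged
 * RMPF_ONQUEUE and published on rm_activeReaders, where the writer can
 * wait for it to drain.
 */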
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = get_pcpu();
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}
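/*
 * The backing write lock is an sx lock for RM_SLEEPABLE locks and a mutex
 * otherwise.  Witness tracks the rmlock itself, so the backing lock is
 * created with SX_NOWITNESS/MTX_NOWITNESS to avoid double reporting.
 */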
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags, xflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	if (opts & RM_NEW)
		liflags |= LO_NEW;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		xflags = (opts & RM_NEW ? SX_NEW : 0);
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
		    xflags | SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		xflags = (opts & RM_NEW ? MTX_NEW : 0);
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
		    xflags | MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}
void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}
int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}
void
rm_sysinit(void *arg)
{
	struct rm_args *args;

	args = arg;
	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
}
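/*
 * Read slow path: taken when this CPU's read token was revoked by a writer
 * or preemption is pending.  The cheap exits (token still valid, the IPI
 * already queued us, or permitted recursion) avoid touching the backing
 * write lock entirely.
 */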
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = get_pcpu();

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/*
	 * Check to see if the IPI granted us the lock after all.  The load of
	 * rmp_flags must happen after the tracker is removed from the list.
	 */
	atomic_interrupt_fence();
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = get_pcpu();
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
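/*
 * The read fast path bumps td_critnest directly (an inlined
 * critical_enter()) and brackets the tracker enqueue with
 * atomic_interrupt_fence() so the queue update cannot be torn by an
 * interrupt on this CPU.
 */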
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_NO_SLEEPING();

	td->td_critnest++;	/* critical_enter(); */
	atomic_interrupt_fence();

	pc = cpuid_to_pcpu[td->td_oncpu];
	rm_tracker_add(pc, tracker);
	sched_pin();

	atomic_interrupt_fence();
	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (__predict_true(0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return _rm_rlock_hard(rm, tracker, trylock);
}
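/*
 * Unlock slow path: entered when preemption is owed or rm_cleanIPI() put
 * this tracker on rm_activeReaders.  If a writer requested a signal, the
 * reader hands the lock off through the lock's turnstile.
 */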
static __noinline void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);
		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	atomic_interrupt_fence();

	pc = cpuid_to_pcpu[td->td_oncpu];
	rm_tracker_remove(pc, tracker);

	atomic_interrupt_fence();
	td->td_critnest--;
	sched_unpin();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		THREAD_SLEEPING_OK();

	if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))
		return;

	_rm_unlock_hard(td, tracker);
}
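/*
 * Write locking: acquire the backing lock, then revoke all outstanding
 * read tokens with an IPI rendezvous and sleep on the turnstile until the
 * readers caught mid-section have drained from rm_activeReaders.
 */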
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back. */
		readcpus = all_cpus;
		CPU_ANDNOT(&readcpus, &readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendezvous_barrier,
		    rm_cleanIPI,
		    smp_no_rendezvous_barrier,
		    rm);
#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}
void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}
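/*
 * The _debug entry points below wrap the raw primitives with KASSERTs,
 * witness ordering checks and KTR lock logging when lock debugging is
 * compiled in; otherwise (see the #else stubs further down) they strip
 * the file/line arguments and call straight through.
 */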
#if LOCK_DEBUG > 0
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	TD_LOCKS_DEC(curthread);
}
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(get_pcpu(), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object,
		    LOP_NEWORDER | LOP_NOSLEEP, file, line, NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, LOP_NOSLEEP, file, line);
		TD_LOCKS_INC(curthread);
		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
}
#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return _rm_rlock(rm, tracker, trylock);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif
#ifdef INVARIANT_SUPPORT
/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions as witness does not track read locks in rmlocks.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (SCHEDULER_STOPPED())
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other primitives,
		 * writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(get_pcpu(), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}
static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif /* DDB */
#define	RMS_NOOWNER	((void *)0x1)
#define	RMS_TRANSIENT	((void *)0x2)
#define	RMS_FLAGMASK	0xf

struct rmslock_pcpu {
	int influx;
	int readers;
};

_Static_assert(sizeof(struct rmslock_pcpu) == 8, "bad size");
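/*
 * Read-mostly sleepable locks: readers run lock-free against per-CPU
 * counters while writers rendezvous those counters into the central
 * rms->readers count.  A minimal usage sketch, assuming a hypothetical
 * consumer lock `example_rms`:
 *
 *	static struct rmslock example_rms;
 *
 *	rms_init(&example_rms, "example");
 *	rms_rlock(&example_rms);
 *	... read-side section; sleeping is permitted ...
 *	rms_runlock(&example_rms);
 *
 *	rms_wlock(&example_rms);
 *	... write-side section ...
 *	rms_wunlock(&example_rms);
 */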
static inline struct rmslock_pcpu *
rms_int_pcpu(struct rmslock *rms)
{

	CRITICAL_ASSERT(curthread);
	return (zpcpu_get(rms->pcpu));
}

static inline struct rmslock_pcpu *
rms_int_remote_pcpu(struct rmslock *rms, int cpu)
{

	return (zpcpu_get_cpu(rms->pcpu, cpu));
}

static void
rms_int_influx_enter(struct rmslock *rms, struct rmslock_pcpu *pcpu)
{

	CRITICAL_ASSERT(curthread);
	MPASS(pcpu->influx == 0);
	pcpu->influx = 1;
}

static void
rms_int_influx_exit(struct rmslock *rms, struct rmslock_pcpu *pcpu)
{

	CRITICAL_ASSERT(curthread);
	MPASS(pcpu->influx == 1);
	pcpu->influx = 0;
}

static void
rms_int_debug_readers_inc(struct rmslock *rms)
{
	int old;

	old = atomic_fetchadd_int(&rms->debug_readers, 1);
	KASSERT(old >= 0, ("%s: bad readers count %d\n", __func__, old));
}

static void
rms_int_debug_readers_dec(struct rmslock *rms)
{
	int old;

	old = atomic_fetchadd_int(&rms->debug_readers, -1);
	KASSERT(old > 0, ("%s: bad readers count %d\n", __func__, old));
}

static void
rms_int_readers_inc(struct rmslock *rms, struct rmslock_pcpu *pcpu)
{

	CRITICAL_ASSERT(curthread);
	rms_int_debug_readers_inc(rms);
	pcpu->readers++;
}

static void
rms_int_readers_dec(struct rmslock *rms, struct rmslock_pcpu *pcpu)
{

	CRITICAL_ASSERT(curthread);
	rms_int_debug_readers_dec(rms);
	pcpu->readers--;
}
void
rms_init(struct rmslock *rms, const char *name)
{

	rms->owner = RMS_NOOWNER;
	rms->writers = 0;
	rms->readers = 0;
	rms->debug_readers = 0;
	mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
	rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO);
}

void
rms_destroy(struct rmslock *rms)
{

	MPASS(rms->writers == 0);
	MPASS(rms->readers == 0);
	mtx_destroy(&rms->mtx);
	uma_zfree_pcpu(pcpu_zone_8, rms->pcpu);
}
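/*
 * Read slow path: a writer is present, so close the influx window opened
 * by the caller and sleep on the mutex until all writers are gone, then
 * take the read reference under the mutex.
 */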
static void __noinline
rms_rlock_fallback(struct rmslock *rms)
{

	rms_int_influx_exit(rms, rms_int_pcpu(rms));
	critical_exit();

	mtx_lock(&rms->mtx);
	while (rms->writers > 0)
		msleep(&rms->readers, &rms->mtx, PUSER - 1,
		    mtx_name(&rms->mtx), 0);
	critical_enter();
	rms_int_readers_inc(rms, rms_int_pcpu(rms));
	mtx_unlock(&rms->mtx);
	critical_exit();
	TD_LOCKS_INC(curthread);
}
void
rms_rlock(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	critical_enter();
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_rlock_fallback(rms);
		return;
	}
	atomic_interrupt_fence();
	rms_int_readers_inc(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);
	critical_exit();
	TD_LOCKS_INC(curthread);
}
int
rms_try_rlock(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;

	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	critical_enter();
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_int_influx_exit(rms, pcpu);
		critical_exit();
		return (0);
	}
	atomic_interrupt_fence();
	rms_int_readers_inc(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);
	critical_exit();
	TD_LOCKS_INC(curthread);
	return (1);
}
static void __noinline
rms_runlock_fallback(struct rmslock *rms)
{

	rms_int_influx_exit(rms, rms_int_pcpu(rms));
	critical_exit();

	mtx_lock(&rms->mtx);
	MPASS(rms->writers > 0);
	MPASS(rms->readers > 0);
	MPASS(rms->debug_readers == rms->readers);
	rms_int_debug_readers_dec(rms);
	rms->readers--;
	if (rms->readers == 0)
		wakeup_one(&rms->writers);
	mtx_unlock(&rms->mtx);
	TD_LOCKS_DEC(curthread);
}
void
rms_runlock(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;

	critical_enter();
	pcpu = rms_int_pcpu(rms);
	rms_int_influx_enter(rms, pcpu);
	atomic_interrupt_fence();
	if (__predict_false(rms->writers > 0)) {
		rms_runlock_fallback(rms);
		return;
	}
	atomic_interrupt_fence();
	rms_int_readers_dec(rms, pcpu);
	atomic_interrupt_fence();
	rms_int_influx_exit(rms, pcpu);
	critical_exit();
	TD_LOCKS_DEC(curthread);
}
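/*
 * Write-side rendezvous machinery: rms_action_func() folds each CPU's
 * reader count into the central rms->readers, and rms_wait_func() spins
 * on `influx` so the rendezvous is retried for CPUs caught inside a
 * reader's influx window.
 */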
struct rmslock_ipi {
	struct rmslock *rms;
	struct smp_rendezvous_cpus_retry_arg srcra;
};
static void
rms_action_func(void *arg)
{
	struct rmslock_ipi *rmsipi;
	struct rmslock_pcpu *pcpu;
	struct rmslock *rms;

	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
	rms = rmsipi->rms;
	pcpu = rms_int_pcpu(rms);

	if (pcpu->influx)
		return;
	if (pcpu->readers != 0) {
		atomic_add_int(&rms->readers, pcpu->readers);
		pcpu->readers = 0;
	}
	smp_rendezvous_cpus_done(arg);
}
static void
rms_wait_func(void *arg, int cpu)
{
	struct rmslock_ipi *rmsipi;
	struct rmslock_pcpu *pcpu;
	struct rmslock *rms;

	rmsipi = __containerof(arg, struct rmslock_ipi, srcra);
	rms = rmsipi->rms;
	pcpu = rms_int_remote_pcpu(rms, cpu);

	while (atomic_load_int(&pcpu->influx))
		cpu_spinwait();
}
#ifdef INVARIANTS
static void
rms_assert_no_pcpu_readers(struct rmslock *rms)
{
	struct rmslock_pcpu *pcpu;
	int cpu;

	CPU_FOREACH(cpu) {
		pcpu = rms_int_remote_pcpu(rms, cpu);
		if (pcpu->readers != 0) {
			panic("%s: got %d readers on cpu %d\n", __func__,
			    pcpu->readers, cpu);
		}
	}
}
#else
static void
rms_assert_no_pcpu_readers(struct rmslock *rms)
{
}
#endif
static void
rms_wlock_switch(struct rmslock *rms)
{
	struct rmslock_ipi rmsipi;

	MPASS(rms->readers == 0);
	MPASS(rms->writers == 1);

	rmsipi.rms = rms;

	smp_rendezvous_cpus_retry(all_cpus,
	    smp_no_rendezvous_barrier,
	    rms_action_func,
	    smp_no_rendezvous_barrier,
	    rms_wait_func,
	    &rmsipi.srcra);
}
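/*
 * Only the first writer performs the rendezvous switch; subsequent
 * writers sleep on rms->owner and receive the lock directly from
 * rms_wunlock() via the RMS_TRANSIENT handoff, skipping another
 * rendezvous.
 */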
void
rms_wlock(struct rmslock *rms)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
	MPASS(atomic_load_ptr(&rms->owner) != curthread);

	mtx_lock(&rms->mtx);
	rms->writers++;
	if (rms->writers > 1) {
		msleep(&rms->owner, &rms->mtx, (PUSER - 1),
		    mtx_name(&rms->mtx), 0);
		MPASS(rms->readers == 0);
		KASSERT(rms->owner == RMS_TRANSIENT,
		    ("%s: unexpected owner value %p\n", __func__,
		    rms->owner));
		goto out_grab;
	}

	KASSERT(rms->owner == RMS_NOOWNER,
	    ("%s: unexpected owner value %p\n", __func__, rms->owner));

	rms_wlock_switch(rms);
	rms_assert_no_pcpu_readers(rms);

	if (rms->readers > 0) {
		msleep(&rms->writers, &rms->mtx, (PUSER - 1),
		    mtx_name(&rms->mtx), 0);
	}

out_grab:
	rms->owner = curthread;
	rms_assert_no_pcpu_readers(rms);
	mtx_unlock(&rms->mtx);
	MPASS(rms->readers == 0);
	TD_LOCKS_INC(curthread);
}
void
rms_wunlock(struct rmslock *rms)
{

	mtx_lock(&rms->mtx);
	KASSERT(rms->owner == curthread,
	    ("%s: unexpected owner value %p\n", __func__, rms->owner));
	MPASS(rms->writers >= 1);
	MPASS(rms->readers == 0);
	rms->writers--;
	if (rms->writers > 0) {
		rms->owner = RMS_TRANSIENT;
		wakeup_one(&rms->owner);
	} else {
		rms->owner = RMS_NOOWNER;
		wakeup(&rms->readers);
	}
	rms_assert_no_pcpu_readers(rms);
	mtx_unlock(&rms->mtx);
	TD_LOCKS_DEC(curthread);
}
void
rms_unlock(struct rmslock *rms)
{

	if (rms_wowned(rms))
		rms_wunlock(rms);
	else
		rms_runlock(rms);
}