37#include "opt_umtx_profiling.h"
40#include <sys/kernel.h>
43#include <sys/filedesc.h>
44#include <sys/limits.h>
46#include <sys/malloc.h>
51#include <sys/resource.h>
52#include <sys/resourcevar.h>
53#include <sys/rwlock.h>
57#include <sys/sysctl.h>
58#include <sys/sysent.h>
60#include <sys/sysproto.h>
61#include <sys/syscallsubr.h>
62#include <sys/taskqueue.h>
64#include <sys/eventhandler.h>
66#include <sys/umtxvar.h>
68#include <security/mac/mac_framework.h>
71#include <vm/vm_param.h>
74#include <vm/vm_object.h>
76#include <machine/atomic.h>
77#include <machine/cpu.h>
79#include <compat/freebsd32/freebsd32.h>
80#ifdef COMPAT_FREEBSD32
81#include <compat/freebsd32/freebsd32_proto.h>
/*
 * Compare two percentages split into whole/fractional parts: true when
 * (w, f) is strictly greater than (sw, sf).  Whole parts are compared
 * first; fractions break ties.  Used by the umtx chain-profiling sysctl
 * to rank the busiest hash chains.
 */
88#define UPROF_PERC_BIGGER(w, f, sw, sf) \
89 (((w) > (sw)) || ((w) == (sw) && (f) > (sf)))
92#define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED)
94#define UMTXQ_ASSERT_LOCKED_BUSY(key) do { \
95 struct umtxq_chain *uc; \
97 uc = umtxq_getchain(key); \
98 mtx_assert(&uc->uc_lock, MA_OWNED); \
99 KASSERT(uc->uc_busy != 0, ("umtx chain is not busy")); \
102#define UMTXQ_ASSERT_LOCKED_BUSY(key) do {} while (0)
/*
 * Effective user priority of a thread for umtx queue ordering: any
 * priority inside the timeshare band collapses to PRI_MAX_TIMESHARE so
 * timeshare threads are not ordered among themselves; other priorities
 * (e.g. realtime) are used as-is.
 */
114#define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
115 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
116 PRI_MAX_TIMESHARE : (td)->td_user_pri)

/* Multiplicative hash constant (Fibonacci hashing) for umtx key hashing. */
118#define GOLDEN_RATIO_PRIME 2654404609U
/* Number of hash chains; must stay in sync with UMTX_SHIFTS (2^9 == 512). */
120#define UMTX_CHAINS 512
/* Right-shift that keeps the top 9 bits of the hash, i.e. log2(UMTX_CHAINS). */
122#define UMTX_SHIFTS (__WORD_BIT - 9)

/*
 * Map the user-supplied flags word to a sharing mode: objects without
 * USYNC_PROCESS_SHARED are private to the owning process's vmspace.
 */
124#define GET_SHARE(flags) \
125 (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

/* Spin iterations while waiting for a busy chain before sleeping. */
127#define BUSY_SPINS 200
132 struct _umtx_time *tp);
134 struct umtx_robust_lists_params *rbp);
136 struct timespec *tsp);
144 __offsetof(
struct umutex32, m_spare[0]),
"m_spare32");
147SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
149 "False forces destruction of umtx attached to file, on last close");
151SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
153 "Maximum number of robust mutexes allowed for each thread");
160static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
165SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
170static long max_length;
171SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0,
"max_length");
172static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
177 const struct _umtx_time *umtxtime);
187#define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)
193umtx_init_profiling(
void)
195 struct sysctl_oid *chain_oid;
200 snprintf(chain_name,
sizeof(chain_name),
"%d", i);
201 chain_oid = SYSCTL_ADD_NODE(NULL,
202 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO,
203 chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
205 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
206 "max_length0", CTLFLAG_RD, &
umtxq_chains[0][i].max_length, 0, NULL);
207 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
208 "max_length1", CTLFLAG_RD, &
umtxq_chains[1][i].max_length, 0, NULL);
213sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS)
217 struct umtxq_chain *uc;
218 u_int fract, i, j, tot, whole;
219 u_int sf0, sf1, sf2, sf3, sf4;
220 u_int si0, si1, si2, si3, si4;
221 u_int sw0, sw1, sw2, sw3, sw4;
224 for (i = 0; i < 2; i++) {
228 mtx_lock(&uc->uc_lock);
229 tot += uc->max_length;
230 mtx_unlock(&uc->uc_lock);
235 sf0 = sf1 = sf2 = sf3 = sf4 = 0;
236 si0 = si1 = si2 = si3 = si4 = 0;
237 sw0 = sw1 = sw2 = sw3 = sw4 = 0;
240 mtx_lock(&uc->uc_lock);
241 whole = uc->max_length * 100;
242 mtx_unlock(&uc->uc_lock);
243 fract = (whole % tot) * 100;
244 if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) {
248 }
else if (UPROF_PERC_BIGGER(whole, fract, sw1,
253 }
else if (UPROF_PERC_BIGGER(whole, fract, sw2,
258 }
else if (UPROF_PERC_BIGGER(whole, fract, sw3,
263 }
else if (UPROF_PERC_BIGGER(whole, fract, sw4,
271 sbuf_printf(&sb,
"1st: %u.%u%% idx: %u\n", sw0 / tot,
273 sbuf_printf(&sb,
"2nd: %u.%u%% idx: %u\n", sw1 / tot,
275 sbuf_printf(&sb,
"3rd: %u.%u%% idx: %u\n", sw2 / tot,
277 sbuf_printf(&sb,
"4th: %u.%u%% idx: %u\n", sw3 / tot,
279 sbuf_printf(&sb,
"5th: %u.%u%% idx: %u\n", sw4 / tot,
291sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS)
293 struct umtxq_chain *uc;
299 if (error != 0 || req->newptr == NULL)
303 for (i = 0; i < 2; ++i) {
306 mtx_lock(&uc->uc_lock);
309 mtx_unlock(&uc->uc_lock);
317 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
318 sysctl_debug_umtx_chains_clear,
"I",
319 "Clear umtx chains statistics");
321 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0,
322 sysctl_debug_umtx_chains_peaks,
"A",
323 "Highest peaks in chains max length");
331 umtx_pi_zone = uma_zcreate(
"umtx pi",
sizeof(
struct umtx_pi),
332 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
333 for (i = 0; i < 2; ++i) {
336 MTX_DEF | MTX_DUPOK);
350 umtx_init_profiling();
352 mtx_init(&
umtx_lock,
"umtx lock", NULL, MTX_DEF);
361 uq =
malloc(
sizeof(
struct umtx_q), M_UMTX, M_WAITOK | M_ZERO);
362 uq->uq_spare_queue =
malloc(
sizeof(
struct umtxq_queue), M_UMTX,
364 TAILQ_INIT(&uq->uq_spare_queue->head);
365 TAILQ_INIT(&uq->uq_pi_contested);
366 uq->uq_inherited_pri = PRI_MAX;
374 MPASS(uq->uq_spare_queue != NULL);
375 free(uq->uq_spare_queue, M_UMTX);
384 n = (uintptr_t)key->info.both.a + key->info.both.b;
392 if (key->type <= TYPE_SEM)
404 struct umtxq_chain *uc;
407 mtx_assert(&uc->uc_lock, MA_OWNED);
414 while (uc->uc_busy && --
count > 0)
420 while (uc->uc_busy) {
422 msleep(uc, &uc->uc_lock, 0,
"umtxqb", 0);
435 struct umtxq_chain *uc;
438 mtx_assert(&uc->uc_lock, MA_OWNED);
439 KASSERT(uc->uc_busy != 0, (
"not busy"));
454static struct umtxq_queue *
457 struct umtxq_queue *uh;
458 struct umtxq_chain *uc;
462 LIST_FOREACH(uh, &uc->uc_queue[q], link) {
463 if (umtx_key_match(&uh->key, key))
473 struct umtxq_queue *uh;
474 struct umtxq_chain *uc;
478 KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, (
"umtx_q is already on queue"));
481 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link);
483 uh = uq->uq_spare_queue;
484 uh->key = uq->uq_key;
485 LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link);
488 if (uc->length > uc->max_length) {
489 uc->max_length = uc->length;
490 if (uc->max_length > max_length)
491 max_length = uc->max_length;
495 uq->uq_spare_queue = NULL;
497 TAILQ_INSERT_TAIL(&uh->head, uq, uq_link);
499 uq->uq_flags |= UQF_UMTXQ;
500 uq->uq_cur_queue = uh;
507 struct umtxq_chain *uc;
508 struct umtxq_queue *uh;
512 if (uq->uq_flags & UQF_UMTXQ) {
513 uh = uq->uq_cur_queue;
514 TAILQ_REMOVE(&uh->head, uq, uq_link);
516 uq->uq_flags &= ~UQF_UMTXQ;
517 if (TAILQ_EMPTY(&uh->head)) {
518 KASSERT(uh->length == 0,
519 (
"inconsistent umtxq_queue length"));
523 LIST_REMOVE(uh, link);
525 uh = LIST_FIRST(&uc->uc_spare_queue);
526 KASSERT(uh != NULL, (
"uc_spare_queue is empty"));
527 LIST_REMOVE(uh, link);
529 uq->uq_spare_queue = uh;
530 uq->uq_cur_queue = NULL;
540 struct umtxq_queue *uh;
556 struct umtxq_queue *uh;
562 *first = TAILQ_FIRST(&uh->head);
574 struct umtxq_queue *uh;
575 struct umtx_q *uq, *uq_temp;
583 TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) {
584 if ((uq->uq_bitset & bitset) == 0)
601 struct umtxq_queue *uh;
609 while ((uq = TAILQ_FIRST(&uh->head)) != NULL) {
642 struct umtxq_queue *uh;
643 struct umtx_q *uq, *uq_temp;
652 TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) {
653 if (++ret <= n_wake) {
660 if (ret - n_wake == n_requeue)
672 TIMESPEC_TO_TIMEVAL(&tv, tsp);
678 int absolute,
const struct timespec *timeout)
681 timo->clockid = clockid;
683 timo->is_abs_real =
false;
685 timespecadd(&timo->cur, timeout, &timo->end);
687 timo->end = *timeout;
688 timo->is_abs_real = clockid == CLOCK_REALTIME ||
689 clockid == CLOCK_REALTIME_FAST ||
690 clockid == CLOCK_REALTIME_PRECISE ||
691 clockid == CLOCK_SECOND;
697 const struct _umtx_time *umtxtime)
701 (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout);
711 switch (timo->clockid) {
715 case CLOCK_REALTIME_PRECISE:
716 case CLOCK_REALTIME_FAST:
717 case CLOCK_MONOTONIC:
718 case CLOCK_MONOTONIC_PRECISE:
719 case CLOCK_MONOTONIC_FAST:
721 case CLOCK_UPTIME_PRECISE:
722 case CLOCK_UPTIME_FAST:
724 timespec2bintime(&timo->end, &
bt);
725 switch (timo->clockid) {
727 case CLOCK_REALTIME_PRECISE:
728 case CLOCK_REALTIME_FAST:
731 bintime_sub(&
bt, &bbt);
736 if (
bt.sec >= (SBT_MAX >> 32)) {
742 switch (timo->clockid) {
743 case CLOCK_REALTIME_FAST:
744 case CLOCK_MONOTONIC_FAST:
745 case CLOCK_UPTIME_FAST:
758 case CLOCK_THREAD_CPUTIME_ID:
759 case CLOCK_PROCESS_CPUTIME_ID:
762 if (timespeccmp(&timo->end, &timo->cur, <=))
764 timespecsub(&timo->end, &timo->cur, &tts);
766 *
flags = C_HARDCLOCK;
776 return (UMUTEX_RB_OWNERDEAD);
777 else if ((
flags & UMUTEX_NONCONSISTENT) != 0)
778 return (UMUTEX_RB_NOTRECOV);
780 return (UMUTEX_UNOWNED);
790 struct umtx_abs_timeout *timo)
792 struct umtxq_chain *uc;
794 int error,
flags = 0;
799 if (!(uq->uq_flags & UQF_UMTXQ)) {
804 if (timo->is_abs_real)
805 curthread->td_rtcgen =
811 error = msleep_sbt(uq, &uc->uc_lock, PCATCH, wmesg,
813 if (error == EINTR || error == ERESTART)
815 if (error == EWOULDBLOCK && (
flags & C_ABSOLUTE) != 0) {
821 curthread->td_rtcgen = 0;
831 struct thread *td = curthread;
833 vm_map_entry_t entry;
839 if (share == THREAD_SHARE) {
841 key->info.private.vs = td->td_proc->p_vmspace;
842 key->info.private.addr = (uintptr_t)
addr;
844 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE);
845 map = &td->td_proc->p_vmspace->vm_map;
846 if (vm_map_lookup(&map, (vm_offset_t)
addr, VM_PROT_WRITE,
847 &entry, &key->info.shared.object, &pindex, &prot,
848 &wired) != KERN_SUCCESS) {
852 if ((share == PROCESS_SHARE) ||
853 (share == AUTO_SHARE &&
854 VM_INHERIT_SHARE == entry->inheritance)) {
856 key->info.shared.offset = (vm_offset_t)
addr -
857 entry->start + entry->offset;
858 vm_object_reference(key->info.shared.object);
861 key->info.private.vs = td->td_proc->p_vmspace;
862 key->info.private.addr = (uintptr_t)
addr;
864 vm_map_lookup_done(map, entry);
878 vm_object_deallocate(key->info.shared.object);
881#ifdef COMPAT_FREEBSD10
886do_lock_umtx(
struct thread *td,
struct umtx *umtx, u_long
id,
887 const struct timespec *timeout)
889 struct umtx_abs_timeout timo;
907 owner =
casuword(&umtx->u_owner, UMTX_UNOWNED,
id);
910 if (owner == UMTX_UNOWNED)
918 if (owner == UMTX_CONTESTED) {
920 UMTX_CONTESTED,
id | UMTX_CONTESTED);
922 if (owner == UMTX_CONTESTED)
945 AUTO_SHARE, &uq->uq_key)) != 0)
948 umtxq_lock(&uq->uq_key);
952 umtxq_unlock(&uq->uq_key);
960 old =
casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED);
964 umtxq_lock(&uq->uq_key);
966 umtxq_unlock(&uq->uq_key);
976 umtxq_lock(&uq->uq_key);
978 error =
umtxq_sleep(uq,
"umtx", timeout == NULL ? NULL :
981 umtxq_unlock(&uq->uq_key);
988 if (timeout == NULL) {
994 if (error == ERESTART)
1004do_unlock_umtx(
struct thread *td,
struct umtx *umtx, u_long
id)
1006 struct umtx_key key;
1015 owner =
fuword(__DEVOLATILE(u_long *, &umtx->u_owner));
1019 if ((owner & ~UMTX_CONTESTED) !=
id)
1023 if ((owner & UMTX_CONTESTED) == 0) {
1024 old =
casuword(&umtx->u_owner, owner, UMTX_UNOWNED);
1033 if ((error =
umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE,
1047 old =
casuword(&umtx->u_owner, owner,
1048 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED);
1061#ifdef COMPAT_FREEBSD32
1067do_lock_umtx32(
struct thread *td, uint32_t *m, uint32_t
id,
1068 const struct timespec *timeout)
1070 struct umtx_abs_timeout timo;
1078 if (timeout != NULL)
1092 if (owner == UMUTEX_UNOWNED)
1100 if (owner == UMUTEX_CONTESTED) {
1102 UMUTEX_CONTESTED,
id | UMUTEX_CONTESTED);
1103 if (owner == UMUTEX_CONTESTED)
1126 AUTO_SHARE, &uq->uq_key)) != 0)
1129 umtxq_lock(&uq->uq_key);
1133 umtxq_unlock(&uq->uq_key);
1141 old =
casuword32(m, owner, owner | UMUTEX_CONTESTED);
1145 umtxq_lock(&uq->uq_key);
1147 umtxq_unlock(&uq->uq_key);
1157 umtxq_lock(&uq->uq_key);
1162 umtxq_unlock(&uq->uq_key);
1169 if (timeout == NULL) {
1175 if (error == ERESTART)
1185do_unlock_umtx32(
struct thread *td, uint32_t *m, uint32_t
id)
1187 struct umtx_key key;
1200 if ((owner & ~UMUTEX_CONTESTED) !=
id)
1204 if ((owner & UMUTEX_CONTESTED) == 0) {
1214 if ((error =
umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE,
1229 count <= 1 ? UMUTEX_UNOWNED : UMUTEX_CONTESTED);
1249 struct _umtx_time *timeout,
int compat32,
int is_private)
1251 struct umtx_abs_timeout timo;
1259 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0)
1262 if (timeout != NULL)
1265 umtxq_lock(&uq->uq_key);
1267 umtxq_unlock(&uq->uq_key);
1268 if (compat32 == 0) {
1269 error = fueword(
addr, &tmp);
1273 error = fueword32(
addr, &tmp32);
1279 umtxq_lock(&uq->uq_key);
1282 error =
umtxq_sleep(uq,
"uwait", timeout == NULL ?
1284 if ((uq->uq_flags & UQF_UMTXQ) == 0)
1288 }
else if ((uq->uq_flags & UQF_UMTXQ) != 0) {
1291 umtxq_unlock(&uq->uq_key);
1293 if (error == ERESTART)
1304 struct umtx_key key;
1308 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0)
1322 struct _umtx_time *timeout,
int mode)
1324 struct umtx_abs_timeout timo;
1326 uint32_t owner, old, id;
1332 if (timeout != NULL)
1340 rv = fueword32(&m->m_owner, &owner);
1344 if (owner == UMUTEX_UNOWNED ||
1345 owner == UMUTEX_CONTESTED ||
1346 owner == UMUTEX_RB_OWNERDEAD ||
1347 owner == UMUTEX_RB_NOTRECOV)
1356 if (owner == UMUTEX_RB_OWNERDEAD) {
1357 rv = casueword32(&m->m_owner,
1358 UMUTEX_RB_OWNERDEAD, &owner,
1359 id | UMUTEX_CONTESTED);
1363 MPASS(owner == UMUTEX_RB_OWNERDEAD);
1364 return (EOWNERDEAD);
1372 if (owner == UMUTEX_RB_NOTRECOV)
1373 return (ENOTRECOVERABLE);
1379 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED,
1387 MPASS(owner == UMUTEX_UNOWNED);
1396 if (owner == UMUTEX_CONTESTED) {
1397 rv = casueword32(&m->m_owner,
1398 UMUTEX_CONTESTED, &owner,
1399 id | UMUTEX_CONTESTED);
1404 MPASS(owner == UMUTEX_CONTESTED);
1440 umtxq_lock(&uq->uq_key);
1443 umtxq_unlock(&uq->uq_key);
1451 rv = casueword32(&m->m_owner, owner, &old,
1452 owner | UMUTEX_CONTESTED);
1455 if (rv == -1 || rv == 1) {
1456 umtxq_lock(&uq->uq_key);
1459 umtxq_unlock(&uq->uq_key);
1476 umtxq_lock(&uq->uq_key);
1478 MPASS(old == owner);
1479 error =
umtxq_sleep(uq,
"umtxn", timeout == NULL ?
1482 umtxq_unlock(&uq->uq_key);
1498 struct umtx_key key;
1499 uint32_t owner, old, id, newlock;
1508 error = fueword32(&m->m_owner, &owner);
1512 if ((owner & ~UMUTEX_CONTESTED) !=
id)
1516 if ((owner & UMUTEX_CONTESTED) == 0) {
1517 error = casueword32(&m->m_owner, owner, &old, newlock);
1526 MPASS(old == owner);
1546 newlock |= UMUTEX_CONTESTED;
1547 error = casueword32(&m->m_owner, owner, &old, newlock);
1573 struct umtx_key key;
1580 error = fueword32(&m->m_owner, &owner);
1584 if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD &&
1585 owner != UMUTEX_RB_NOTRECOV)
1588 error = fueword32(&m->m_flags, &
flags);
1602 if (
count <= 1 && owner != UMUTEX_RB_OWNERDEAD &&
1603 owner != UMUTEX_RB_NOTRECOV) {
1604 error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
1608 }
else if (error == 1) {
1621 if (error == 0 &&
count != 0) {
1622 MPASS((owner & ~UMUTEX_CONTESTED) == 0 ||
1623 owner == UMUTEX_RB_OWNERDEAD ||
1624 owner == UMUTEX_RB_NOTRECOV);
1639 struct umtx_key key;
1640 uint32_t owner, old;
1645 switch (
flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT |
1649 type = TYPE_NORMAL_UMUTEX;
1651 case UMUTEX_PRIO_INHERIT:
1652 type = TYPE_PI_UMUTEX;
1654 case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST):
1655 type = TYPE_PI_ROBUST_UMUTEX;
1657 case UMUTEX_PRIO_PROTECT:
1658 type = TYPE_PP_UMUTEX;
1660 case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST):
1661 type = TYPE_PP_ROBUST_UMUTEX;
1675 error = fueword32(&m->m_owner, &owner);
1684 while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 &&
1685 (
count > 1 || (
count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) {
1686 error = casueword32(&m->m_owner, owner, &old,
1687 owner | UMUTEX_CONTESTED);
1693 MPASS(old == owner);
1701 if (error == EFAULT) {
1703 }
else if (
count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 ||
1704 owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV))
1718 TAILQ_INIT(&pi->pi_blocked);
1737 struct umtx_q *uq, *uq1, *uq2;
1751 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq);
1752 uq2 = TAILQ_NEXT(uq, uq_lockq);
1753 if ((uq1 != NULL &&
UPRI(td) <
UPRI(uq1->uq_thread)) ||
1754 (uq2 != NULL &&
UPRI(td) >
UPRI(uq2->uq_thread))) {
1759 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
1760 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
1761 td1 = uq1->uq_thread;
1762 MPASS(td1->td_proc->p_magic == P_MAGIC);
1768 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
1770 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
1775static struct umtx_pi *
1778 struct umtx_q *uq_owner;
1780 if (pi->pi_owner == NULL)
1782 uq_owner = pi->pi_owner->td_umtxq;
1783 if (uq_owner == NULL)
1785 return (uq_owner->uq_pi_blocked);
1794 struct umtx_pi *pi1;
1830 pi = uq->uq_pi_blocked;
1838 if (td == NULL || td == curthread)
1841 MPASS(td->td_proc != NULL);
1842 MPASS(td->td_proc->p_magic == P_MAGIC);
1845 if (td->td_lend_user_pri > pri)
1857 pi = uq->uq_pi_blocked;
1872 struct umtx_q *uq, *uq_owner;
1873 struct umtx_pi *pi2;
1880 while (pi != NULL && pi->pi_owner != NULL) {
1882 uq_owner = pi->pi_owner->td_umtxq;
1884 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) {
1885 uq = TAILQ_FIRST(&pi2->pi_blocked);
1887 if (pri >
UPRI(uq->uq_thread))
1888 pri =
UPRI(uq->uq_thread);
1892 if (pri > uq_owner->uq_inherited_pri)
1893 pri = uq_owner->uq_inherited_pri;
1894 thread_lock(pi->pi_owner);
1896 thread_unlock(pi->pi_owner);
1897 if ((pi = uq_owner->uq_pi_blocked) != NULL)
1908 struct umtx_q *uq_owner;
1910 uq_owner = owner->td_umtxq;
1912 MPASS(pi->pi_owner == NULL);
1913 pi->pi_owner = owner;
1914 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link);
1925 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link);
1926 pi->pi_owner = NULL;
1939 if (pi->pi_owner == owner) {
1944 if (pi->pi_owner != NULL) {
1952 uq = TAILQ_FIRST(&pi->pi_blocked);
1954 pri =
UPRI(uq->uq_thread);
1956 if (pri <
UPRI(owner))
1958 thread_unlock(owner);
1979 pi = uq->uq_pi_blocked;
1992 const char *wmesg,
struct umtx_abs_timeout *timo,
bool shared)
1994 struct thread *td, *td1;
1998 struct umtxq_chain *uc;
2004 KASSERT(td == curthread, (
"inconsistent uq_thread"));
2006 KASSERT(uc->uc_busy != 0, (
"umtx chain is not busy"));
2009 if (pi->pi_owner == NULL) {
2011 td1 =
tdfind(owner, shared ? -1 : td->td_proc->p_pid);
2014 if (pi->pi_owner == NULL)
2016 PROC_UNLOCK(td1->td_proc);
2020 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
2021 pri =
UPRI(uq1->uq_thread);
2027 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
2029 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);
2031 uq->uq_pi_blocked = pi;
2033 td->td_flags |= TDF_UPIBLOCKED;
2043 uq->uq_pi_blocked = NULL;
2045 td->td_flags &= ~TDF_UPIBLOCKED;
2047 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
2050 umtxq_unlock(&uq->uq_key);
2073 struct umtxq_chain *uc;
2077 KASSERT(pi->pi_refcount > 0, (
"invalid reference count"));
2078 if (--pi->pi_refcount == 0) {
2080 if (pi->pi_owner != NULL)
2082 KASSERT(TAILQ_EMPTY(&pi->pi_blocked),
2083 (
"blocked queue not empty"));
2085 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink);
2096 struct umtxq_chain *uc;
2102 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) {
2103 if (umtx_key_match(&pi->pi_key, key)) {
2116 struct umtxq_chain *uc;
2120 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink);
2129 struct umtx_q *uq_first, *uq_first2, *uq_me;
2130 struct umtx_pi *pi, *pi2;
2135 if (uq_first != NULL) {
2137 pi = uq_first->uq_pi_blocked;
2138 KASSERT(pi != NULL, (
"pi == NULL?"));
2139 if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) {
2144 uq_me = td->td_umtxq;
2145 if (pi->pi_owner == td)
2148 uq_first = TAILQ_FIRST(&pi->pi_blocked);
2149 while (uq_first != NULL &&
2150 (uq_first->uq_flags & UQF_UMTXQ) == 0) {
2151 uq_first = TAILQ_NEXT(uq_first, uq_lockq);
2154 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) {
2155 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked);
2156 if (uq_first2 != NULL) {
2157 if (pri >
UPRI(uq_first2->uq_thread))
2158 pri =
UPRI(uq_first2->uq_thread);
2182 if (pi->pi_owner == td)
2195 struct _umtx_time *timeout,
int try)
2197 struct umtx_abs_timeout timo;
2199 struct umtx_pi *pi, *new_pi;
2200 uint32_t id, old_owner, owner, old;
2211 if (timeout != NULL)
2214 umtxq_lock(&uq->uq_key);
2218 if (new_pi == NULL) {
2219 umtxq_unlock(&uq->uq_key);
2221 umtxq_lock(&uq->uq_key);
2228 if (new_pi != NULL) {
2229 new_pi->pi_key = uq->uq_key;
2235 umtxq_unlock(&uq->uq_key);
2245 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner,
id);
2253 MPASS(owner == UMUTEX_UNOWNED);
2258 if (owner == UMUTEX_RB_NOTRECOV) {
2259 error = ENOTRECOVERABLE;
2274 if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
2276 rv = casueword32(&m->m_owner, owner, &owner,
2277 id | UMUTEX_CONTESTED);
2298 MPASS(owner == old_owner);
2299 umtxq_lock(&uq->uq_key);
2303 umtxq_unlock(&uq->uq_key);
2312 id | UMUTEX_CONTESTED, old_owner);
2314 if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD)
2319 if ((owner & ~UMUTEX_CONTESTED) == id) {
2336 umtxq_lock(&uq->uq_key);
2338 umtxq_unlock(&uq->uq_key);
2346 rv = casueword32(&m->m_owner, owner, &old, owner |
2370 umtxq_lock(&uq->uq_key);
2373 MPASS(old == owner);
2375 "umtxpi", timeout == NULL ? NULL : &timo,
2376 (
flags & USYNC_PROCESS_SHARED) != 0);
2385 umtxq_lock(&uq->uq_key);
2387 umtxq_unlock(&uq->uq_key);
2399 struct umtx_key key;
2400 uint32_t id, new_owner, old, owner;
2409 error = fueword32(&m->m_owner, &owner);
2413 if ((owner & ~UMUTEX_CONTESTED) !=
id)
2419 if ((owner & UMUTEX_CONTESTED) == 0) {
2420 error = casueword32(&m->m_owner, owner, &old, new_owner);
2459 new_owner |= UMUTEX_CONTESTED;
2461 error = casueword32(&m->m_owner, owner, &old, new_owner);
2471 if (error == 0 && old != owner)
2481 struct _umtx_time *timeout,
int try)
2483 struct umtx_abs_timeout timo;
2484 struct umtx_q *uq, *uq2;
2488 int error, pri, old_inherited_pri, su, rv;
2497 if (timeout != NULL)
2500 su = (
priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2502 old_inherited_pri = uq->uq_inherited_pri;
2503 umtxq_lock(&uq->uq_key);
2505 umtxq_unlock(&uq->uq_key);
2507 rv = fueword32(&m->m_ceilings[0], &ceiling);
2512 ceiling = RTP_PRIO_MAX - ceiling;
2513 if (ceiling > RTP_PRIO_MAX) {
2519 if (
UPRI(td) < PRI_MIN_REALTIME + ceiling) {
2524 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
2525 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
2527 if (uq->uq_inherited_pri <
UPRI(td))
2533 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
2534 id | UMUTEX_CONTESTED);
2541 MPASS(owner == UMUTEX_CONTESTED);
2546 if (owner == UMUTEX_RB_OWNERDEAD) {
2547 rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
2548 &owner,
id | UMUTEX_CONTESTED);
2554 MPASS(owner == UMUTEX_RB_OWNERDEAD);
2577 }
else if (owner == UMUTEX_RB_NOTRECOV) {
2578 error = ENOTRECOVERABLE;
2591 umtxq_lock(&uq->uq_key);
2594 error =
umtxq_sleep(uq,
"umtxpp", timeout == NULL ?
2597 umtxq_unlock(&uq->uq_key);
2600 uq->uq_inherited_pri = old_inherited_pri;
2602 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2603 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2605 if (pri >
UPRI(uq2->uq_thread))
2606 pri =
UPRI(uq2->uq_thread);
2609 if (pri > uq->uq_inherited_pri)
2610 pri = uq->uq_inherited_pri;
2617 if (error != 0 && error != EOWNERDEAD) {
2619 uq->uq_inherited_pri = old_inherited_pri;
2621 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2622 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2624 if (pri >
UPRI(uq2->uq_thread))
2625 pri =
UPRI(uq2->uq_thread);
2628 if (pri > uq->uq_inherited_pri)
2629 pri = uq->uq_inherited_pri;
2648 struct umtx_key key;
2649 struct umtx_q *uq, *uq2;
2651 uint32_t id, owner, rceiling;
2652 int error, pri, new_inherited_pri, su;
2656 su = (
priv_check(td, PRIV_SCHED_RTPRIO) == 0);
2661 error = fueword32(&m->m_owner, &owner);
2665 if ((owner & ~UMUTEX_CONTESTED) !=
id)
2668 error = copyin(&m->m_ceilings[1], &rceiling,
sizeof(uint32_t));
2673 new_inherited_pri = PRI_MAX;
2675 rceiling = RTP_PRIO_MAX - rceiling;
2676 if (rceiling > RTP_PRIO_MAX)
2678 new_inherited_pri = PRI_MIN_REALTIME + rceiling;
2708 uq->uq_inherited_pri = new_inherited_pri;
2710 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
2711 uq2 = TAILQ_FIRST(&pi->pi_blocked);
2713 if (pri >
UPRI(uq2->uq_thread))
2714 pri =
UPRI(uq2->uq_thread);
2717 if (pri > uq->uq_inherited_pri)
2718 pri = uq->uq_inherited_pri;
2730 uint32_t *old_ceiling)
2733 uint32_t
flags, id, owner, save_ceiling;
2736 error = fueword32(&m->m_flags, &
flags);
2739 if ((
flags & UMUTEX_PRIO_PROTECT) == 0)
2741 if (ceiling > RTP_PRIO_MAX)
2750 umtxq_lock(&uq->uq_key);
2752 umtxq_unlock(&uq->uq_key);
2754 rv = fueword32(&m->m_ceilings[0], &save_ceiling);
2760 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
2761 id | UMUTEX_CONTESTED);
2768 MPASS(owner == UMUTEX_CONTESTED);
2769 rv = suword32(&m->m_ceilings[0], ceiling);
2770 rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
2771 error = (rv == 0 && rv1 == 0) ? 0: EFAULT;
2775 if ((owner & ~UMUTEX_CONTESTED) == id) {
2776 rv = suword32(&m->m_ceilings[0], ceiling);
2777 error = rv == 0 ? 0 : EFAULT;
2781 if (owner == UMUTEX_RB_OWNERDEAD) {
2784 }
else if (owner == UMUTEX_RB_NOTRECOV) {
2785 error = ENOTRECOVERABLE;
2801 umtxq_lock(&uq->uq_key);
2806 umtxq_unlock(&uq->uq_key);
2808 umtxq_lock(&uq->uq_key);
2812 umtxq_unlock(&uq->uq_key);
2814 if (error == 0 && old_ceiling != NULL) {
2815 rv = suword32(old_ceiling, save_ceiling);
2816 error = rv == 0 ? 0 : EFAULT;
2826 struct _umtx_time *timeout,
int mode)
2831 error = fueword32(&m->m_flags, &
flags);
2835 switch (
flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2839 case UMUTEX_PRIO_INHERIT:
2842 case UMUTEX_PRIO_PROTECT:
2848 if (timeout == NULL) {
2853 if (error == ERESTART)
2868 error = fueword32(&m->m_flags, &
flags);
2872 switch (
flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
2875 case UMUTEX_PRIO_INHERIT:
2877 case UMUTEX_PRIO_PROTECT:
2886 struct timespec *timeout, u_long wflags)
2888 struct umtx_abs_timeout timo;
2890 uint32_t
flags, clockid, hasw;
2894 error = fueword32(&cv->c_flags, &
flags);
2901 if ((wflags & CVWAIT_CLOCKID) != 0) {
2902 error = fueword32(&cv->c_clockid, &clockid);
2907 if (clockid < CLOCK_REALTIME ||
2908 clockid >= CLOCK_THREAD_CPUTIME_ID) {
2914 clockid = CLOCK_REALTIME;
2917 umtxq_lock(&uq->uq_key);
2920 umtxq_unlock(&uq->uq_key);
2926 error = fueword32(&cv->c_has_waiters, &hasw);
2927 if (error == 0 && hasw == 0)
2928 suword32(&cv->c_has_waiters, 1);
2934 if (timeout != NULL)
2936 (wflags & CVWAIT_ABSTIME) != 0, timeout);
2938 umtxq_lock(&uq->uq_key);
2940 error =
umtxq_sleep(uq,
"ucond", timeout == NULL ?
2944 if ((uq->uq_flags & UQF_UMTXQ) == 0)
2953 if ((uq->uq_flags & UQF_UMTXQ) != 0) {
2954 int oldlen = uq->uq_cur_queue->length;
2957 umtxq_unlock(&uq->uq_key);
2958 suword32(&cv->c_has_waiters, 0);
2959 umtxq_lock(&uq->uq_key);
2963 if (error == ERESTART)
2967 umtxq_unlock(&uq->uq_key);
2978 struct umtx_key key;
2979 int error, cnt, nwake;
2982 error = fueword32(&cv->c_flags, &
flags);
2993 error = suword32(&cv->c_has_waiters, 0);
3007 struct umtx_key key;
3011 error = fueword32(&cv->c_flags, &
flags);
3022 error = suword32(&cv->c_has_waiters, 0);
3034 struct _umtx_time *timeout)
3036 struct umtx_abs_timeout timo;
3038 uint32_t
flags, wrflags;
3039 int32_t state, oldstate;
3040 int32_t blocked_readers;
3041 int error, error1, rv;
3044 error = fueword32(&rwlock->rw_flags, &
flags);
3051 if (timeout != NULL)
3054 wrflags = URWLOCK_WRITE_OWNER;
3055 if (!(fflag & URWLOCK_PREFER_READER) && !(
flags & URWLOCK_PREFER_READER))
3056 wrflags |= URWLOCK_WRITE_WAITERS;
3059 rv = fueword32(&rwlock->rw_state, &state);
3066 while (!(state & wrflags)) {
3067 if (__predict_false(URWLOCK_READER_COUNT(state) ==
3068 URWLOCK_MAX_READERS)) {
3072 rv = casueword32(&rwlock->rw_state, state,
3073 &oldstate, state + 1);
3079 MPASS(oldstate == state);
3093 umtxq_lock(&uq->uq_key);
3095 umtxq_unlock(&uq->uq_key);
3101 rv = fueword32(&rwlock->rw_state, &state);
3106 while (error == 0 && (state & wrflags) &&
3107 !(state & URWLOCK_READ_WAITERS)) {
3108 rv = casueword32(&rwlock->rw_state, state,
3109 &oldstate, state | URWLOCK_READ_WAITERS);
3115 MPASS(oldstate == state);
3129 if (!(state & wrflags)) {
3142 rv = fueword32(&rwlock->rw_blocked_readers,
3149 suword32(&rwlock->rw_blocked_readers, blocked_readers+1);
3151 while (state & wrflags) {
3152 umtxq_lock(&uq->uq_key);
3156 error =
umtxq_sleep(uq,
"urdlck", timeout == NULL ?
3161 umtxq_unlock(&uq->uq_key);
3164 rv = fueword32(&rwlock->rw_state, &state);
3172 rv = fueword32(&rwlock->rw_blocked_readers,
3179 suword32(&rwlock->rw_blocked_readers, blocked_readers-1);
3180 if (blocked_readers == 1) {
3181 rv = fueword32(&rwlock->rw_state, &state);
3188 rv = casueword32(&rwlock->rw_state, state,
3189 &oldstate, state & ~URWLOCK_READ_WAITERS);
3195 MPASS(oldstate == state);
3213 if (error == ERESTART)
3219do_rw_wrlock(
struct thread *td,
struct urwlock *rwlock,
struct _umtx_time *timeout)
3221 struct umtx_abs_timeout timo;
3224 int32_t state, oldstate;
3225 int32_t blocked_writers;
3226 int32_t blocked_readers;
3227 int error, error1, rv;
3230 error = fueword32(&rwlock->rw_flags, &
flags);
3237 if (timeout != NULL)
3240 blocked_readers = 0;
3242 rv = fueword32(&rwlock->rw_state, &state);
3247 while ((state & URWLOCK_WRITE_OWNER) == 0 &&
3248 URWLOCK_READER_COUNT(state) == 0) {
3249 rv = casueword32(&rwlock->rw_state, state,
3250 &oldstate, state | URWLOCK_WRITE_OWNER);
3256 MPASS(oldstate == state);
3267 if ((state & (URWLOCK_WRITE_OWNER |
3268 URWLOCK_WRITE_WAITERS)) == 0 &&
3269 blocked_readers != 0) {
3270 umtxq_lock(&uq->uq_key);
3275 umtxq_unlock(&uq->uq_key);
3282 umtxq_lock(&uq->uq_key);
3284 umtxq_unlock(&uq->uq_key);
3290 rv = fueword32(&rwlock->rw_state, &state);
3294 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) ||
3295 URWLOCK_READER_COUNT(state) != 0) &&
3296 (state & URWLOCK_WRITE_WAITERS) == 0) {
3297 rv = casueword32(&rwlock->rw_state, state,
3298 &oldstate, state | URWLOCK_WRITE_WAITERS);
3304 MPASS(oldstate == state);
3317 if ((state & URWLOCK_WRITE_OWNER) == 0 &&
3318 URWLOCK_READER_COUNT(state) == 0) {
3326 rv = fueword32(&rwlock->rw_blocked_writers,
3333 suword32(&rwlock->rw_blocked_writers, blocked_writers + 1);
3335 while ((state & URWLOCK_WRITE_OWNER) ||
3336 URWLOCK_READER_COUNT(state) != 0) {
3337 umtxq_lock(&uq->uq_key);
3341 error =
umtxq_sleep(uq,
"uwrlck", timeout == NULL ?
3346 umtxq_unlock(&uq->uq_key);
3349 rv = fueword32(&rwlock->rw_state, &state);
3356 rv = fueword32(&rwlock->rw_blocked_writers,
3363 suword32(&rwlock->rw_blocked_writers, blocked_writers-1);
3364 if (blocked_writers == 1) {
3365 rv = fueword32(&rwlock->rw_state, &state);
3372 rv = casueword32(&rwlock->rw_state, state,
3373 &oldstate, state & ~URWLOCK_WRITE_WAITERS);
3379 MPASS(oldstate == state);
3395 rv = fueword32(&rwlock->rw_blocked_readers,
3403 blocked_readers = 0;
3409 if (error == ERESTART)
3419 int32_t state, oldstate;
3420 int error, rv, q,
count;
3423 error = fueword32(&rwlock->rw_flags, &
flags);
3430 error = fueword32(&rwlock->rw_state, &state);
3435 if (state & URWLOCK_WRITE_OWNER) {
3437 rv = casueword32(&rwlock->rw_state, state,
3438 &oldstate, state & ~URWLOCK_WRITE_OWNER);
3445 if (!(oldstate & URWLOCK_WRITE_OWNER)) {
3455 }
else if (URWLOCK_READER_COUNT(state) != 0) {
3457 rv = casueword32(&rwlock->rw_state, state,
3458 &oldstate, state - 1);
3465 if (URWLOCK_READER_COUNT(oldstate) == 0) {
3482 if (!(
flags & URWLOCK_PREFER_READER)) {
3483 if (state & URWLOCK_WRITE_WAITERS) {
3485 q = UMTX_EXCLUSIVE_QUEUE;
3486 }
else if (state & URWLOCK_READ_WAITERS) {
3488 q = UMTX_SHARED_QUEUE;
3491 if (state & URWLOCK_READ_WAITERS) {
3493 q = UMTX_SHARED_QUEUE;
3494 }
else if (state & URWLOCK_WRITE_WAITERS) {
3496 q = UMTX_EXCLUSIVE_QUEUE;
3501 umtxq_lock(&uq->uq_key);
3505 umtxq_unlock(&uq->uq_key);
3512#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
3514do_sem_wait(
struct thread *td,
struct _usem *
sem,
struct _umtx_time *timeout)
3516 struct umtx_abs_timeout timo;
3522 error = fueword32(&
sem->_flags, &
flags);
3529 if (timeout != NULL)
3533 umtxq_lock(&uq->uq_key);
3536 umtxq_unlock(&uq->uq_key);
3537 rv = casueword32(&
sem->_has_waiters, 0, &count1, 1);
3539 rv1 = fueword32(&
sem->_count, &
count);
3540 if (rv == -1 || (rv == 0 && (rv1 == -1 ||
count != 0)) ||
3541 (rv == 1 && count1 == 0)) {
3542 umtxq_lock(&uq->uq_key);
3545 umtxq_unlock(&uq->uq_key);
3555 error = rv == -1 ? EFAULT : 0;
3558 umtxq_lock(&uq->uq_key);
3561 error =
umtxq_sleep(uq,
"usem", timeout == NULL ? NULL : &timo);
3563 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3568 if (error == ERESTART && timeout != NULL &&
3569 (timeout->_flags & UMTX_ABSTIME) == 0)
3572 umtxq_unlock(&uq->uq_key);
3582do_sem_wake(
struct thread *td,
struct _usem *
sem)
3584 struct umtx_key key;
3588 error = fueword32(&
sem->_flags, &
flags);
3604 error = suword32(&
sem->_has_waiters, 0);
3621 struct umtx_abs_timeout timo;
3628 if (timeout != NULL)
3635 umtxq_lock(&uq->uq_key);
3638 umtxq_unlock(&uq->uq_key);
3639 rv = fueword32(&
sem->_count, &
count);
3641 umtxq_lock(&uq->uq_key);
3644 umtxq_unlock(&uq->uq_key);
3649 if (USEM_COUNT(
count) != 0) {
3650 umtxq_lock(&uq->uq_key);
3653 umtxq_unlock(&uq->uq_key);
3657 if (
count == USEM_HAS_WAITERS)
3659 rv = casueword32(&
sem->_count, 0, &
count, USEM_HAS_WAITERS);
3662 umtxq_lock(&uq->uq_key);
3665 umtxq_unlock(&uq->uq_key);
3674 umtxq_lock(&uq->uq_key);
3677 error =
umtxq_sleep(uq,
"usem", timeout == NULL ? NULL : &timo);
3679 if ((uq->uq_flags & UQF_UMTXQ) == 0)
3683 if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) {
3685 if (error == ERESTART)
3687 if (error == EINTR) {
3690 timespecsub(&timo.end, &timo.cur,
3691 &timeout->_timeout);
3695 umtxq_unlock(&uq->uq_key);
3706 struct umtx_key key;
3710 rv = fueword32(&
sem->_flags, &
flags);
3725 rv = fueword32(&
sem->_count, &
count);
3726 while (rv != -1 &&
count & USEM_HAS_WAITERS) {
3728 count & ~USEM_HAS_WAITERS);
3751#ifdef COMPAT_FREEBSD10
3753freebsd10__umtx_lock(
struct thread *td,
struct freebsd10__umtx_lock_args *uap)
3755 return (do_lock_umtx(td, uap->umtx, td->td_tid, 0));
3759freebsd10__umtx_unlock(
struct thread *td,
3760 struct freebsd10__umtx_unlock_args *uap)
3762 return (do_unlock_umtx(td, uap->umtx, td->td_tid));
3771 error = copyin(uaddr, tsp,
sizeof(*tsp));
3773 if (tsp->tv_sec < 0 ||
3774 tsp->tv_nsec >= 1000000000 ||
3786 if (size <=
sizeof(tp->_timeout)) {
3787 tp->_clockid = CLOCK_REALTIME;
3789 error = copyin(uaddr, &tp->_timeout,
sizeof(tp->_timeout));
3791 error = copyin(uaddr, tp,
sizeof(*tp));
3794 if (tp->_timeout.tv_sec < 0 ||
3795 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0)
3802 struct umtx_robust_lists_params *rb)
3805 if (size >
sizeof(*rb))
3807 return (copyin(uaddr, rb, size));
3819 KASSERT(sz >=
sizeof(*tsp),
3820 (
"umtx_copyops specifies incorrect sizes"));
3822 return (copyout(tsp, uaddr,
sizeof(*tsp)));
3825#ifdef COMPAT_FREEBSD10
3827__umtx_op_lock_umtx(
struct thread *td,
struct _umtx_op_args *uap,
3830 struct timespec *
ts, timeout;
3834 if (uap->uaddr2 == NULL)
3842#ifdef COMPAT_FREEBSD32
3844 return (do_lock_umtx32(td, uap->obj, uap->val,
ts));
3846 return (do_lock_umtx(td, uap->obj, uap->val,
ts));
3850__umtx_op_unlock_umtx(
struct thread *td,
struct _umtx_op_args *uap,
3853#ifdef COMPAT_FREEBSD32
3855 return (do_unlock_umtx32(td, uap->obj, uap->val));
3857 return (do_unlock_umtx(td, uap->obj, uap->val));
3861#if !defined(COMPAT_FREEBSD10)
3866 return (EOPNOTSUPP);
3874 struct _umtx_time timeout, *tm_p;
3877 if (uap->uaddr2 == NULL)
3881 uap->uaddr2, (
size_t)uap->uaddr1, &timeout);
3893 struct _umtx_time timeout, *tm_p;
3896 if (uap->uaddr2 == NULL)
3900 uap->uaddr2, (
size_t)uap->uaddr1, &timeout);
3905 return (
do_wait(td, uap->obj, uap->val, tm_p, 1, 0));
3912 struct _umtx_time *tm_p, timeout;
3915 if (uap->uaddr2 == NULL)
3919 uap->uaddr2, (
size_t)uap->uaddr1, &timeout);
3924 return (
do_wait(td, uap->obj, uap->val, tm_p, 1, 1));
3935#define BATCH_SIZE 128
3940 int count, error, i, pos, tocopy;
3942 upp = (
char **)uap->obj;
3947 error = copyin(upp + pos, uaddrs, tocopy *
sizeof(
char *));
3950 for (i = 0; i < tocopy; ++i) {
3962 int count, error, i, pos, tocopy;
3964 upp = (uint32_t *)uap->obj;
3969 error = copyin(upp + pos, uaddrs, tocopy *
sizeof(uint32_t));
3972 for (i = 0; i < tocopy; ++i) {
4003 struct _umtx_time *tm_p, timeout;
4007 if (uap->uaddr2 == NULL)
4011 uap->uaddr2, (
size_t)uap->uaddr1, &timeout);
4031 struct _umtx_time *tm_p, timeout;
4035 if (uap->uaddr2 == NULL)
4039 uap->uaddr2, (
size_t)uap->uaddr1, &timeout);
4075 struct timespec *
ts, timeout;
4079 if (uap->uaddr2 == NULL)
4087 return (
do_cv_wait(td, uap->obj, uap->uaddr1,
ts, uap->val));
4110 struct _umtx_time timeout;
4114 if (uap->uaddr2 == NULL) {
4118 (
size_t)uap->uaddr1, &timeout);
4121 error =
do_rw_rdlock(td, uap->obj, uap->val, &timeout);
4130 struct _umtx_time timeout;
4134 if (uap->uaddr2 == NULL) {
4138 (
size_t)uap->uaddr1, &timeout);
4155#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4157__umtx_op_sem_wait(
struct thread *td,
struct _umtx_op_args *uap,
4160 struct _umtx_time *tm_p, timeout;
4164 if (uap->uaddr2 == NULL)
4168 uap->uaddr2, (
size_t)uap->uaddr1, &timeout);
4173 return (do_sem_wait(td, uap->obj, tm_p));
4177__umtx_op_sem_wake(
struct thread *td,
struct _umtx_op_args *uap,
4181 return (do_sem_wake(td, uap->obj));
4197 struct _umtx_time *tm_p, timeout;
4202 if (uap->uaddr2 == NULL) {
4206 uasize = (size_t)uap->uaddr1;
4213 if (error == EINTR && uap->uaddr2 != NULL &&
4214 (timeout._flags & UMTX_ABSTIME) == 0 &&
4235#define USHM_OBJ_UMTX(o) \
4236 ((struct umtx_shm_obj_list *)(&(o)->umtx_data))
4238#define USHMF_REG_LINKED 0x0001
4239#define USHMF_OBJ_LINKED 0x0002
4243 struct umtx_key ushm_key;
4244 struct ucred *ushm_cred;
4245 struct shmfd *ushm_obj;
4264 struct umtx_shm_reg_head d;
4271 TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) {
4272 TAILQ_REMOVE(&d, reg, ushm_reg_link);
4284 struct umtx_shm_reg_head *reg_head;
4286 KASSERT(key->shared, (
"umtx_p_find_rg: private key"));
4289 TAILQ_FOREACH(reg, reg_head, ushm_reg_link) {
4290 KASSERT(reg->ushm_key.shared,
4291 (
"non-shared key on reg %p %d", reg, reg->ushm_key.shared));
4292 if (reg->ushm_key.info.shared.object ==
4293 key->info.shared.object &&
4294 reg->ushm_key.info.shared.offset ==
4295 key->info.shared.offset) {
4296 KASSERT(reg->ushm_key.type == TYPE_SHM, (
"TYPE_USHM"));
4297 KASSERT(reg->ushm_refcnt > 0,
4298 (
"reg %p refcnt 0 onlist", reg));
4300 (
"reg %p not linked", reg));
4323 chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
4335 KASSERT(reg->ushm_refcnt > 0, (
"ushm_reg %p refcnt 0", reg));
4337 res = reg->ushm_refcnt == 0;
4341 reg, ushm_reg_link);
4342 reg->ushm_flags &= ~USHMF_REG_LINKED;
4345 LIST_REMOVE(reg, ushm_obj_link);
4346 reg->ushm_flags &= ~USHMF_OBJ_LINKED;
4359 object = reg->ushm_obj->shm_object;
4360 VM_OBJECT_WLOCK(
object);
4361 object->flags |= OBJ_UMTXDEAD;
4362 VM_OBJECT_WUNLOCK(
object);
4389 LIST_FOREACH_SAFE(reg,
USHM_OBJ_UMTX(
object), ushm_obj_link, reg1) {
4414 cred = td->td_ucred;
4418 reg->ushm_refcnt = 1;
4419 bcopy(key, ®->ushm_key,
sizeof(*key));
4420 reg->ushm_obj =
shm_alloc(td->td_ucred, O_RDWR,
false);
4421 reg->ushm_cred =
crhold(cred);
4437 LIST_INSERT_HEAD(
USHM_OBJ_UMTX(key->info.shared.object), reg,
4449 vm_map_entry_t entry;
4456 map = &td->td_proc->p_vmspace->vm_map;
4457 res = vm_map_lookup(&map, (uintptr_t)
addr, VM_PROT_READ, &entry,
4458 &
object, &pindex, &prot, &wired);
4459 if (
res != KERN_SUCCESS)
4464 ret = (
object->flags & OBJ_UMTXDEAD) != 0 ? ENOTTY : 0;
4465 vm_map_lookup_done(map, entry);
4475 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
4484 struct umtx_key key;
4489 if (__bitcount(
flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
4490 UMTX_SHM_DESTROY| UMTX_SHM_ALIVE)) != 1)
4492 if ((
flags & UMTX_SHM_ALIVE) != 0)
4497 KASSERT(key.shared == 1, (
"non-shared key"));
4498 if ((
flags & UMTX_SHM_CREAT) != 0) {
4508 KASSERT(reg != NULL, (
"no reg"));
4509 if ((
flags & UMTX_SHM_DESTROY) != 0) {
4514 error = mac_posixshm_check_open(td->td_ucred,
4515 reg->ushm_obj, FFLAGS(O_RDWR));
4518 error =
shm_access(reg->ushm_obj, td->td_ucred,
4525 finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
4527 td->td_retval[0] =
fd;
4540 return (
umtx_shm(td, uap->uaddr1, uap->val));
4547 struct umtx_robust_lists_params rb;
4551 if ((td->td_pflags2 & TDP2_COMPAT32RB) == 0 &&
4552 (td->td_rb_list != 0 || td->td_rbp_list != 0 ||
4553 td->td_rb_inact != 0))
4555 }
else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0) {
4559 bzero(&rb,
sizeof(rb));
4565 td->td_pflags2 |= TDP2_COMPAT32RB;
4567 td->td_rb_list = rb.robust_list_offset;
4568 td->td_rbp_list = rb.robust_priv_list_offset;
4569 td->td_rb_inact = rb.robust_inact_offset;
4573#if defined(__i386__) || defined(__amd64__)
4584struct umtx_timex32 {
4585 struct timespecx32 _timeout;
4591#define timespeci386 timespec32
4592#define umtx_timei386 umtx_time32
4607#if defined(__LP64__)
4608#define timespecx32 timespec32
4609#define umtx_timex32 umtx_time32
4615 struct umtx_robust_lists_params *rbp)
4617 struct umtx_robust_lists_params_compat32 rb32;
4620 if (size >
sizeof(rb32))
4622 bzero(&rb32,
sizeof(rb32));
4623 error = copyin(uaddr, &rb32, size);
4626 CP(rb32, *rbp, robust_list_offset);
4627 CP(rb32, *rbp, robust_priv_list_offset);
4628 CP(rb32, *rbp, robust_inact_offset);
4639 error = copyin(uaddr, &ts32,
sizeof(ts32));
4664 error = copyin(uaddr, &t32,
sizeof(t32));
4681 .tv_nsec = tsp->tv_nsec,
4689 KASSERT(sz >=
sizeof(remain32),
4690 (
"umtx_copyops specifies incorrect sizes"));
4692 return (copyout(&remain32, uaddr,
sizeof(remain32)));
4696#if defined(__i386__) || defined(__LP64__)
4698umtx_copyin_timeoutx32(
const void *uaddr,
struct timespec *tsp)
4700 struct timespecx32 ts32;
4703 error = copyin(uaddr, &ts32,
sizeof(ts32));
4705 if (ts32.tv_sec < 0 ||
4706 ts32.tv_nsec >= 1000000000 ||
4710 CP(ts32, *tsp, tv_sec);
4711 CP(ts32, *tsp, tv_nsec);
4718umtx_copyin_umtx_timex32(
const void *uaddr,
size_t size,
struct _umtx_time *tp)
4720 struct umtx_timex32 t32;
4723 t32._clockid = CLOCK_REALTIME;
4725 if (size <=
sizeof(t32._timeout))
4726 error = copyin(uaddr, &t32._timeout,
sizeof(t32._timeout));
4728 error = copyin(uaddr, &t32,
sizeof(t32));
4731 if (t32._timeout.tv_sec < 0 ||
4732 t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0)
4734 TS_CP(t32, *tp, _timeout);
4736 CP(t32, *tp, _clockid);
4741umtx_copyout_timeoutx32(
void *uaddr,
size_t sz,
struct timespec *tsp)
4743 struct timespecx32 remain32 = {
4744 .tv_sec = tsp->tv_sec,
4745 .tv_nsec = tsp->tv_nsec,
4753 KASSERT(sz >=
sizeof(remain32),
4754 (
"umtx_copyops specifies incorrect sizes"));
4756 return (copyout(&remain32, uaddr,
sizeof(remain32)));
4764#ifdef COMPAT_FREEBSD10
4765 [UMTX_OP_LOCK] = __umtx_op_lock_umtx,
4766 [UMTX_OP_UNLOCK] = __umtx_op_unlock_umtx,
4788#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
4789 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait,
4790 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake,
4808 .timespec_sz =
sizeof(
struct timespec),
4809 .umtx_time_sz =
sizeof(
struct _umtx_time),
4824#if defined(__i386__) || defined(__LP64__)
4828 .copyin_umtx_time = umtx_copyin_umtx_timex32,
4830 .copyout_timeout = umtx_copyout_timeoutx32,
4831 .timespec_sz =
sizeof(
struct timespecx32),
4832 .umtx_time_sz =
sizeof(
struct umtx_timex32),
4836#ifdef COMPAT_FREEBSD32
4838#define umtx_native_ops32 umtx_native_opsi386
4840#define umtx_native_ops32 umtx_native_opsx32
4845#define UMTX_OP__FLAGS (UMTX_OP__32BIT | UMTX_OP__I386)
4849 void *uaddr1,
void *uaddr2,
const struct umtx_copyops *ops)
4851 struct _umtx_op_args uap = {
4853 .op = op & ~UMTX_OP__FLAGS,
4861 return ((*
op_table[uap.op])(td, &uap, ops));
4871 if ((uap->op & (UMTX_OP__32BIT | UMTX_OP__I386)) != 0) {
4872 if ((uap->op & UMTX_OP__I386) != 0)
4875 umtx_ops = &umtx_native_opsx32;
4877#elif !defined(__i386__)
4879 if ((uap->op & UMTX_OP__I386) != 0)
4883 if ((uap->op & UMTX_OP__32BIT) != 0)
4884 umtx_ops = &umtx_native_opsx32;
4886 return (
kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
4887 uap->uaddr2, umtx_ops));
4890#ifdef COMPAT_FREEBSD32
4891#ifdef COMPAT_FREEBSD10
4893freebsd10_freebsd32__umtx_lock(
struct thread *td,
4894 struct freebsd10_freebsd32__umtx_lock_args *uap)
4896 return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
4900freebsd10_freebsd32__umtx_unlock(
struct thread *td,
4901 struct freebsd10_freebsd32__umtx_unlock_args *uap)
4903 return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
4908freebsd32__umtx_op(
struct thread *td,
struct freebsd32__umtx_op_args *uap)
4911 return (
kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
4912 uap->uaddr2, &umtx_native_ops32));
4921 td->td_umtxq->uq_thread = td;
4940 uq->uq_inherited_pri = PRI_MAX;
4942 KASSERT(uq->uq_flags == 0, (
"uq_flags != 0"));
4943 KASSERT(uq->uq_thread == td, (
"uq_thread != td"));
4944 KASSERT(uq->uq_pi_blocked == NULL, (
"uq_pi_blocked != NULL"));
4945 KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), (
"uq_pi_contested is not empty"));
4960 KASSERT(p == curproc, (
"need curproc"));
4961 KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
4962 (p->p_flag & P_STOPPED_SINGLE) != 0,
4963 (
"curproc must be single-threaded"));
4968 FOREACH_THREAD_IN_PROC(p, td) {
4969 KASSERT(td == curthread ||
4970 ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
4971 (
"running thread %p %p", p, td));
4973 td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
4995 error = fueword32((
void *)ptr, &res32);
4999 error = fueword((
void *)ptr, &res1);
5012 struct umutex32 m32;
5015 memcpy(&m32, m,
sizeof(m32));
5016 *rb_list = m32.m_rb_lnk;
5018 *rb_list = m->m_rb_lnk;
5029 KASSERT(td->td_proc == curproc, (
"need current vmspace"));
5030 error = copyin((
void *)rbp, &m,
sizeof(m));
5033 if (rb_list != NULL)
5035 if ((m.m_flags & UMUTEX_ROBUST) == 0)
5037 if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
5039 return (inact ? 0 : EINVAL);
5045 const char *
name,
bool compat32)
5054 for (i = 0; error == 0 && rbp != 0 && i <
umtx_max_rb; i++) {
5055 if (rbp == *rb_inact) {
5063 uprintf(
"comm %s pid %d: reached umtx %smax rb %d\n",
5067 uprintf(
"comm %s pid %d: handling %srb error %d\n",
5068 td->td_proc->p_comm, td->td_proc->p_pid,
name, error);
5088 if (uq->uq_inherited_pri != PRI_MAX ||
5089 !TAILQ_EMPTY(&uq->uq_pi_contested)) {
5091 uq->uq_inherited_pri = PRI_MAX;
5092 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
5093 pi->pi_owner = NULL;
5094 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
5101 compat32 = (td->td_pflags2 & TDP2_COMPAT32RB) != 0;
5102 td->td_pflags2 &= ~TDP2_COMPAT32RB;
5104 if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
5112 rb_inact = td->td_rb_inact;
device_property_type_t type
SYSCTL_PROC(_kern_binmisc, OID_AUTO, add, CTLFLAG_MPSAFE|CTLTYPE_STRUCT|CTLFLAG_WR, NULL, IBC_ADD, sysctl_kern_binmisc, "S,ximgact_binmisc_entry", "Add an activator entry")
static struct bt_table bt
int tvtohz(struct timeval *tv)
void finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
int falloc_caps(struct thread *td, struct file **resultfp, int *resultfd, int flags, struct filecaps *fcaps)
void *() malloc(size_t size, struct malloc_type *mtp, int flags)
void free(void *addr, struct malloc_type *mtp)
SYSCTL_LONG(_hw, OID_AUTO, availpages, CTLFLAG_RD, &physmem, 0, "Amount of physical memory (in pages)")
int priv_check(struct thread *td, int priv)
struct ucred * crhold(struct ucred *cr)
void crfree(struct ucred *cr)
rlim_t() lim_cur(struct thread *td, int which)
int chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
void wakeup(const void *ident)
void wakeup_one(const void *ident)
int sysctl_handle_int(SYSCTL_HANDLER_ARGS)
int sysctl_handle_string(SYSCTL_HANDLER_ARGS)
volatile int rtc_generation
void getboottimebin(struct bintime *boottimebin)
void bintime(struct bintime *bt)
int thread_check_susp(struct thread *td, bool sleep)
struct thread * tdfind(lwpid_t tid, pid_t pid)
int kern_clock_gettime(struct thread *td, clockid_t clock_id, struct timespec *ats)
static void umtx_repropagate_priority(struct umtx_pi *pi)
struct umtx_pi * umtx_pi_lookup(struct umtx_key *key)
static void umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list, bool compat32)
static int umtx_copyin_robust_lists(const void *uaddr, size_t size, struct umtx_robust_lists_params *rb)
void umtxq_unbusy_unlocked(struct umtx_key *key)
static int __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
void umtx_pi_adjust(struct thread *td, u_char oldpri)
static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS]
void umtx_key_release(struct umtx_key *key)
static const _umtx_op_func op_table[]
static int do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
static struct umtx_shm_reg * umtx_shm_find_reg(const struct umtx_key *key)
static int do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout)
static int __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static void umtxq_signal_thread(struct umtx_q *uq)
static struct umtxq_queue * umtxq_queue_lookup(struct umtx_key *key, int q)
static int do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, struct _umtx_time *timeout, int try)
int(* _umtx_op_func)(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *umtx_ops)
#define umtxq_signal(key, nwake)
static bool umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
static const struct umtx_copyops umtx_native_ops
void umtx_shm_object_terminated(vm_object_t object)
static int __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static int kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val, void *uaddr1, void *uaddr2, const struct umtx_copyops *ops)
static int do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling, uint32_t *old_ceiling)
#define UMTXQ_ASSERT_LOCKED_BUSY(key)
static struct umtx_shm_reg_head umtx_shm_reg_delfree
static int umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact, bool compat32)
static void umtx_abs_timeout_init2(struct umtx_abs_timeout *timo, const struct _umtx_time *umtxtime)
#define GOLDEN_RATIO_PRIME
static int __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static int __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static int do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
static void umtx_pi_disown(struct umtx_pi *pi)
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN, &umtx_shm_vnobj_persistent, 0, "False forces destruction of umtx attached to file, on last close")
void umtxq_remove_queue(struct umtx_q *uq, int q)
static void umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
void umtx_pi_insert(struct umtx_pi *pi)
static int umtx_copyout_timeouti386(void *uaddr, size_t sz, struct timespec *tsp)
struct umtx_q * umtxq_alloc(void)
int kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private)
static bool umtx_pi_check_loop(struct umtx_pi *pi)
void umtx_pi_free(struct umtx_pi *pi)
static int umtx_copyin_robust_lists32(const void *uaddr, size_t size, struct umtx_robust_lists_params *rbp)
_Static_assert(sizeof(struct umutex)==sizeof(struct umutex32), "umutex32")
static struct mtx umtx_lock
static int __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static int umtx_copyin_umtx_timei386(const void *uaddr, size_t size, struct _umtx_time *tp)
static void umtx_shm_init(void)
static void umtxq_hash(struct umtx_key *key)
int umtxq_signal_mask(struct umtx_key *key, int n_wake, u_int bitset)
static int __umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static int do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
static void umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused)
static int __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static void umtx_thread_cleanup(struct thread *td)
static int __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static int umtx_shm_create_reg(struct thread *td, const struct umtx_key *key, struct umtx_shm_reg **res)
static int umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td)
struct umtx_pi * umtx_pi_alloc(int flags)
static int __umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap)
void umtxq_busy(struct umtx_key *key)
static int umtx_copyin_timeouti386(const void *uaddr, struct timespec *tsp)
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory")
static int do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, struct _umtx_time *timeout)
static int tstohz(const struct timespec *tsp)
void umtx_thread_alloc(struct thread *td)
static int do_sem2_wake(struct thread *td, struct _usem2 *sem)
static int umtx_shm_alive(struct thread *td, void *addr)
static int do_lock_umutex(struct thread *td, struct umutex *m, struct _umtx_time *timeout, int mode)
static int do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, struct _umtx_time *timeout, int mode)
void umtx_exec(struct proc *p)
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL)
static int do_rw_unlock(struct thread *td, struct urwlock *rwlock)
static struct mtx umtx_shm_lock
static int __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg)
int umtxq_requeue(struct umtx_key *key, int n_wake, struct umtx_key *key2, int n_requeue)
static void umtxq_sysinit(void *)
static void umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner)
static int do_cv_signal(struct thread *td, struct ucond *cv)
int umtx_pi_drop(struct thread *td, struct umtx_key *key, bool rb, int *count)
static int __umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap)
static struct umtx_shm_reg * umtx_shm_find_reg_locked(const struct umtx_key *key)
static int __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static int umtx_pi_allocated
static uint32_t umtx_unlock_val(uint32_t flags, bool rb)
static int __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static void umtx_shm_free_reg(struct umtx_shm_reg *reg)
static int umtxq_count_pi(struct umtx_key *key, struct umtx_q **first)
void umtxq_unbusy(struct umtx_key *key)
int umtxq_sleep(struct umtx_q *uq, const char *wmesg, struct umtx_abs_timeout *timo)
int umtx_shm_vnobj_persistent
int umtx_key_get(const void *addr, int type, int share, struct umtx_key *key)
static int __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW|CTLFLAG_MPSAFE, 0, "umtx debug")
static int umtx_abs_timeout_getsbt(struct umtx_abs_timeout *timo, sbintime_t *sbt, int *flags)
static int __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static int __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static int do_cv_broadcast(struct thread *td, struct ucond *cv)
static int umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp)
void umtxq_free(struct umtx_q *uq)
static int __umtx_op_unimpl(struct thread *td __unused, struct _umtx_op_args *uap __unused, const struct umtx_copyops *ops __unused)
static int umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res, bool compat32)
int umtx_copyin_timeout(const void *uaddr, struct timespec *tsp)
LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg)
void umtx_thread_init(struct thread *td)
int umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner, const char *wmesg, struct umtx_abs_timeout *timo, bool shared)
void umtx_thread_exit(struct thread *td)
static int __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static struct task umtx_shm_reg_delfree_task
static int do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout)
void umtx_pi_unref(struct umtx_pi *pi)
void umtx_thread_fini(struct thread *td)
#define UMTXQ_LOCKED_ASSERT(uc)
static uma_zone_t umtx_shm_reg_zone
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
static int __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static int __umtx_op_shm(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static void umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact, const char *name, bool compat32)
static int __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static uma_zone_t umtx_pi_zone
static int do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags)
static int __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
static struct umtx_pi * umtx_pi_next(struct umtx_pi *pi)
static int __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
struct umtxq_chain * umtxq_getchain(struct umtx_key *key)
static const struct umtx_copyops umtx_native_opsi386
static int do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, struct _umtx_time *timeout, int try)
static int umtx_verbose_rb
static int __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops)
static int do_wait(struct thread *td, void *addr, u_long id, struct _umtx_time *timeout, int compat32, int is_private)
int sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
void umtx_pi_ref(struct umtx_pi *pi)
static int umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp)
static int __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap, const struct umtx_copyops *ops __unused)
void umtx_shm_object_init(vm_object_t object)
static int do_wake_umutex(struct thread *td, struct umutex *m)
static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS]
static int umtxq_signal_queue(struct umtx_key *key, int n_wake, int q)
static int umtx_shm(struct thread *td, void *addr, u_int flags)
static int do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m, struct timespec *timeout, u_long wflags)
static void umtx_propagate_priority(struct thread *td)
int umtx_pi_claim(struct umtx_pi *pi, struct thread *owner)
int umtxq_count(struct umtx_key *key)
void umtx_abs_timeout_init(struct umtx_abs_timeout *timo, int clockid, int absolute, const struct timespec *timeout)
void umtxq_insert_queue(struct umtx_q *uq, int q)
void sched_lend_user_prio(struct thread *td, u_char prio)
void sched_lend_user_prio_cond(struct thread *td, u_char prio)
int(* copyout_timeout)(void *uaddr, size_t size, struct timespec *tsp)
const size_t umtx_time_sz
int(* copyin_timeout)(const void *uaddr, struct timespec *tsp)
int(* copyin_robust_lists)(const void *uaddr, size_t size, struct umtx_robust_lists_params *rbp)
int(* copyin_umtx_time)(const void *uaddr, size_t size, struct _umtx_time *tp)
struct timespeci386 _timeout
int snprintf(char *str, size_t size, const char *format,...)
int uprintf(const char *fmt,...)
int sbuf_finish(struct sbuf *s)
void sbuf_delete(struct sbuf *s)
int sbuf_printf(struct sbuf *s, const char *fmt,...)
ssize_t sbuf_len(struct sbuf *s)
char * sbuf_data(struct sbuf *s)
struct sbuf * sbuf_new(struct sbuf *s, char *buf, int length, int flags)
int sbuf_trim(struct sbuf *s)
int taskqueue_enqueue(struct taskqueue *queue, struct task *task)
int32_t fuword32(volatile const void *addr)
uint32_t casuword32(volatile uint32_t *addr, uint32_t old, uint32_t new)
long fuword(volatile const void *addr)
u_long casuword(volatile u_long *addr, u_long old, u_long new)
void shm_drop(struct shmfd *shmfd)
struct shmfd * shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
int shm_dotruncate(struct shmfd *shmfd, off_t length)
int shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
struct shmfd * shm_hold(struct shmfd *shmfd)