#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/stack.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/turnstile.h>
#include <machine/stdarg.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>
#ifdef __amd64__
#define EPOCH_ALIGN CACHE_LINE_SIZE*2
#else
#define EPOCH_ALIGN CACHE_LINE_SIZE
#endif
TAILQ_HEAD (epoch_tdlist, epoch_tracker);

typedef struct epoch_record {
    ck_epoch_record_t er_record;
    struct epoch_context er_drain_ctx;
    struct epoch *er_parent;
    volatile struct epoch_tdlist er_tdlist;
    volatile uint32_t er_gen;
    uint32_t er_cpuid;
    struct thread *er_td;
} __aligned(EPOCH_ALIGN) *epoch_record_t;

struct epoch {
    struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
    epoch_record_t e_pcpu_record;
    int e_in_use;
    int e_flags;
    struct sx e_drain_sx;
    struct mtx e_drain_mtx;
    volatile int e_drain_count;
    const char *e_name;
};
#define MAX_ADAPTIVE_SPIN 100

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "epoch stats");
/* Stats. */
static counter_u64_t block_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count,
    "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count,
    "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count,
    "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;
SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count,
    "# of times a thread voluntarily context switched in epoch_wait");
TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

static void epoch_call_task(void *arg __unused);
static void epoch_adjust_prio(struct thread *td, u_char prio);
#define MAX_EPOCHS 64

static struct epoch epoch_array[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static uma_zone_t pcpu_zone_record;

static struct sx epoch_sx;

#define EPOCH_LOCK() sx_xlock(&epoch_sx)
#define EPOCH_UNLOCK() sx_xunlock(&epoch_sx)
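/*
 * Each epoch keeps one epoch_record per CPU.  Readers enter a section on
 * their current CPU's record; preemptible sections additionally link a
 * stack-allocated epoch_tracker onto the record's er_tdlist so that a
 * waiter can find, and if needed lend priority to, threads still inside
 * the section.  Deferred callbacks are queued on the pcpu record and run
 * later from the per-CPU epoch_cb_task grouptask.
 */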
static epoch_record_t
epoch_currecord(epoch_t epoch)
{

    return (zpcpu_get(epoch->e_pcpu_record));
}
static MALLOC_DEFINE(M_STACK, "epochstack", "epoch stack tracking");

struct stackentry {
    RB_ENTRY(stackentry) se_node;
    struct stack se_stack;
};
static int
stackentry_compare(struct stackentry *a, struct stackentry *b)
{

    if (a->se_stack.depth > b->se_stack.depth)
        return (1);
    if (a->se_stack.depth < b->se_stack.depth)
        return (-1);
    for (int i = 0; i < a->se_stack.depth; i++) {
        if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
            return (1);
        if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
            return (-1);
    }

    return (0);
}
RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);

static struct mtx epoch_stacks_lock;
MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);
static bool epoch_trace_stack_print = true;
SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
    &epoch_trace_stack_print, 0,
    "Print stack traces on epoch reports");
static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
static void
epoch_trace_report(const char *fmt, ...)
{
    va_list ap;
    struct stackentry se, *new;

    stack_save(&se.se_stack);

    /* The stack tree is never reduced, so lookups can go lockless. */
    if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
        return;

    new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
    if (new != NULL) {
        bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));

        mtx_lock(&epoch_stacks_lock);
        new = RB_INSERT(stacktree, &epoch_stacks, new);
        mtx_unlock(&epoch_stacks_lock);
        /* RB_INSERT returns the existing node on collision. */
        if (new != NULL)
            free(new, M_STACK);
    }

    va_start(ap, fmt);
    (void)vprintf(fmt, ap);
    va_end(ap);
    if (epoch_trace_stack_print)
        stack_print_ddb(&se.se_stack);
}
static inline void
epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{
    epoch_tracker_t iet;

    SLIST_FOREACH(iet, &td->td_epochs, et_tlink) {
        if (iet->et_epoch != epoch)
            continue;
        epoch_trace_report("Recursively entering epoch %s "
            "at %s:%d, previously entered at %s:%d\n",
            epoch->e_name, file, line,
            iet->et_file, iet->et_line);
    }
    et->et_epoch = epoch;
    et->et_file = file;
    et->et_line = line;
    et->et_flags = 0;
    SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
}
static inline void
epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
    const char *file, int line)
{

    if (SLIST_FIRST(&td->td_epochs) != et) {
        epoch_trace_report("Exiting epoch %s in a not nested order "
            "at %s:%d. Most recently entered %s at %s:%d\n",
            epoch->e_name, file, line,
            SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
            SLIST_FIRST(&td->td_epochs)->et_file,
            SLIST_FIRST(&td->td_epochs)->et_line);
        /* This will panic if et is not anywhere on td_epochs. */
        SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
    } else
        SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
    if (et->et_flags & ET_REPORT_EXIT)
        printf("Td %p exiting epoch %s at %s:%d\n", td, epoch->e_name,
            file, line);
}
/* Used by assertions that check thread state before going to sleep. */
void
epoch_trace_list(struct thread *td)
{
    epoch_tracker_t iet;

    SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
        printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
            iet->et_file, iet->et_line);
}
void
epoch_where_report(epoch_t epoch)
{
    epoch_record_t er;
    struct epoch_tracker *tdwait;

    MPASS(epoch != NULL);
    MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
    MPASS(!THREAD_CAN_SLEEP());
    critical_enter();
    er = epoch_currecord(epoch);
    TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
        if (tdwait->et_td == curthread)
            break;
    critical_exit();
    if (tdwait != NULL) {
        tdwait->et_flags |= ET_REPORT_EXIT;
        printf("Td %p entered epoch %s at %s:%d\n", curthread,
            epoch->e_name, tdwait->et_file, tdwait->et_line);
    }
}
static void
epoch_init(void *arg __unused)
{
    int cpu;

    block_count = counter_u64_alloc(M_WAITOK);
    migrate_count = counter_u64_alloc(M_WAITOK);
    turnstile_count = counter_u64_alloc(M_WAITOK);
    switch_count = counter_u64_alloc(M_WAITOK);

    pcpu_zone_record = uma_zcreate("epoch_record pcpu",
        sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
        UMA_ALIGN_PTR, UMA_ZONE_PCPU);
    CPU_FOREACH(cpu) {
        GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
            epoch_call_task, NULL);
        taskqgroup_attach_cpu(qgroup_softirq,
            DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
            "epoch call task");
    }
    SLIST_INIT(&thread0.td_epochs);
    sx_init(&epoch_sx, "epoch-sx");
    inited = 1;
    global_epoch = epoch_alloc("Global", 0);
    global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{

    inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif
static void
epoch_ctor(epoch_t epoch)
{
    epoch_record_t er;
    int cpu;

    epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
    CPU_FOREACH(cpu) {
        er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
        bzero(er, sizeof(*er));
        ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
        TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
        er->er_cpuid = cpu;
        er->er_parent = epoch;
    }
}
epoch_t
epoch_alloc(const char *name, int flags)
{
    epoch_t epoch;
    int i;

    MPASS(name != NULL);

    if (__predict_false(!inited))
        panic("%s called too early in boot", __func__);

    EPOCH_LOCK();

    /* Find a free slot in the epoch array. */
    for (i = 0; ; i++) {
        if (i == MAX_EPOCHS) {
            epoch = NULL;
            goto done;
        }
        if (epoch_array[i].e_in_use == 0)
            break;
    }

    epoch = epoch_array + i;
    ck_epoch_init(&epoch->e_epoch);
    epoch_ctor(epoch);
    epoch->e_flags = flags;
    epoch->e_name = name;
    sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
    mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);

    /*
     * Set e_in_use last, so that epoch_call_task() only scans this
     * slot once it is fully initialized.
     */
    atomic_store_rel_int(&epoch->e_in_use, 1);
done:
    EPOCH_UNLOCK();
    return (epoch);
}
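/*
 * Illustrative sketch (hypothetical consumer, not part of this file): a
 * subsystem typically allocates its epoch once at initialization and keeps
 * the handle for the lifetime of the module, e.g.:
 *
 *	static epoch_t foo_epoch;
 *
 *	static void
 *	foo_modinit(void)
 *	{
 *		foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *	}
 *
 * epoch_alloc() returns NULL once all MAX_EPOCHS slots are in use, so a
 * caller that can degrade gracefully should check the return value.
 */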
void
epoch_free(epoch_t epoch)
{
    epoch_record_t er;
    int cpu;

    EPOCH_LOCK();

    MPASS(epoch->e_in_use != 0);

    epoch_drain_callbacks(epoch);

    atomic_store_rel_int(&epoch->e_in_use, 0);
    /*
     * Make sure epoch_call_task() sees e_in_use equal to zero by
     * waiting for a grace period on the global_epoch.
     */
    epoch_wait(global_epoch);
    CPU_FOREACH(cpu) {
        er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);

        /* No record should still be in use at this point. */
        MPASS(er->er_td == NULL);
        MPASS(TAILQ_EMPTY(&er->er_tdlist));
    }
    uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
    mtx_destroy(&epoch->e_drain_mtx);
    sx_destroy(&epoch->e_drain_sx);
    memset(epoch, 0, sizeof(*epoch));

    EPOCH_UNLOCK();
}
#define INIT_CHECK(epoch)					\
	do {							\
		if (__predict_false((epoch) == NULL))		\
			return;					\
	} while (0)
void
_epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
    struct epoch_record *er;
    struct thread *td;

    MPASS(cold || epoch != NULL);
    td = curthread;
    MPASS((vm_offset_t)et >= td->td_kstack &&
        (vm_offset_t)et + sizeof(struct epoch_tracker) <=
        td->td_kstack + td->td_kstack_pages * PAGE_SIZE);

    INIT_CHECK(epoch);
    MPASS(epoch->e_flags & EPOCH_PREEMPT);

    epoch_trace_enter(td, epoch, et, file, line);
    et->et_td = td;
    THREAD_NO_SLEEPING();
    critical_enter();
    sched_pin();
    et->et_old_priority = td->td_priority;
    er = epoch_currecord(epoch);
    /* Record-level tracking is reserved for non-preemptible epochs. */
    MPASS(er->er_td == NULL);
    TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
    ck_epoch_begin(&er->er_record, &et->et_section);
    critical_exit();
}
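/*
 * Illustrative sketch (hypothetical consumer): a preemptible read section
 * brackets a lookup with a stack-allocated tracker; the object may only be
 * used inside the section unless a reference is taken:
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	CK_LIST_FOREACH(obj, &foo_list, link) {
 *		if (obj->key == key)
 *			break;
 *	}
 *	epoch_exit_preempt(foo_epoch, &et);
 *
 * The tracker must live on the entering thread's kstack (the MPASS above
 * enforces this) and the section must not sleep, since THREAD_NO_SLEEPING()
 * is in effect, although it may be preempted.
 */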
void
epoch_enter(epoch_t epoch)
{
    epoch_record_t er;

    MPASS(cold || epoch != NULL);
    INIT_CHECK(epoch);
    critical_enter();
    er = epoch_currecord(epoch);
    if (er->er_record.active == 0) {
        MPASS(er->er_td == NULL);
        er->er_td = curthread;
    } else {
        /* We have recursed; the accounting must already be ours. */
        MPASS(er->er_td == curthread);
    }
    ck_epoch_begin(&er->er_record, NULL);
}
void
_epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
{
    struct epoch_record *er;
    struct thread *td;

    INIT_CHECK(epoch);
    td = curthread;
    critical_enter();
    sched_unpin();
    THREAD_SLEEPING_OK();
    er = epoch_currecord(epoch);
    MPASS(epoch->e_flags & EPOCH_PREEMPT);
    MPASS(et != NULL);
    MPASS(et->et_td == td);
    et->et_td = (void *)0xDEADBEEF;
    /* Record-level tracking is reserved for non-preemptible epochs. */
    MPASS(er->er_td == NULL);
    ck_epoch_end(&er->er_record, &et->et_section);
    TAILQ_REMOVE(&er->er_tdlist, et, et_link);
    er->er_gen++;
    if (__predict_false(et->et_old_priority != td->td_priority))
        epoch_adjust_prio(td, et->et_old_priority);
    critical_exit();
    epoch_trace_exit(td, epoch, et, file, line);
}
void
epoch_exit(epoch_t epoch)
{
    epoch_record_t er;

    INIT_CHECK(epoch);
    er = epoch_currecord(epoch);
    ck_epoch_end(&er->er_record, NULL);
    MPASS(er->er_td == curthread);
    if (er->er_record.active == 0)
        er->er_td = NULL;
    critical_exit();
}
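/*
 * Illustrative sketch (hypothetical consumer): the non-preemptible variant
 * needs no tracker because the section runs inside a critical section and
 * the thread can neither migrate nor sleep until epoch_exit():
 *
 *	epoch_enter(foo_epoch);
 *	obj = CK_SLIST_FIRST(&foo_head);
 *	consume(obj);
 *	epoch_exit(foo_epoch);
 *
 * Here foo_epoch would have been allocated with flags == 0; epoch_wait()
 * below asserts e_flags == 0 for such epochs.
 */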
/*
 * Blocking handler for preemptible epochs, invoked by ck_epoch when
 * epoch_wait_preempt() cannot advance past a CPU whose record still has
 * threads inside a section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused,
    ck_epoch_record_t *cr, void *arg __unused)
{
    epoch_record_t record;
    struct thread *td, *owner, *curwaittd;
    struct epoch_tracker *tdwait;
    struct turnstile *ts;
    struct lock_object *lock;
    int spincount, gen;
    int locksheld __unused;

    record = __containerof(cr, struct epoch_record, er_record);
    td = curthread;
    locksheld = td->td_locks;
    spincount = 0;
    counter_u64_add(block_count, 1);
    /* We may have lost a race; no thread is left in a section here. */
    if (TAILQ_EMPTY(&record->er_tdlist))
        return;

    if (record->er_cpuid != curcpu) {
        /*
         * Poll for the section holder to exit before paying for a
         * migration to its CPU.
         */
        gen = record->er_gen;
        thread_unlock(td);
        do {
            cpu_spinwait();
        } while (!TAILQ_EMPTY(&record->er_tdlist) &&
            gen == record->er_gen &&
            spincount++ < MAX_ADAPTIVE_SPIN);
        thread_lock(td);
        /* If the generation changed we can simply poll again. */
        if (gen != record->er_gen)
            return;
        /* Otherwise migrate to the record's CPU and rescan. */
        counter_u64_add(migrate_count, 1);
        sched_bind(td, record->er_cpuid);
        return;
    }
    /*
     * Look for a thread in a section on this CPU that is blocked on a
     * turnstile; lend our priority to higher-priority runnable waiters
     * so that we do not starve them.
     */
    TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
        curwaittd = tdwait->et_td;
        if (!TD_IS_INHIBITED(curwaittd) &&
            curwaittd->td_priority > td->td_priority) {
            critical_enter();
            thread_unlock(td);
            thread_lock(curwaittd);
            sched_prio(curwaittd, td->td_priority);
            thread_unlock(curwaittd);
            thread_lock(td);
            critical_exit();
        }
        if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
            ((ts = curwaittd->td_blocked) != NULL)) {
            /*
             * Sleep on the turnstile of the lock that the thread
             * in the section is itself blocked on.
             */
            critical_enter();
            thread_unlock(td);
            if (turnstile_lock(ts, &lock, &owner)) {
                if (ts == curwaittd->td_blocked) {
                    MPASS(TD_IS_INHIBITED(curwaittd) &&
                        TD_ON_LOCK(curwaittd));
                    critical_exit();
                    turnstile_wait(ts, owner,
                        curwaittd->td_tsqueue);
                    counter_u64_add(turnstile_count, 1);
                    thread_lock(td);
                    return;
                }
                turnstile_unlock(ts, lock);
            }
            thread_lock(td);
            critical_exit();
            KASSERT(td->td_locks == locksheld,
                ("%d extra locks held", td->td_locks - locksheld));
        }
    }
    /*
     * No thread was blocked on a lock, so just context switch away and
     * let the section holders run.
     */
    counter_u64_add(switch_count, 1);
    mi_switch(SW_VOL | SWT_RELINQUISH);
    /* mi_switch() drops the thread lock; reacquire before returning. */
    thread_lock(td);
}
void
epoch_wait_preempt(epoch_t epoch)
{
    struct thread *td;
    int was_bound;
    int old_cpu;
    int old_pinned;
    int locks;
    u_char old_prio;

    MPASS(cold || epoch != NULL);
    INIT_CHECK(epoch);
    td = curthread;
    locks = curthread->td_locks;
    MPASS(epoch->e_flags & EPOCH_PREEMPT);
    if ((epoch->e_flags & EPOCH_LOCKED) == 0)
        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "epoch_wait() can be long running");
    KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
        "of an epoch section of the same epoch"));

    DROP_GIANT();
    thread_lock(td);

    old_cpu = PCPU_GET(cpuid);
    old_pinned = td->td_pinned;
    old_prio = td->td_priority;
    was_bound = sched_is_bound(td);
    sched_unbind(td);
    td->td_pinned = 0;
    sched_bind(td, old_cpu);

    ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
        NULL);

    /* Restore CPU binding, if any. */
    if (was_bound != 0) {
        sched_bind(td, old_cpu);
    } else {
        /* Get the thread back to its initial CPU, if any. */
        if (old_pinned != 0)
            sched_bind(td, old_cpu);
        sched_unbind(td);
    }
    /* Restore pinned after bind. */
    td->td_pinned = old_pinned;

    /* Restore thread priority. */
    sched_prio(td, old_prio);
    thread_unlock(td);
    PICKUP_GIANT();
    KASSERT(td->td_locks == locks,
        ("%d residual locks held", td->td_locks - locks));
}
static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{

    cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

    MPASS(cold || epoch != NULL);
    INIT_CHECK(epoch);
    MPASS(epoch->e_flags == 0);
    critical_enter();
    ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
    critical_exit();
}
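/*
 * Illustrative sketch (hypothetical consumer): the synchronous reclamation
 * pattern unlinks an object, waits for a grace period, then frees it:
 *
 *	FOO_WLOCK(&foo_lock);
 *	CK_LIST_REMOVE(obj, link);
 *	FOO_WUNLOCK(&foo_lock);
 *	epoch_wait_preempt(foo_epoch);
 *	free(obj, M_FOO);
 *
 * After epoch_wait_preempt() (or epoch_wait() for a non-preemptible epoch)
 * returns, no reader that could still observe the unlinked object remains
 * inside a section, so freeing it is safe.  Update paths that cannot block
 * can use epoch_call() below as the asynchronous alternative.
 */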
void
epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
{
    epoch_record_t er;
    ck_epoch_entry_t *cb;

    cb = (void *)ctx;

    MPASS(callback);
    /* Too early in boot to have the epoch set up. */
    if (__predict_false(epoch == NULL))
        goto boottime;
#if !defined(EARLY_AP_STARTUP)
    if (__predict_false(inited < 2))
        goto boottime;
#endif

    critical_enter();
    *DPCPU_PTR(epoch_cb_count) += 1;
    er = epoch_currecord(epoch);
    ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
    critical_exit();
    return;
boottime:
    callback(ctx);
}
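/*
 * Illustrative sketch (hypothetical consumer): deferring a free with
 * epoch_call().  The object embeds an epoch_context and the callback
 * recovers the object with __containerof():
 *
 *	struct foo {
 *		CK_LIST_ENTRY(foo) link;
 *		struct epoch_context ctx;
 *	};
 *
 *	static void
 *	foo_free_cb(epoch_context_t ctx)
 *	{
 *		free(__containerof(ctx, struct foo, ctx), M_FOO);
 *	}
 *
 *	CK_LIST_REMOVE(obj, link);
 *	epoch_call(foo_epoch, foo_free_cb, &obj->ctx);
 *
 * The callback runs later from the per-CPU epoch_cb_task, after a grace
 * period has elapsed, so it must not assume the caller's context.
 */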
static void
epoch_call_task(void *arg __unused)
{
    ck_stack_entry_t *cursor, *head, *next;
    ck_epoch_record_t *record;
    epoch_record_t er;
    epoch_t epoch;
    ck_stack_t cb_stack;
    int i, npending, total;

    ck_stack_init(&cb_stack);
    critical_enter();
    epoch_enter(global_epoch);
    for (total = i = 0; i != MAX_EPOCHS; i++) {
        epoch = epoch_array + i;
        if (__predict_false(
            atomic_load_acq_int(&epoch->e_in_use) == 0))
            continue;
        er = epoch_currecord(epoch);
        record = &er->er_record;
        if ((npending = record->n_pending) == 0)
            continue;
        ck_epoch_poll_deferred(record, &cb_stack);
        total += npending - record->n_pending;
    }
    epoch_exit(global_epoch);
    *DPCPU_PTR(epoch_cb_count) -= total;
    critical_exit();

    head = ck_stack_batch_pop_npsc(&cb_stack);
    for (cursor = head; cursor != NULL; cursor = next) {
        struct ck_epoch_entry *entry =
            ck_epoch_entry_container(cursor);

        next = CK_STACK_NEXT(cursor);
        entry->function(entry);
    }
}
static int
in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
{
    epoch_record_t er;
    struct epoch_tracker *tdwait;
    struct thread *td;

    MPASS(epoch != NULL);
    MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
    td = curthread;
    if (THREAD_CAN_SLEEP())
        return (0);
    critical_enter();
    er = epoch_currecord(epoch);
    TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
        if (tdwait->et_td == td) {
            critical_exit();
            return (1);
        }
    if (dump_onfail) {
        MPASS(td->td_pinned);
        printf("cpu: %d id: %d\n", curcpu, td->td_tid);
        TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
            printf("td_tid: %d ", tdwait->et_td->td_tid);
        printf("\n");
    }
    critical_exit();
    return (0);
}
#ifdef INVARIANTS
static void
epoch_assert_nocpu(epoch_t epoch, struct thread *td)
{
    epoch_record_t er;
    int cpu;
    bool crit;

    crit = td->td_critnest > 0;

    /* Check for a critical section mishap. */
    CPU_FOREACH(cpu) {
        er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
        KASSERT(er->er_td != td,
            ("%s critical section in epoch '%s', from cpu %d",
            (crit ? "exited" : "re-entered"), epoch->e_name, cpu));
    }
}
#else
#define epoch_assert_nocpu(e, td) do {} while (0)
#endif
int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
    epoch_record_t er;
    struct thread *td;

    if (__predict_false((epoch) == NULL))
        return (0);
    if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
        return (in_epoch_verbose_preempt(epoch, dump_onfail));

    /*
     * Being in a critical section is a necessary condition for being
     * inside a non-preemptible epoch on this CPU.
     */
    td = curthread;
    if (td->td_critnest == 0) {
        epoch_assert_nocpu(epoch, td);
        return (0);
    }

    /*
     * With preemption disabled the pcpu record is stable; an inactive
     * record means we are not inside this epoch.
     */
    er = epoch_currecord(epoch);
    if (er->er_record.active == 0) {
        epoch_assert_nocpu(epoch, td);
        return (0);
    }

    MPASS(er->er_td == td);
    return (1);
}

int
in_epoch(epoch_t epoch)
{

    return (in_epoch_verbose(epoch, 0));
}
static void
epoch_drain_cb(struct epoch_context *ctx)
{
    struct epoch *epoch =
        __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;

    if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
        mtx_lock(&epoch->e_drain_mtx);
        wakeup(epoch);
        mtx_unlock(&epoch->e_drain_mtx);
    }
}
void
epoch_drain_callbacks(epoch_t epoch)
{
    epoch_record_t er;
    struct thread *td;
    int was_bound;
    int old_pinned;
    int old_cpu;
    int cpu;

    WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
        "epoch_drain_callbacks() may sleep!");

    /* Too early in boot to have the epoch set up. */
    if (__predict_false(epoch == NULL))
        return;
#if !defined(EARLY_AP_STARTUP)
    if (__predict_false(inited < 2))
        return;
#endif

    DROP_GIANT();

    sx_xlock(&epoch->e_drain_sx);
    mtx_lock(&epoch->e_drain_mtx);

    td = curthread;
    thread_lock(td);
    old_cpu = PCPU_GET(cpuid);
    old_pinned = td->td_pinned;
    was_bound = sched_is_bound(td);
    sched_unbind(td);
    td->td_pinned = 0;

    CPU_FOREACH(cpu)
        epoch->e_drain_count++;
    CPU_FOREACH(cpu) {
        er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
        sched_bind(td, cpu);
        epoch_call(epoch, epoch_drain_cb, &er->er_drain_ctx);
    }

    /* Restore CPU binding, if any. */
    if (was_bound != 0) {
        sched_bind(td, old_cpu);
    } else {
        /* Get the thread back to its initial CPU, if any. */
        if (old_pinned != 0)
            sched_bind(td, old_cpu);
        sched_unbind(td);
    }
    /* Restore pinned after bind. */
    td->td_pinned = old_pinned;

    thread_unlock(td);

    /* Wait until every CPU has run the drain callback. */
    while (epoch->e_drain_count != 0)
        msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);

    mtx_unlock(&epoch->e_drain_mtx);
    sx_xunlock(&epoch->e_drain_sx);

    PICKUP_GIANT();
}
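/*
 * Illustrative sketch (hypothetical consumer): a module detach path that
 * wants its callbacks finished before unloading drains and then frees the
 * epoch:
 *
 *	static void
 *	foo_moddestroy(void)
 *	{
 *		epoch_drain_callbacks(foo_epoch);
 *		epoch_free(foo_epoch);
 *	}
 *
 * epoch_free() itself calls epoch_drain_callbacks(), so the explicit drain
 * only matters when the callbacks must complete before some other teardown
 * step, for example before destroying the zones the callbacks free into.
 */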