33#include "opt_hwpmc_hooks.h"
34#include "opt_kstack_usage_prof.h"
39#include <sys/cpuset.h>
40#include <sys/rtprio.h>
42#include <sys/interrupt.h>
43#include <sys/kernel.h>
44#include <sys/kthread.h>
46#include <sys/limits.h>
48#include <sys/malloc.h>
53#include <sys/random.h>
54#include <sys/resourcevar.h>
57#include <sys/sysctl.h>
58#include <sys/syslog.h>
59#include <sys/unistd.h>
60#include <sys/vmmeter.h>
61#include <machine/atomic.h>
62#include <machine/cpu.h>
63#include <machine/md_var.h>
64#include <machine/smp.h>
65#include <machine/stdarg.h>
68#include <ddb/db_sym.h>
/* Private state flags for interrupt threads. */
83#define IT_DEAD 0x000001	/* NOTE(review): thread is being torn down — confirm vs. ithread_destroy() */
84#define IT_WAIT 0x000002	/* NOTE(review): someone is waiting for the thread to acknowledge — confirm */
100 "Number of consecutive interrupts before storm protection is enabled");
103 0,
"Maximum interrupt handler executions without re-entering epoch(9)");
/*
 * Minimum it_waiting count at which the soft PMC "waiting" event is
 * reported when an interrupt thread is scheduled while already active
 * (see the it_waiting >= threshold check in the schedule path).
 * Run-time tunable via hw.intr_hwpmc_waiting_report_threshold.
 */
105static int intr_hwpmc_waiting_report_threshold = 1;
106SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN,
107 &intr_hwpmc_waiting_report_threshold, 1,
108 "Threshold for reporting number of events in a workq");
109#define PMC_HOOK_INSTALLED_ANY() __predict_false(pmc_hook != NULL)
112 TAILQ_HEAD_INITIALIZER(event_list);
113static struct mtx event_lock;
114MTX_SYSINIT(intr_event_list, &event_lock,
"intr event list", MTX_DEF);
121 struct intr_event *ie);
127#include <sys/pmckern.h>
/*
 * Software PMC events fired from the interrupt machinery
 * (dispatched via PMC_SOFT_CALL_TF/PMC_SOFT_CALL below).
 */
128PMC_SOFT_DEFINE( , , intr, all);
129PMC_SOFT_DEFINE( , , intr, ithread);
130PMC_SOFT_DEFINE( , , intr, filter);
131PMC_SOFT_DEFINE( , , intr, stray);
132PMC_SOFT_DEFINE( , , intr, schedule);
133PMC_SOFT_DEFINE( , , intr, waiting);
135#define PMC_SOFT_CALL_INTR_HLPR(event, frame) \
138 PMC_SOFT_CALL_TF( , , intr, event, frame); \
140 PMC_SOFT_CALL( , , intr, event); \
146intr_priority(
enum intr_type
flags)
150 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
151 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
176 panic(
"intr_priority: no interrupt type in flags");
188 struct intr_event *ie;
194 mtx_assert(&ie->ie_lock, MA_OWNED);
197 if (CK_SLIST_EMPTY(&ie->ie_handlers))
200 pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;
203 strlcpy(td->td_name, ie->ie_fullname,
sizeof(td->td_name));
205 sched_clear_tdname(td);
218 struct intr_handler *ih;
220 int missed, space,
flags;
223 mtx_assert(&ie->ie_lock, MA_OWNED);
224 strlcpy(ie->ie_fullname, ie->ie_name,
sizeof(ie->ie_fullname));
230 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
231 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
232 sizeof(ie->ie_fullname)) {
233 strcat(ie->ie_fullname,
" ");
234 strcat(ie->ie_fullname, ih->ih_name);
238 flags |= ih->ih_flags;
240 ie->ie_hflags =
flags;
249 if (missed == 1 && space == 1) {
250 ih = CK_SLIST_FIRST(&ie->ie_handlers);
251 missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
252 sizeof(ie->ie_fullname);
253 strcat(ie->ie_fullname, (missed == 0) ?
" " :
"-");
254 strcat(ie->ie_fullname, &ih->ih_name[missed]);
257 last = &ie->ie_fullname[
sizeof(ie->ie_fullname) - 2];
258 while (missed-- > 0) {
259 if (strlen(ie->ie_fullname) + 1 ==
sizeof(ie->ie_fullname)) {
266 strcat(ie->ie_fullname,
" +");
269 strcat(ie->ie_fullname,
"+");
276 if (ie->ie_thread != NULL)
278 CTR2(KTR_INTR,
"%s: updated %s", __func__, ie->ie_fullname);
284 void (*
post_filter)(
void *),
int (*assign_cpu)(
void *,
int),
285 const char *fmt, ...)
287 struct intr_event *ie;
291 if ((
flags & ~IE_SOFT) != 0)
293 ie =
malloc(
sizeof(
struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
294 ie->ie_source = source;
298 ie->ie_assign_cpu = assign_cpu;
299 ie->ie_flags =
flags;
302 CK_SLIST_INIT(&ie->ie_handlers);
303 mtx_init(&ie->ie_lock,
"intr event", NULL, MTX_DEF);
306 vsnprintf(ie->ie_name,
sizeof(ie->ie_name), fmt, ap);
308 strlcpy(ie->ie_fullname, ie->ie_name,
sizeof(ie->ie_fullname));
309 mtx_lock(&event_lock);
310 TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
311 mtx_unlock(&event_lock);
314 CTR2(KTR_INTR,
"%s: created %s", __func__, ie->ie_name);
331 if (cpu != NOCPU && CPU_ABSENT(cpu))
334 if (ie->ie_assign_cpu == NULL)
337 error =
priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
346 mtx_lock(&ie->ie_lock);
347 if (ie->ie_thread != NULL) {
348 id = ie->ie_thread->it_thread->td_tid;
349 mtx_unlock(&ie->ie_lock);
354 mtx_unlock(&ie->ie_lock);
357 error = ie->ie_assign_cpu(ie->ie_source, cpu);
360 mtx_lock(&ie->ie_lock);
361 if (ie->ie_thread != NULL) {
363 id = ie->ie_thread->it_thread->td_tid;
364 mtx_unlock(&ie->ie_lock);
367 mtx_unlock(&ie->ie_lock);
373 mtx_lock(&ie->ie_lock);
375 mtx_unlock(&ie->ie_lock);
422 mtx_lock(&ie->ie_lock);
423 if (ie->ie_thread != NULL) {
424 id = ie->ie_thread->it_thread->td_tid;
425 mtx_unlock(&ie->ie_lock);
428 mtx_unlock(&ie->ie_lock);
433static struct intr_event *
436 struct intr_event *ie;
438 mtx_lock(&event_lock);
439 TAILQ_FOREACH(ie, &event_list, ie_list)
440 if (ie->ie_irq == irq &&
441 (ie->ie_flags & IE_SOFT) == 0 &&
442 CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
444 mtx_unlock(&event_lock);
451 struct intr_event *ie;
462 for (n = 0; n < CPU_SETSIZE; n++) {
463 if (!CPU_ISSET(n,
mask))
476 case CPU_WHICH_INTRHANDLER:
478 case CPU_WHICH_ITHREAD:
488 struct intr_event *ie;
504 case CPU_WHICH_INTRHANDLER:
505 mtx_lock(&ie->ie_lock);
506 if (ie->ie_cpu == NOCPU)
509 CPU_SET(ie->ie_cpu,
mask);
510 mtx_unlock(&ie->ie_lock);
512 case CPU_WHICH_ITHREAD:
513 mtx_lock(&ie->ie_lock);
514 if (ie->ie_thread == NULL) {
515 mtx_unlock(&ie->ie_lock);
518 id = ie->ie_thread->it_thread->td_tid;
519 mtx_unlock(&ie->ie_lock);
523 CPU_COPY(&td->td_cpuset->cs_mask,
mask);
536 mtx_lock(&event_lock);
537 mtx_lock(&ie->ie_lock);
538 if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
539 mtx_unlock(&ie->ie_lock);
540 mtx_unlock(&event_lock);
543 TAILQ_REMOVE(&event_list, ie, ie_list);
545 if (ie->ie_thread != NULL) {
547 ie->ie_thread = NULL;
550 mtx_unlock(&ie->ie_lock);
551 mtx_unlock(&event_lock);
552 mtx_destroy(&ie->ie_lock);
567 &td, RFSTOPPED | RFHIGHPID,
568 0,
"intr",
"%s",
name);
570 panic(
"kproc_create() failed with %d", error);
575 td->td_pflags |= TDP_ITHREAD;
577 CTR2(KTR_INTR,
"%s: created %s", __func__,
name);
586 CTR2(KTR_INTR,
"%s: killing %s", __func__, ithread->
it_event->ie_name);
590 if (TD_AWAITING_INTR(td)) {
599 driver_filter_t filter, driver_intr_t handler,
void *arg, u_char pri,
600 enum intr_type
flags,
void **cookiep)
602 struct intr_handler *ih, *temp_ih;
603 struct intr_handler **prevptr;
606 if (ie == NULL ||
name == NULL || (handler == NULL && filter == NULL))
610 ih =
malloc(
sizeof(
struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
611 ih->ih_filter = filter;
612 ih->ih_handler = handler;
613 ih->ih_argument = arg;
614 strlcpy(ih->ih_name,
name,
sizeof(ih->ih_name));
617 if (
flags & INTR_EXCL)
618 ih->ih_flags = IH_EXCLUSIVE;
619 if (
flags & INTR_MPSAFE)
620 ih->ih_flags |= IH_MPSAFE;
621 if (
flags & INTR_ENTROPY)
622 ih->ih_flags |= IH_ENTROPY;
623 if (
flags & INTR_TYPE_NET)
624 ih->ih_flags |= IH_NET;
627 mtx_lock(&ie->ie_lock);
628 if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
629 if ((
flags & INTR_EXCL) ||
630 (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
631 mtx_unlock(&ie->ie_lock);
638 while (ie->ie_thread == NULL && handler != NULL) {
639 if (ie->ie_flags & IE_ADDING_THREAD)
640 msleep(ie, &ie->ie_lock, 0,
"ithread", 0);
642 ie->ie_flags |= IE_ADDING_THREAD;
643 mtx_unlock(&ie->ie_lock);
645 mtx_lock(&ie->ie_lock);
646 ie->ie_flags &= ~IE_ADDING_THREAD;
655 CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
656 if (temp_ih->ih_pri > ih->ih_pri)
659 CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);
663 CTR3(KTR_INTR,
"%s: added %s to %s", __func__, ih->ih_name,
665 mtx_unlock(&ie->ie_lock);
680 struct intr_handler *ih;
684 mtx_lock(&ie->ie_lock);
686 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
691 mtx_unlock(&ie->ie_lock);
692 panic(
"handler %p not found in interrupt event %p", cookie, ie);
704 start = strchr(ih->ih_name,
':');
706 start = strchr(ih->ih_name, 0);
713 space =
sizeof(ih->ih_name) - (
start - ih->ih_name) - 1;
714 if (strlen(descr) + 1 > space) {
715 mtx_unlock(&ie->ie_lock);
721 strcpy(
start + 1, descr);
723 mtx_unlock(&ie->ie_lock);
734 struct intr_handler *ih;
735 struct intr_event *ie;
737 ih = (
struct intr_handler *)cookie;
742 (
"interrupt handler \"%s\" has a NULL interrupt event",
744 return (ie->ie_source);
756 mtx_assert(&ie->ie_lock, MA_OWNED);
757 phase = ie->ie_phase;
763 KASSERT(ie->ie_active[!
phase] == 0, (
"idle phase has activity"));
764 atomic_store_rel_int(&ie->ie_phase, !
phase);
774 atomic_thread_fence_seq_cst();
781 while (ie->ie_active[
phase] > 0)
783 atomic_thread_fence_acq();
789 struct intr_event *ie;
791 ie = handler->ih_event;
792 mtx_assert(&ie->ie_lock, MA_OWNED);
793 KASSERT((handler->ih_flags & IH_DEAD) == 0,
794 (
"update for a removed handler"));
796 if (ie->ie_thread == NULL) {
800 if ((handler->ih_flags & IH_CHANGED) == 0) {
801 handler->ih_flags |= IH_CHANGED;
804 while ((handler->ih_flags & IH_CHANGED) != 0)
805 msleep(handler, &ie->ie_lock, 0,
"ih_barr", 0);
818 struct intr_event *ie;
825 if (ie->ie_thread == NULL)
827 ithd = ie->ie_thread;
835 if (!TD_AWAITING_INTR(td)) {
850 struct intr_handler *handler = (
struct intr_handler *)cookie;
851 struct intr_event *ie;
852 struct intr_handler *ih;
853 struct intr_handler **prevptr;
860 ie = handler->ih_event;
862 (
"interrupt handler \"%s\" has a NULL interrupt event",
865 mtx_lock(&ie->ie_lock);
866 CTR3(KTR_INTR,
"%s: removing %s from %s", __func__, handler->ih_name,
868 CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
873 panic(
"interrupt handler \"%s\" not found in "
874 "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
883 if (ie->ie_thread == NULL) {
884 CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
887 mtx_unlock(&ie->ie_lock);
888 free(handler, M_ITHREAD);
898 KASSERT((handler->ih_flags & IH_DEAD) == 0,
899 (
"duplicate handle remove"));
900 handler->ih_flags |= IH_DEAD;
902 while (handler->ih_flags & IH_DEAD)
903 msleep(handler, &ie->ie_lock, 0,
"iev_rmh", 0);
913 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
914 if (ih->ih_handler != NULL) {
921 ie->ie_thread = NULL;
924 mtx_unlock(&ie->ie_lock);
925 free(handler, M_ITHREAD);
932 struct intr_handler *handler = (
struct intr_handler *)cookie;
933 struct intr_event *ie;
937 ie = handler->ih_event;
939 (
"interrupt handler \"%s\" has a NULL interrupt event",
941 mtx_lock(&ie->ie_lock);
942 handler->ih_flags |= IH_SUSP;
944 mtx_unlock(&ie->ie_lock);
951 struct intr_handler *handler = (
struct intr_handler *)cookie;
952 struct intr_event *ie;
956 ie = handler->ih_event;
958 (
"interrupt handler \"%s\" has a NULL interrupt event",
965 mtx_lock(&ie->ie_lock);
966 handler->ih_flags &= ~IH_SUSP;
968 mtx_unlock(&ie->ie_lock);
983 if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
984 ie->ie_thread == NULL)
995 if (ie->ie_hflags & IH_ENTROPY) {
996 entropy.
event = (uintptr_t)ie;
998 random_harvest_queue(&entropy,
sizeof(entropy), RANDOM_INTERRUPT);
1001 KASSERT(td->td_proc != NULL, (
"ithread %s has no process", ie->ie_name));
1012 atomic_store_rel_int(&it->
it_need, 1);
1014 if (TD_AWAITING_INTR(td)) {
1017 if (PMC_HOOK_INSTALLED_ANY())
1018 PMC_SOFT_CALL_INTR_HLPR(schedule, frame);
1020 CTR3(KTR_INTR,
"%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
1027 if (PMC_HOOK_INSTALLED_ANY() &&
1028 (it->
it_waiting >= intr_hwpmc_waiting_report_threshold))
1029 PMC_SOFT_CALL_INTR_HLPR(waiting, frame);
1031 CTR5(KTR_INTR,
"%s: pid %d (%s): it_need %d, state %d",
1032 __func__, td->td_proc->p_pid, td->td_name, it->
it_need, TD_GET_STATE(td));
1056swi_add(
struct intr_event **eventp,
const char *
name, driver_intr_t handler,
1057 void *arg,
int pri,
enum intr_type
flags,
void **cookiep)
1059 struct intr_event *ie;
1062 if (
flags & INTR_ENTROPY)
1065 ie = (eventp != NULL) ? *eventp : NULL;
1068 if (!(ie->ie_flags & IE_SOFT))
1078 if (handler != NULL) {
1080 PI_SWI(pri),
flags, cookiep);
1091 struct intr_handler *ih = (
struct intr_handler *)cookie;
1092 struct intr_event *ie = ih->ih_event;
1096 CTR3(KTR_INTR,
"swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1099 if ((
flags & SWI_FROMNMI) == 0) {
1100 entropy.
event = (uintptr_t)ih;
1101 entropy.
td = curthread;
1102 random_harvest_queue(&entropy,
sizeof(entropy), RANDOM_SWI);
1112 if (
flags & SWI_DELAY)
1115 if (
flags & SWI_FROMNMI) {
1116#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
1118 (
"SWI_FROMNMI used not with clk_intr_event"));
1119 ipi_self_from_nmi(IPI_SWI);
1124 KASSERT(error == 0, (
"stray software interrupt"));
1144 struct intr_handler *ih, *ihn, *ihp;
1147 CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1152 if (ih->ih_flags & IH_DEAD) {
1153 mtx_lock(&ie->ie_lock);
1155 CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
1157 CK_SLIST_REMOVE_AFTER(ihp, ih_next);
1158 ih->ih_flags &= ~IH_DEAD;
1160 mtx_unlock(&ie->ie_lock);
1170 if ((ih->ih_flags & IH_CHANGED) != 0) {
1171 mtx_lock(&ie->ie_lock);
1172 ih->ih_flags &= ~IH_CHANGED;
1174 mtx_unlock(&ie->ie_lock);
1178 if (ih->ih_handler == NULL)
1182 if ((ih->ih_flags & IH_SUSP) != 0)
1194 if ((ie->ie_flags & IE_SOFT) != 0 &&
1195 atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
1199 CTR6(KTR_INTR,
"%s: pid %d exec %p(%p) for %s flg=%x",
1200 __func__, p->p_pid, (
void *)ih->ih_handler,
1201 ih->ih_argument, ih->ih_name, ih->ih_flags);
1203 if (!(ih->ih_flags & IH_MPSAFE))
1205 ih->ih_handler(ih->ih_argument);
1206 if (!(ih->ih_flags & IH_MPSAFE))
1216 if (!(ie->ie_flags & IE_SOFT))
1217 THREAD_NO_SLEEPING();
1219 if (!(ie->ie_flags & IE_SOFT))
1220 THREAD_SLEEPING_OK();
1233 !(ie->ie_flags & IE_SOFT)) {
1235 if (
ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1237 "interrupt storm detected on \"%s\"; throttling interrupt source\n",
1248 if (ie->ie_post_ithread != NULL)
1249 ie->ie_post_ithread(ie->ie_source);
1258 struct epoch_tracker et;
1260 struct intr_event *ie;
1263 int wake, epoch_count;
1270 (
"%s: ithread and proc linkage out of sync", __func__));
1284 CTR3(KTR_INTR,
"%s: pid %d (%s) exiting", __func__,
1285 p->p_pid, td->td_name);
1286 free(ithd, M_ITHREAD);
1300 (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
1303 NET_EPOCH_ENTER(et);
1305 while (atomic_cmpset_acq_int(&ithd->
it_need, 1, 0) != 0) {
1311 NET_EPOCH_ENTER(et);
1316 WITNESS_WARN(WARN_PANIC, NULL,
"suspending ithread");
1317 mtx_assert(&
Giant, MA_NOTOWNED);
1325 if (atomic_load_acq_int(&ithd->
it_need) == 0 &&
1358 struct intr_handler *ih;
1359 struct trapframe *oldframe;
1363 bool filter, thread;
1367#ifdef KSTACK_USAGE_PROF
1368 intr_prof_stack_use(td, frame);
1372 if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
1381 td->td_intr_nesting_level++;
1386 oldframe = td->td_intr_frame;
1387 td->td_intr_frame = frame;
1389 phase = ie->ie_phase;
1390 atomic_add_int(&ie->ie_active[
phase], 1);
1396 atomic_thread_fence_seq_cst();
1398 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
1399 if ((ih->ih_flags & IH_SUSP) != 0)
1401 if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0)
1403 if (ih->ih_filter == NULL) {
1407 CTR4(KTR_INTR,
"%s: exec %p(%p) for %s", __func__,
1408 ih->ih_filter, ih->ih_argument == NULL ? frame :
1409 ih->ih_argument, ih->ih_name);
1410 if (ih->ih_argument == NULL)
1411 ret = ih->ih_filter(frame);
1413 ret = ih->ih_filter(ih->ih_argument);
1415 PMC_SOFT_CALL_TF( , , intr, all, frame);
1417 KASSERT(ret == FILTER_STRAY ||
1418 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
1419 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
1420 (
"%s: incorrect return value %#x from %s", __func__, ret,
1422 filter = filter || ret == FILTER_HANDLED;
1424 if (ret & FILTER_SCHEDULE_THREAD)
1425 PMC_SOFT_CALL_TF( , , intr, ithread, frame);
1426 else if (ret & FILTER_HANDLED)
1427 PMC_SOFT_CALL_TF( , , intr, filter, frame);
1428 else if (ret == FILTER_STRAY)
1429 PMC_SOFT_CALL_TF( , , intr, stray, frame);
1447 if (ret == FILTER_SCHEDULE_THREAD)
1451 atomic_add_rel_int(&ie->ie_active[
phase], -1);
1453 td->td_intr_frame = oldframe;
1456 if (ie->ie_pre_ithread != NULL)
1457 ie->ie_pre_ithread(ie->ie_source);
1459 if (ie->ie_post_filter != NULL)
1460 ie->ie_post_filter(ie->ie_source);
1468 KASSERT(error == 0, (
"bad stray interrupt"));
1471 td->td_intr_nesting_level--;
1474 if (!thread && !filter)
1485db_dump_intrhand(
struct intr_handler *ih)
1489 db_printf(
"\t%-10s ", ih->ih_name);
1490 switch (ih->ih_pri) {
1510 if (ih->ih_pri >= PI_SOFT)
1513 db_printf(
"%4u", ih->ih_pri);
1517 if (ih->ih_filter != NULL) {
1519 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
1521 if (ih->ih_handler != NULL) {
1522 if (ih->ih_filter != NULL)
1525 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1527 db_printf(
"(%p)", ih->ih_argument);
1529 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
1533 if (ih->ih_flags & IH_EXCLUSIVE) {
1539 if (ih->ih_flags & IH_ENTROPY) {
1542 db_printf(
"ENTROPY");
1545 if (ih->ih_flags & IH_DEAD) {
1551 if (ih->ih_flags & IH_MPSAFE) {
1554 db_printf(
"MPSAFE");
1571db_dump_intr_event(
struct intr_event *ie,
int handlers)
1573 struct intr_handler *ih;
1577 db_printf(
"%s ", ie->ie_fullname);
1580 db_printf(
"(pid %d)", it->
it_thread->td_proc->p_pid);
1582 db_printf(
"(no thread)");
1583 if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
1584 (it != NULL && it->
it_need)) {
1587 if (ie->ie_flags & IE_SOFT) {
1591 if (ie->ie_flags & IE_ADDING_THREAD) {
1594 db_printf(
"ADDING_THREAD");
1597 if (it != NULL && it->
it_need) {
1607 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
1608 db_dump_intrhand(ih);
1614DB_SHOW_COMMAND(intr, db_show_intr)
1616 struct intr_event *ie;
1619 verbose = strchr(modif,
'v') != NULL;
1620 all = strchr(modif,
'a') != NULL;
1621 TAILQ_FOREACH(ie, &event_list, ie_list) {
1622 if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
1624 db_dump_intr_event(ie, verbose);
1640 panic(
"died while creating clk swi ithread");
1661 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1669 uint32_t *intrcnt32;
1673 if (req->flags & SCTL_MASK32) {
1676 intrcnt32 =
malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
1677 if (intrcnt32 == NULL)
1679 for (i = 0; i < sintrcnt /
sizeof (u_long); i++)
1680 intrcnt32[i] = intrcnt[i];
1682 free(intrcnt32, M_TEMP);
1690 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1692 "Interrupt Counts");
1698DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
1706 for (i = intrcnt; j < (sintrcnt /
sizeof(u_long)) && !db_pager_quit;
1711 db_printf(
"%s\t%lu\n", cp, *i);
1712 cp += strlen(cp) + 1;
int cpuset_setithread(lwpid_t id, int cpu)
int cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp, struct cpuset **setp)
int cpuset_setthread(lwpid_t id, cpuset_t *mask)
MTX_SYSINIT(et_eventtimers_init, &et_eventtimers_mtx, "et_mtx", MTX_DEF)
static struct intr_event * intr_lookup(int irq)
void _intr_drain(int irq)
int intr_event_resume_handler(void *cookie)
int intr_event_handle(struct intr_event *ie, struct trapframe *frame)
static int sysctl_intrnames(SYSCTL_HANDLER_ARGS)
int intr_event_describe_handler(struct intr_event *ie, void *cookie, const char *descr)
static TAILQ_HEAD(intr_event)
int intr_event_add_handler(struct intr_event *ie, const char *name, driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, enum intr_type flags, void **cookiep)
int intr_event_create(struct intr_event **event, void *source, int flags, int irq, void(*pre_ithread)(void *), void(*post_ithread)(void *), void(*post_filter)(void *), int(*assign_cpu)(void *, int), const char *fmt,...)
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
int swi_remove(void *cookie)
static int _intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
int intr_event_bind_ithread(struct intr_event *ie, int cpu)
static void ithread_loop(void *arg)
static void intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
static void start_softintr(void *dummy)
static int intr_epoch_batch
static int swi_assign_cpu(void *arg, int cpu)
static int intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
static int intr_storm_threshold
static void intr_handler_barrier(struct intr_handler *handler)
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN, &intr_storm_threshold, 0, "Number of consecutive interrupts before storm protection is enabled")
static void intr_event_update(struct intr_event *ie)
int intr_getaffinity(int irq, int mode, void *m)
static void intr_event_barrier(struct intr_event *ie)
struct intr_event * tty_intr_event
static void ithread_destroy(struct intr_thread *ithread)
int intr_event_bind(struct intr_event *ie, int cpu)
static void ithread_execute_handlers(struct proc *p, struct intr_event *ie)
int intr_event_destroy(struct intr_event *ie)
int swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, void *arg, int pri, enum intr_type flags, void **cookiep)
int intr_event_remove_handler(void *cookie)
void swi_sched(void *cookie, int flags)
struct intr_event * clk_intr_event
static struct intr_thread * ithread_create(const char *name)
int intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
int intr_event_bind_irqonly(struct intr_event *ie, int cpu)
SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, 0, sysctl_intrnames, "", "Interrupt Names")
void * intr_handler_source(void *cookie)
int intr_setaffinity(int irq, int mode, void *m)
int intr_event_suspend_handler(void *cookie)
static int sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
static void ithread_update(struct intr_thread *ithd)
static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads")
int kproc_kthread_add(void(*func)(void *), void *arg, struct proc **procptr, struct thread **tdptr, int flags, int pages, const char *procname, const char *fmt,...)
void *() malloc(size_t size, struct malloc_type *mtp, int flags)
void free(void *addr, struct malloc_type *mtp)
struct mtx __exclusive_cache_line Giant
int priv_check(struct thread *td, int priv)
void panic(const char *fmt,...)
void mi_switch(int flags)
void wakeup(const void *ident)
int sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
int ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
void sched_class(struct thread *td, int class)
void sched_prio(struct thread *td, u_char prio)
void sched_add(struct thread *td, int flags)
struct intr_event * it_event
struct thread * it_thread
int vsnprintf(char *str, size_t size, const char *format, va_list ap)
int printf(const char *fmt,...)