FreeBSD kernel kern code
kern_intr.c
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32#include "opt_ddb.h"
33#include "opt_hwpmc_hooks.h"
34#include "opt_kstack_usage_prof.h"
35
36#include <sys/param.h>
37#include <sys/bus.h>
38#include <sys/conf.h>
39#include <sys/cpuset.h>
40#include <sys/rtprio.h>
41#include <sys/systm.h>
42#include <sys/interrupt.h>
43#include <sys/kernel.h>
44#include <sys/kthread.h>
45#include <sys/ktr.h>
46#include <sys/limits.h>
47#include <sys/lock.h>
48#include <sys/malloc.h>
49#include <sys/mutex.h>
50#include <sys/priv.h>
51#include <sys/proc.h>
52#include <sys/epoch.h>
53#include <sys/random.h>
54#include <sys/resourcevar.h>
55#include <sys/sched.h>
56#include <sys/smp.h>
57#include <sys/sysctl.h>
58#include <sys/syslog.h>
59#include <sys/unistd.h>
60#include <sys/vmmeter.h>
61#include <machine/atomic.h>
62#include <machine/cpu.h>
63#include <machine/md_var.h>
64#include <machine/smp.h>
65#include <machine/stdarg.h>
66#ifdef DDB
67#include <ddb/ddb.h>
68#include <ddb/db_sym.h>
69#endif
70
71/*
72 * Describe an interrupt thread. There is one of these per interrupt event.
73 */
74struct intr_thread {
75 struct intr_event *it_event;
76 struct thread *it_thread; /* Kernel thread. */
77 int it_flags; /* (j) IT_* flags. */
78 int it_need; /* Needs service. */
79 int it_waiting; /* Waiting in the runq. */
80};
81
82/* Interrupt thread flags kept in it_flags */
83#define IT_DEAD 0x000001 /* Thread is waiting to exit. */
84#define IT_WAIT 0x000002 /* Thread is waiting for completion. */
85
86struct intr_entropy {
87 struct thread *td;
88 uintptr_t event;
89};
90
91struct intr_event *clk_intr_event;
92struct intr_event *tty_intr_event;
93struct proc *intrproc;
94
95static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
96
97static int intr_storm_threshold = 0;
98SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
99 &intr_storm_threshold, 0,
100 "Number of consecutive interrupts before storm protection is enabled");
101static int intr_epoch_batch = 1000;
102SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
103 0, "Maximum interrupt handler executions without re-entering epoch(9)");
104#ifdef HWPMC_HOOKS
105static int intr_hwpmc_waiting_report_threshold = 1;
106SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN,
107 &intr_hwpmc_waiting_report_threshold, 1,
108 "Threshold for reporting number of events in a workq");
109#define PMC_HOOK_INSTALLED_ANY() __predict_false(pmc_hook != NULL)
110#endif
111static TAILQ_HEAD(, intr_event) event_list =
112 TAILQ_HEAD_INITIALIZER(event_list);
113static struct mtx event_lock;
114MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
115
116static void intr_event_update(struct intr_event *ie);
117static int intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame);
118static struct intr_thread *ithread_create(const char *name);
119static void ithread_destroy(struct intr_thread *ithread);
120static void ithread_execute_handlers(struct proc *p,
121 struct intr_event *ie);
122static void ithread_loop(void *);
123static void ithread_update(struct intr_thread *ithd);
124static void start_softintr(void *);
125
126#ifdef HWPMC_HOOKS
127#include <sys/pmckern.h>
128PMC_SOFT_DEFINE( , , intr, all);
129PMC_SOFT_DEFINE( , , intr, ithread);
130PMC_SOFT_DEFINE( , , intr, filter);
131PMC_SOFT_DEFINE( , , intr, stray);
132PMC_SOFT_DEFINE( , , intr, schedule);
133PMC_SOFT_DEFINE( , , intr, waiting);
134
135#define PMC_SOFT_CALL_INTR_HLPR(event, frame) \
136do { \
137 if (frame != NULL) \
138 PMC_SOFT_CALL_TF( , , intr, event, frame); \
139 else \
140 PMC_SOFT_CALL( , , intr, event); \
141} while (0)
142#endif
143
144/* Map an interrupt type to an ithread priority. */
145u_char
146intr_priority(enum intr_type flags)
147{
148 u_char pri;
149
150 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
151 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
152 switch (flags) {
153 case INTR_TYPE_TTY:
154 pri = PI_TTY;
155 break;
156 case INTR_TYPE_BIO:
157 pri = PI_DISK;
158 break;
159 case INTR_TYPE_NET:
160 pri = PI_NET;
161 break;
162 case INTR_TYPE_CAM:
163 pri = PI_DISK;
164 break;
165 case INTR_TYPE_AV:
166 pri = PI_AV;
167 break;
168 case INTR_TYPE_CLK:
169 pri = PI_REALTIME;
170 break;
171 case INTR_TYPE_MISC:
172 pri = PI_DULL; /* don't care */
173 break;
174 default:
175 /* We didn't specify an interrupt level. */
176 panic("intr_priority: no interrupt type in flags");
177 }
178
179 return pri;
180}
181
182/*
183 * Update an ithread based on the associated intr_event.
184 */
185static void
186ithread_update(struct intr_thread *ithd)
187{
188 struct intr_event *ie;
189 struct thread *td;
190 u_char pri;
191
192 ie = ithd->it_event;
193 td = ithd->it_thread;
194 mtx_assert(&ie->ie_lock, MA_OWNED);
195
196 /* Determine the overall priority of this event. */
197 if (CK_SLIST_EMPTY(&ie->ie_handlers))
198 pri = PRI_MAX_ITHD;
199 else
200 pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;
201
202 /* Update name and priority. */
203 strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
204#ifdef KTR
205 sched_clear_tdname(td);
206#endif
207 thread_lock(td);
208 sched_prio(td, pri);
209 thread_unlock(td);
210}
211
212/*
213 * Regenerate the full name of an interrupt event and update its priority.
214 */
215static void
216intr_event_update(struct intr_event *ie)
217{
218 struct intr_handler *ih;
219 char *last;
220 int missed, space, flags;
221
222 /* Start off with no entropy and just the name of the event. */
223 mtx_assert(&ie->ie_lock, MA_OWNED);
224 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
225 flags = 0;
226 missed = 0;
227 space = 1;
228
229 /* Run through all the handlers updating values. */
230 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
231 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
232 sizeof(ie->ie_fullname)) {
233 strcat(ie->ie_fullname, " ");
234 strcat(ie->ie_fullname, ih->ih_name);
235 space = 0;
236 } else
237 missed++;
238 flags |= ih->ih_flags;
239 }
240 ie->ie_hflags = flags;
241
242 /*
243 * If there is only one handler and its name is too long, just copy in
244 * as much of the end of the name (includes the unit number) as will
245 * fit. Otherwise, we have multiple handlers and not all of the names
246 * will fit. Add +'s to indicate missing names. If we run out of room
247 * and still have +'s to add, change the last character from a + to a *.
248 */
249 if (missed == 1 && space == 1) {
250 ih = CK_SLIST_FIRST(&ie->ie_handlers);
251 missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
252 sizeof(ie->ie_fullname);
253 strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
254 strcat(ie->ie_fullname, &ih->ih_name[missed]);
255 missed = 0;
256 }
257 last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
258 while (missed-- > 0) {
259 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
260 if (*last == '+') {
261 *last = '*';
262 break;
263 } else
264 *last = '+';
265 } else if (space) {
266 strcat(ie->ie_fullname, " +");
267 space = 0;
268 } else
269 strcat(ie->ie_fullname, "+");
270 }
271
272 /*
273 * If this event has an ithread, update its priority and
274 * name.
275 */
276 if (ie->ie_thread != NULL)
277 ithread_update(ie->ie_thread);
278 CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
279}
280
281int
282intr_event_create(struct intr_event **event, void *source, int flags, int irq,
283 void (*pre_ithread)(void *), void (*post_ithread)(void *),
284 void (*post_filter)(void *), int (*assign_cpu)(void *, int),
285 const char *fmt, ...)
286{
287 struct intr_event *ie;
288 va_list ap;
289
290 /* The only valid flag during creation is IE_SOFT. */
291 if ((flags & ~IE_SOFT) != 0)
292 return (EINVAL);
293 ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
294 ie->ie_source = source;
295 ie->ie_pre_ithread = pre_ithread;
296 ie->ie_post_ithread = post_ithread;
297 ie->ie_post_filter = post_filter;
298 ie->ie_assign_cpu = assign_cpu;
299 ie->ie_flags = flags;
300 ie->ie_irq = irq;
301 ie->ie_cpu = NOCPU;
302 CK_SLIST_INIT(&ie->ie_handlers);
303 mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
304
305 va_start(ap, fmt);
306 vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
307 va_end(ap);
308 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
309 mtx_lock(&event_lock);
310 TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
311 mtx_unlock(&event_lock);
312 if (event != NULL)
313 *event = ie;
314 CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
315 return (0);
316}
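
As an aside, here is a minimal sketch of how machine-dependent interrupt code might use intr_event_create() for a hardware IRQ. Everything prefixed xic_ (a made-up interrupt-controller driver) and the softc layout are hypothetical; only the intr_event_create() call itself comes from this file.

    /* Hypothetical interrupt-controller driver state. */
    struct xic_softc {
        struct intr_event *sc_event[64];    /* one event per IRQ line */
    };

    static void xic_pre_ithread(void *arg);     /* mask + ack the source */
    static void xic_post_ithread(void *arg);    /* unmask the source */
    static void xic_post_filter(void *arg);     /* ack only (no ithread run) */
    static int  xic_assign_cpu(void *arg, int cpu);

    static int
    xic_setup_irq(struct xic_softc *sc, int irq)
    {
        /*
         * One intr_event per IRQ; the "irq%d:" name is what later shows up,
         * with the handler names appended, in ps/top and in "show intr".
         */
        return (intr_event_create(&sc->sc_event[irq], sc, 0, irq,
            xic_pre_ithread, xic_post_ithread, xic_post_filter,
            xic_assign_cpu, "irq%d:", irq));
    }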
317
318/*
319 * Bind an interrupt event to the specified CPU. Note that not all
320 * platforms support binding an interrupt to a CPU. For those
321 * platforms this request will fail. Using a cpu id of NOCPU unbinds
322 * the interrupt event.
323 */
324static int
325_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
326{
327 lwpid_t id;
328 int error;
329
330 /* Need a CPU to bind to. */
331 if (cpu != NOCPU && CPU_ABSENT(cpu))
332 return (EINVAL);
333
334 if (ie->ie_assign_cpu == NULL)
335 return (EOPNOTSUPP);
336
337 error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
338 if (error)
339 return (error);
340
341 /*
342 * If we have any ithreads try to set their mask first to verify
343 * permissions, etc.
344 */
345 if (bindithread) {
346 mtx_lock(&ie->ie_lock);
347 if (ie->ie_thread != NULL) {
348 id = ie->ie_thread->it_thread->td_tid;
349 mtx_unlock(&ie->ie_lock);
350 error = cpuset_setithread(id, cpu);
351 if (error)
352 return (error);
353 } else
354 mtx_unlock(&ie->ie_lock);
355 }
356 if (bindirq)
357 error = ie->ie_assign_cpu(ie->ie_source, cpu);
358 if (error) {
359 if (bindithread) {
360 mtx_lock(&ie->ie_lock);
361 if (ie->ie_thread != NULL) {
362 cpu = ie->ie_cpu;
363 id = ie->ie_thread->it_thread->td_tid;
364 mtx_unlock(&ie->ie_lock);
365 (void)cpuset_setithread(id, cpu);
366 } else
367 mtx_unlock(&ie->ie_lock);
368 }
369 return (error);
370 }
371
372 if (bindirq) {
373 mtx_lock(&ie->ie_lock);
374 ie->ie_cpu = cpu;
375 mtx_unlock(&ie->ie_lock);
376 }
377
378 return (error);
379}
380
381/*
382 * Bind an interrupt event to the specified CPU. For supported platforms, any
383 * associated ithreads as well as the primary interrupt context will be bound
384 * to the specified CPU.
385 */
386int
387intr_event_bind(struct intr_event *ie, int cpu)
388{
389
390 return (_intr_event_bind(ie, cpu, true, true));
391}
392
393/*
394 * Bind an interrupt event to the specified CPU, but do not bind associated
395 * ithreads.
396 */
397int
398intr_event_bind_irqonly(struct intr_event *ie, int cpu)
399{
400
401 return (_intr_event_bind(ie, cpu, true, false));
402}
403
404/*
405 * Bind an interrupt event's ithread to the specified CPU.
406 */
407int
408intr_event_bind_ithread(struct intr_event *ie, int cpu)
409{
410
411 return (_intr_event_bind(ie, cpu, false, true));
412}
413
414/*
415 * Bind an interrupt event's ithread to the specified cpuset.
416 */
417int
418intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
419{
420 lwpid_t id;
421
422 mtx_lock(&ie->ie_lock);
423 if (ie->ie_thread != NULL) {
424 id = ie->ie_thread->it_thread->td_tid;
425 mtx_unlock(&ie->ie_lock);
426 return (cpuset_setthread(id, cs));
427 } else {
428 mtx_unlock(&ie->ie_lock);
429 }
430 return (ENODEV);
431}
432
433static struct intr_event *
434intr_lookup(int irq)
435{
436 struct intr_event *ie;
437
438 mtx_lock(&event_lock);
439 TAILQ_FOREACH(ie, &event_list, ie_list)
440 if (ie->ie_irq == irq &&
441 (ie->ie_flags & IE_SOFT) == 0 &&
442 CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
443 break;
444 mtx_unlock(&event_lock);
445 return (ie);
446}
447
448int
449intr_setaffinity(int irq, int mode, void *m)
450{
451 struct intr_event *ie;
452 cpuset_t *mask;
453 int cpu, n;
454
455 mask = m;
456 cpu = NOCPU;
457 /*
458 * If we're setting all cpus we can unbind. Otherwise make sure
459 * only one cpu is in the set.
460 */
461 if (CPU_CMP(cpuset_root, mask)) {
462 for (n = 0; n < CPU_SETSIZE; n++) {
463 if (!CPU_ISSET(n, mask))
464 continue;
465 if (cpu != NOCPU)
466 return (EINVAL);
467 cpu = n;
468 }
469 }
470 ie = intr_lookup(irq);
471 if (ie == NULL)
472 return (ESRCH);
473 switch (mode) {
474 case CPU_WHICH_IRQ:
475 return (intr_event_bind(ie, cpu));
476 case CPU_WHICH_INTRHANDLER:
477 return (intr_event_bind_irqonly(ie, cpu));
478 case CPU_WHICH_ITHREAD:
479 return (intr_event_bind_ithread(ie, cpu));
480 default:
481 return (EINVAL);
482 }
483}
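
intr_setaffinity() and intr_getaffinity() back the cpuset(2) system calls for interrupt targets (the path used by cpuset(1) -x). A small userland sketch, assuming the machine actually has an IRQ 16 and the program runs with sufficient privilege:

    #include <sys/param.h>
    #include <sys/cpuset.h>

    #include <err.h>

    int
    main(void)
    {
        cpuset_t mask;

        /* Route IRQ 16 (hypothetical) to CPU 2 only. */
        CPU_ZERO(&mask);
        CPU_SET(2, &mask);
        if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, 16,
            sizeof(mask), &mask) != 0)
            err(1, "cpuset_setaffinity");
        return (0);
    }

Passing a mask that contains every CPU takes the unbind path above (cpu ends up as NOCPU), per the CPU_CMP() check at the top of intr_setaffinity().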
484
485int
486intr_getaffinity(int irq, int mode, void *m)
487{
488 struct intr_event *ie;
489 struct thread *td;
490 struct proc *p;
491 cpuset_t *mask;
492 lwpid_t id;
493 int error;
494
495 mask = m;
496 ie = intr_lookup(irq);
497 if (ie == NULL)
498 return (ESRCH);
499
500 error = 0;
501 CPU_ZERO(mask);
502 switch (mode) {
503 case CPU_WHICH_IRQ:
504 case CPU_WHICH_INTRHANDLER:
505 mtx_lock(&ie->ie_lock);
506 if (ie->ie_cpu == NOCPU)
507 CPU_COPY(cpuset_root, mask);
508 else
509 CPU_SET(ie->ie_cpu, mask);
510 mtx_unlock(&ie->ie_lock);
511 break;
512 case CPU_WHICH_ITHREAD:
513 mtx_lock(&ie->ie_lock);
514 if (ie->ie_thread == NULL) {
515 mtx_unlock(&ie->ie_lock);
516 CPU_COPY(cpuset_root, mask);
517 } else {
518 id = ie->ie_thread->it_thread->td_tid;
519 mtx_unlock(&ie->ie_lock);
520 error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
521 if (error != 0)
522 return (error);
523 CPU_COPY(&td->td_cpuset->cs_mask, mask);
524 PROC_UNLOCK(p);
525 }
526 default:
527 return (EINVAL);
528 }
529 return (0);
530}
531
532int
533intr_event_destroy(struct intr_event *ie)
534{
535
536 mtx_lock(&event_lock);
537 mtx_lock(&ie->ie_lock);
538 if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
539 mtx_unlock(&ie->ie_lock);
540 mtx_unlock(&event_lock);
541 return (EBUSY);
542 }
543 TAILQ_REMOVE(&event_list, ie, ie_list);
544#ifndef notyet
545 if (ie->ie_thread != NULL) {
546 ithread_destroy(ie->ie_thread);
547 ie->ie_thread = NULL;
548 }
549#endif
550 mtx_unlock(&ie->ie_lock);
551 mtx_unlock(&event_lock);
552 mtx_destroy(&ie->ie_lock);
553 free(ie, M_ITHREAD);
554 return (0);
555}
556
557static struct intr_thread *
558ithread_create(const char *name)
559{
560 struct intr_thread *ithd;
561 struct thread *td;
562 int error;
563
564 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
565
566 error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
567 &td, RFSTOPPED | RFHIGHPID,
568 0, "intr", "%s", name);
569 if (error)
570 panic("kproc_create() failed with %d", error);
571 thread_lock(td);
572 sched_class(td, PRI_ITHD);
573 TD_SET_IWAIT(td);
574 thread_unlock(td);
575 td->td_pflags |= TDP_ITHREAD;
576 ithd->it_thread = td;
577 CTR2(KTR_INTR, "%s: created %s", __func__, name);
578 return (ithd);
579}
580
581static void
582ithread_destroy(struct intr_thread *ithread)
583{
584 struct thread *td;
585
586 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
587 td = ithread->it_thread;
588 thread_lock(td);
589 ithread->it_flags |= IT_DEAD;
590 if (TD_AWAITING_INTR(td)) {
591 TD_CLR_IWAIT(td);
592 sched_add(td, SRQ_INTR);
593 } else
594 thread_unlock(td);
595}
596
597int
598intr_event_add_handler(struct intr_event *ie, const char *name,
599 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
600 enum intr_type flags, void **cookiep)
601{
602 struct intr_handler *ih, *temp_ih;
603 struct intr_handler **prevptr;
604 struct intr_thread *it;
605
606 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
607 return (EINVAL);
608
609 /* Allocate and populate an interrupt handler structure. */
610 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
611 ih->ih_filter = filter;
612 ih->ih_handler = handler;
613 ih->ih_argument = arg;
614 strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
615 ih->ih_event = ie;
616 ih->ih_pri = pri;
617 if (flags & INTR_EXCL)
618 ih->ih_flags = IH_EXCLUSIVE;
619 if (flags & INTR_MPSAFE)
620 ih->ih_flags |= IH_MPSAFE;
621 if (flags & INTR_ENTROPY)
622 ih->ih_flags |= IH_ENTROPY;
623 if (flags & INTR_TYPE_NET)
624 ih->ih_flags |= IH_NET;
625
626 /* We can only have one exclusive handler in an event. */
627 mtx_lock(&ie->ie_lock);
628 if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
629 if ((flags & INTR_EXCL) ||
630 (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
631 mtx_unlock(&ie->ie_lock);
632 free(ih, M_ITHREAD);
633 return (EINVAL);
634 }
635 }
636
637 /* Create a thread if we need one. */
638 while (ie->ie_thread == NULL && handler != NULL) {
639 if (ie->ie_flags & IE_ADDING_THREAD)
640 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
641 else {
642 ie->ie_flags |= IE_ADDING_THREAD;
643 mtx_unlock(&ie->ie_lock);
644 it = ithread_create("intr: newborn");
645 mtx_lock(&ie->ie_lock);
646 ie->ie_flags &= ~IE_ADDING_THREAD;
647 ie->ie_thread = it;
648 it->it_event = ie;
649 ithread_update(it);
650 wakeup(ie);
651 }
652 }
653
654 /* Add the new handler to the event in priority order. */
655 CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
656 if (temp_ih->ih_pri > ih->ih_pri)
657 break;
658 }
659 CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);
660
661 intr_event_update(ie);
662
663 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
664 ie->ie_name);
665 mtx_unlock(&ie->ie_lock);
666
667 if (cookiep != NULL)
668 *cookiep = ih;
669 return (0);
670}
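
Drivers normally reach this function indirectly through bus_setup_intr(9); the direct form below is a sketch only, with the mydev_* routines and softc fields being hypothetical stand-ins:

    /* Hypothetical driver interrupt routines. */
    static int
    mydev_filter(void *arg)
    {
        struct mydev_softc *sc = arg;

        /* Primary interrupt context: no sleeping, keep it short. */
        if (!mydev_intr_pending(sc))
            return (FILTER_STRAY);
        mydev_disable_intr(sc);
        return (FILTER_SCHEDULE_THREAD);    /* defer the work to the ithread */
    }

    static void
    mydev_intr(void *arg)
    {
        /* Ithread context: mutexes, allocation, etc. are fine here. */
        mydev_process_completions(arg);
    }

    static int
    mydev_setup_intr(struct intr_event *ie, struct mydev_softc *sc)
    {
        /* The cookie is kept for intr_event_remove_handler() at detach. */
        return (intr_event_add_handler(ie, "mydev0", mydev_filter,
            mydev_intr, sc, intr_priority(INTR_TYPE_NET),
            INTR_TYPE_NET | INTR_MPSAFE, &sc->sc_intrhand));
    }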
671
672/*
673 * Append a description preceded by a ':' to the name of the specified
674 * interrupt handler.
675 */
676int
677intr_event_describe_handler(struct intr_event *ie, void *cookie,
678 const char *descr)
679{
680 struct intr_handler *ih;
681 size_t space;
682 char *start;
683
684 mtx_lock(&ie->ie_lock);
685#ifdef INVARIANTS
686 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
687 if (ih == cookie)
688 break;
689 }
690 if (ih == NULL) {
691 mtx_unlock(&ie->ie_lock);
692 panic("handler %p not found in interrupt event %p", cookie, ie);
693 }
694#endif
695 ih = cookie;
696
697 /*
698 * Look for an existing description by checking for an
699 * existing ":". This assumes device names do not include
700 * colons. If one is found, prepare to insert the new
701 * description at that point. If one is not found, find the
702 * end of the name to use as the insertion point.
703 */
704 start = strchr(ih->ih_name, ':');
705 if (start == NULL)
706 start = strchr(ih->ih_name, 0);
707
708 /*
709 * See if there is enough remaining room in the string for the
710 * description + ":". The "- 1" leaves room for the trailing
711 * '\0'. The "+ 1" accounts for the colon.
712 */
713 space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
714 if (strlen(descr) + 1 > space) {
715 mtx_unlock(&ie->ie_lock);
716 return (ENOSPC);
717 }
718
719 /* Append a colon followed by the description. */
720 *start = ':';
721 strcpy(start + 1, descr);
722 intr_event_update(ie);
723 mtx_unlock(&ie->ie_lock);
724 return (0);
725}
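
A short illustration (hypothetical names): a multi-queue driver whose handler was registered as "mlx0" can tag it per queue, after which the event's full name reads "mlx0:rxq0" in tools such as vmstat -i. The fragment below would follow a successful intr_event_add_handler() call that returned the cookie:

    error = intr_event_describe_handler(ie, cookie, "rxq0");
    if (error == ENOSPC)
        printf("description does not fit in ih_name\n");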
726
727/*
728 * Return the ie_source field from the intr_event an intr_handler is
729 * associated with.
730 */
731void *
732intr_handler_source(void *cookie)
733{
734 struct intr_handler *ih;
735 struct intr_event *ie;
736
737 ih = (struct intr_handler *)cookie;
738 if (ih == NULL)
739 return (NULL);
740 ie = ih->ih_event;
741 KASSERT(ie != NULL,
742 ("interrupt handler \"%s\" has a NULL interrupt event",
743 ih->ih_name));
744 return (ie->ie_source);
745}
746
747/*
748 * If intr_event_handle() is running in the ISR context at the time of the call,
749 * then wait for it to complete.
750 */
751static void
752intr_event_barrier(struct intr_event *ie)
753{
754 int phase;
755
756 mtx_assert(&ie->ie_lock, MA_OWNED);
757 phase = ie->ie_phase;
758
759 /*
760 * Switch phase to direct future interrupts to the other active counter.
761 * Make sure that any preceding stores are visible before the switch.
762 */
763 KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
764 atomic_store_rel_int(&ie->ie_phase, !phase);
765
766 /*
767 * This code cooperates with wait-free iteration of ie_handlers
768 * in intr_event_handle.
769 * Make sure that the removal and the phase update are not reordered
770 * with the active count check.
771 * Note that no combination of acquire and release fences can provide
772 * that guarantee as Store->Load sequences can always be reordered.
773 */
774 atomic_thread_fence_seq_cst();
775
776 /*
777 * Now wait on the inactive phase.
778 * The acquire fence is needed so that all post-barrier accesses
779 * are after the check.
780 */
781 while (ie->ie_active[phase] > 0)
782 cpu_spinwait();
783 atomic_thread_fence_acq();
784}
785
786static void
787intr_handler_barrier(struct intr_handler *handler)
788{
789 struct intr_event *ie;
790
791 ie = handler->ih_event;
792 mtx_assert(&ie->ie_lock, MA_OWNED);
793 KASSERT((handler->ih_flags & IH_DEAD) == 0,
794 ("update for a removed handler"));
795
796 if (ie->ie_thread == NULL) {
797 intr_event_barrier(ie);
798 return;
799 }
800 if ((handler->ih_flags & IH_CHANGED) == 0) {
801 handler->ih_flags |= IH_CHANGED;
802 intr_event_schedule_thread(ie, NULL);
803 }
804 while ((handler->ih_flags & IH_CHANGED) != 0)
805 msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
806}
807
808/*
809 * Sleep until an ithread finishes executing an interrupt handler.
810 *
811 * XXX Doesn't currently handle interrupt filters or fast interrupt
812 * handlers. This is intended for LinuxKPI drivers only.
813 * Do not use in BSD code.
814 */
815void
816_intr_drain(int irq)
817{
818 struct intr_event *ie;
819 struct intr_thread *ithd;
820 struct thread *td;
821
822 ie = intr_lookup(irq);
823 if (ie == NULL)
824 return;
825 if (ie->ie_thread == NULL)
826 return;
827 ithd = ie->ie_thread;
828 td = ithd->it_thread;
829 /*
830 * We set the flag and wait for it to be cleared to avoid
831 * long delays with potentially busy interrupt handlers
832 * were we to only sample TD_AWAITING_INTR() every tick.
833 */
834 thread_lock(td);
835 if (!TD_AWAITING_INTR(td)) {
836 ithd->it_flags |= IT_WAIT;
837 while (ithd->it_flags & IT_WAIT) {
838 thread_unlock(td);
839 pause("idrain", 1);
840 thread_lock(td);
841 }
842 }
843 thread_unlock(td);
844 return;
845}
846
847int
848intr_event_remove_handler(void *cookie)
849{
850 struct intr_handler *handler = (struct intr_handler *)cookie;
851 struct intr_event *ie;
852 struct intr_handler *ih;
853 struct intr_handler **prevptr;
854#ifdef notyet
855 int dead;
856#endif
857
858 if (handler == NULL)
859 return (EINVAL);
860 ie = handler->ih_event;
861 KASSERT(ie != NULL,
862 ("interrupt handler \"%s\" has a NULL interrupt event",
863 handler->ih_name));
864
865 mtx_lock(&ie->ie_lock);
866 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
867 ie->ie_name);
868 CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
869 if (ih == handler)
870 break;
871 }
872 if (ih == NULL) {
873 panic("interrupt handler \"%s\" not found in "
874 "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
875 }
876
877 /*
878 * If there is no ithread, then directly remove the handler. Note that
879 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
880 * care needs to be taken to keep ie_handlers consistent and to free
881 * the removed handler only when ie_handlers is quiescent.
882 */
883 if (ie->ie_thread == NULL) {
884 CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
885 intr_event_barrier(ie);
886 intr_event_update(ie);
887 mtx_unlock(&ie->ie_lock);
888 free(handler, M_ITHREAD);
889 return (0);
890 }
891
892 /*
893 * Let the interrupt thread do the job.
894 * The interrupt source is disabled when the interrupt thread is
895 * running, so it does not have to worry about interaction with
896 * intr_event_handle().
897 */
898 KASSERT((handler->ih_flags & IH_DEAD) == 0,
899 ("duplicate handle remove"));
900 handler->ih_flags |= IH_DEAD;
901 intr_event_schedule_thread(ie, NULL);
902 while (handler->ih_flags & IH_DEAD)
903 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
904 intr_event_update(ie);
905
906#ifdef notyet
907 /*
908 * XXX: This could be bad in the case of ppbus(8). Also, I think
909 * this could lead to races of stale data when servicing an
910 * interrupt.
911 */
912 dead = 1;
913 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
914 if (ih->ih_handler != NULL) {
915 dead = 0;
916 break;
917 }
918 }
919 if (dead) {
920 ithread_destroy(ie->ie_thread);
921 ie->ie_thread = NULL;
922 }
923#endif
924 mtx_unlock(&ie->ie_lock);
925 free(handler, M_ITHREAD);
926 return (0);
927}
928
929int
930intr_event_suspend_handler(void *cookie)
931{
932 struct intr_handler *handler = (struct intr_handler *)cookie;
933 struct intr_event *ie;
934
935 if (handler == NULL)
936 return (EINVAL);
937 ie = handler->ih_event;
938 KASSERT(ie != NULL,
939 ("interrupt handler \"%s\" has a NULL interrupt event",
940 handler->ih_name));
941 mtx_lock(&ie->ie_lock);
942 handler->ih_flags |= IH_SUSP;
943 intr_handler_barrier(handler);
944 mtx_unlock(&ie->ie_lock);
945 return (0);
946}
947
948int
949intr_event_resume_handler(void *cookie)
950{
951 struct intr_handler *handler = (struct intr_handler *)cookie;
952 struct intr_event *ie;
953
954 if (handler == NULL)
955 return (EINVAL);
956 ie = handler->ih_event;
957 KASSERT(ie != NULL,
958 ("interrupt handler \"%s\" has a NULL interrupt event",
959 handler->ih_name));
960
961 /*
962 * intr_handler_barrier() acts not only as a barrier,
963 * it also allows to check for any pending interrupts.
964 */
965 mtx_lock(&ie->ie_lock);
966 handler->ih_flags &= ~IH_SUSP;
967 intr_handler_barrier(handler);
968 mtx_unlock(&ie->ie_lock);
969 return (0);
970}
971
972static int
973intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
974{
975 struct intr_entropy entropy;
976 struct intr_thread *it;
977 struct thread *td;
978 struct thread *ctd;
979
980 /*
981 * If no ithread or no handlers, then we have a stray interrupt.
982 */
983 if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
984 ie->ie_thread == NULL)
985 return (EINVAL);
986
987 ctd = curthread;
988 it = ie->ie_thread;
989 td = it->it_thread;
990
991 /*
992 * If any of the handlers for this ithread claim to be good
993 * sources of entropy, then gather some.
994 */
995 if (ie->ie_hflags & IH_ENTROPY) {
996 entropy.event = (uintptr_t)ie;
997 entropy.td = ctd;
998 random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
999 }
1000
1001 KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
1002
1003 /*
1004 * Set it_need to tell the thread to keep running if it is already
1005 * running. Then, lock the thread and see if we actually need to
1006 * put it on the runqueue.
1007 *
1008 * Use store_rel to arrange that the store to ih_need in
1009 * swi_sched() is before the store to it_need and prepare for
1010 * transfer of this order to loads in the ithread.
1011 */
1012 atomic_store_rel_int(&it->it_need, 1);
1013 thread_lock(td);
1014 if (TD_AWAITING_INTR(td)) {
1015#ifdef HWPMC_HOOKS
1016 it->it_waiting = 0;
1017 if (PMC_HOOK_INSTALLED_ANY())
1018 PMC_SOFT_CALL_INTR_HLPR(schedule, frame);
1019#endif
1020 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
1021 td->td_name);
1022 TD_CLR_IWAIT(td);
1023 sched_add(td, SRQ_INTR);
1024 } else {
1025#ifdef HWPMC_HOOKS
1026 it->it_waiting++;
1027 if (PMC_HOOK_INSTALLED_ANY() &&
1028 (it->it_waiting >= intr_hwpmc_waiting_report_threshold))
1029 PMC_SOFT_CALL_INTR_HLPR(waiting, frame);
1030#endif
1031 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
1032 __func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td));
1033 thread_unlock(td);
1034 }
1035
1036 return (0);
1037}
1038
1039/*
1040 * Allow interrupt event binding for software interrupt handlers -- a no-op,
1041 * since interrupts are generated in software rather than being directed by
1042 * a PIC.
1043 */
1044static int
1045swi_assign_cpu(void *arg, int cpu)
1046{
1047
1048 return (0);
1049}
1050
1051/*
1052 * Add a software interrupt handler to a specified event. If a given event
1053 * is not specified, then a new event is created.
1054 */
1055int
1056swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
1057 void *arg, int pri, enum intr_type flags, void **cookiep)
1058{
1059 struct intr_event *ie;
1060 int error = 0;
1061
1062 if (flags & INTR_ENTROPY)
1063 return (EINVAL);
1064
1065 ie = (eventp != NULL) ? *eventp : NULL;
1066
1067 if (ie != NULL) {
1068 if (!(ie->ie_flags & IE_SOFT))
1069 return (EINVAL);
1070 } else {
1071 error = intr_event_create(&ie, NULL, IE_SOFT, 0,
1072 NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
1073 if (error)
1074 return (error);
1075 if (eventp != NULL)
1076 *eventp = ie;
1077 }
1078 if (handler != NULL) {
1079 error = intr_event_add_handler(ie, name, NULL, handler, arg,
1080 PI_SWI(pri), flags, cookiep);
1081 }
1082 return (error);
1083}
1084
1085/*
1086 * Schedule a software interrupt thread.
1087 */
1088void
1089swi_sched(void *cookie, int flags)
1090{
1091 struct intr_handler *ih = (struct intr_handler *)cookie;
1092 struct intr_event *ie = ih->ih_event;
1093 struct intr_entropy entropy;
1094 int error __unused;
1095
1096 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1097 ih->ih_need);
1098
1099 if ((flags & SWI_FROMNMI) == 0) {
1100 entropy.event = (uintptr_t)ih;
1101 entropy.td = curthread;
1102 random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);
1103 }
1104
1105 /*
1106 * Set ih_need for this handler so that if the ithread is already
1107 * running it will execute this handler on the next pass. Otherwise,
1108 * it will execute it the next time it runs.
1109 */
1110 ih->ih_need = 1;
1111
1112 if (flags & SWI_DELAY)
1113 return;
1114
1115 if (flags & SWI_FROMNMI) {
1116#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
1117 KASSERT(ie == clk_intr_event,
1118 ("SWI_FROMNMI used not with clk_intr_event"));
1119 ipi_self_from_nmi(IPI_SWI);
1120#endif
1121 } else {
1122 VM_CNT_INC(v_soft);
1123 error = intr_event_schedule_thread(ie, NULL);
1124 KASSERT(error == 0, ("stray software interrupt"));
1125 }
1126}
1127
1128/*
1129 * Remove a software interrupt handler. Currently this code does not
1130 * remove the associated interrupt event if it becomes empty. Calling code
1131 * may do so manually via intr_event_destroy(), but that's not really
1132 * an optimal interface.
1133 */
1134int
1135swi_remove(void *cookie)
1136{
1137
1138 return (intr_event_remove_handler(cookie));
1139}
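
Taken together, a minimal sketch of the software-interrupt API as a hypothetical subsystem might use it (the myswi_* names are invented; SWI_TQ is one of the standard swi priorities):

    static struct intr_event *myswi_event; /* created by the first swi_add() */
    static void *myswi_cookie;

    static void
    myswi_handler(void *arg)
    {
        /* Runs in a software-interrupt thread, not in primary context. */
        myswi_do_work(arg);                 /* hypothetical */
    }

    static int
    myswi_init(void *arg)
    {
        return (swi_add(&myswi_event, "myswi", myswi_handler, arg,
            SWI_TQ, INTR_MPSAFE, &myswi_cookie));
    }

    /* Called from an interrupt filter, callout, etc. to request a pass. */
    static void
    myswi_kick(void)
    {
        swi_sched(myswi_cookie, 0);
    }

    static void
    myswi_fini(void)
    {
        swi_remove(myswi_cookie);
    }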
1140
1141static void
1142intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
1143{
1144 struct intr_handler *ih, *ihn, *ihp;
1145
1146 ihp = NULL;
1147 CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1148 /*
1149 * If this handler is marked for death, remove it from
1150 * the list of handlers and wake up the sleeper.
1151 */
1152 if (ih->ih_flags & IH_DEAD) {
1153 mtx_lock(&ie->ie_lock);
1154 if (ihp == NULL)
1155 CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
1156 else
1157 CK_SLIST_REMOVE_AFTER(ihp, ih_next);
1158 ih->ih_flags &= ~IH_DEAD;
1159 wakeup(ih);
1160 mtx_unlock(&ie->ie_lock);
1161 continue;
1162 }
1163
1164 /*
1165 * Now that we know that the current element won't be removed
1166 * update the previous element.
1167 */
1168 ihp = ih;
1169
1170 if ((ih->ih_flags & IH_CHANGED) != 0) {
1171 mtx_lock(&ie->ie_lock);
1172 ih->ih_flags &= ~IH_CHANGED;
1173 wakeup(ih);
1174 mtx_unlock(&ie->ie_lock);
1175 }
1176
1177 /* Skip filter only handlers */
1178 if (ih->ih_handler == NULL)
1179 continue;
1180
1181 /* Skip suspended handlers */
1182 if ((ih->ih_flags & IH_SUSP) != 0)
1183 continue;
1184
1185 /*
1186 * For software interrupt threads, we only execute
1187 * handlers that have their need flag set. Hardware
1188 * interrupt threads always invoke all of their handlers.
1189 *
1190 * ih_need can only be 0 or 1. Failed cmpset below
1191 * means that there is no request to execute handlers,
1192 * so a retry of the cmpset is not needed.
1193 */
1194 if ((ie->ie_flags & IE_SOFT) != 0 &&
1195 atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
1196 continue;
1197
1198 /* Execute this handler. */
1199 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1200 __func__, p->p_pid, (void *)ih->ih_handler,
1201 ih->ih_argument, ih->ih_name, ih->ih_flags);
1202
1203 if (!(ih->ih_flags & IH_MPSAFE))
1204 mtx_lock(&Giant);
1205 ih->ih_handler(ih->ih_argument);
1206 if (!(ih->ih_flags & IH_MPSAFE))
1207 mtx_unlock(&Giant);
1208 }
1209}
1210
1211static void
1212ithread_execute_handlers(struct proc *p, struct intr_event *ie)
1213{
1214
1215 /* Interrupt handlers should not sleep. */
1216 if (!(ie->ie_flags & IE_SOFT))
1217 THREAD_NO_SLEEPING();
1218 intr_event_execute_handlers(p, ie);
1219 if (!(ie->ie_flags & IE_SOFT))
1220 THREAD_SLEEPING_OK();
1221
1222 /*
1223 * Interrupt storm handling:
1224 *
1225 * If this interrupt source is currently storming, then throttle
1226 * it to only fire the handler once per clock tick.
1227 *
1228 * If this interrupt source is not currently storming, but the
1229 * number of back to back interrupts exceeds the storm threshold,
1230 * then enter storming mode.
1231 */
1232 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
1233 !(ie->ie_flags & IE_SOFT)) {
1234 /* Report the message only once every second. */
1235 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1236 printf(
1237 "interrupt storm detected on \"%s\"; throttling interrupt source\n",
1238 ie->ie_name);
1239 }
1240 pause("istorm", 1);
1241 } else
1242 ie->ie_count++;
1243
1244 /*
1245 * Now that all the handlers have had a chance to run, reenable
1246 * the interrupt source.
1247 */
1248 if (ie->ie_post_ithread != NULL)
1249 ie->ie_post_ithread(ie->ie_source);
1250}
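
The threshold used above is the hw.intr_storm_threshold tunable/sysctl declared near the top of this file; 0 disables the protection. A userland sketch that turns it on (requires privilege):

    #include <sys/types.h>
    #include <sys/sysctl.h>

    #include <err.h>

    int
    main(void)
    {
        int thresh = 1000;  /* throttle after 1000 back-to-back interrupts */

        if (sysctlbyname("hw.intr_storm_threshold", NULL, NULL,
            &thresh, sizeof(thresh)) != 0)
            err(1, "sysctlbyname");
        return (0);
    }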
1251
1252/*
1253 * This is the main code for interrupt threads.
1254 */
1255static void
1256ithread_loop(void *arg)
1257{
1258 struct epoch_tracker et;
1259 struct intr_thread *ithd;
1260 struct intr_event *ie;
1261 struct thread *td;
1262 struct proc *p;
1263 int wake, epoch_count;
1264 bool needs_epoch;
1265
1266 td = curthread;
1267 p = td->td_proc;
1268 ithd = (struct intr_thread *)arg;
1269 KASSERT(ithd->it_thread == td,
1270 ("%s: ithread and proc linkage out of sync", __func__));
1271 ie = ithd->it_event;
1272 ie->ie_count = 0;
1273 wake = 0;
1274
1275 /*
1276 * As long as we have interrupts outstanding, go through the
1277 * list of handlers, giving each one a go at it.
1278 */
1279 for (;;) {
1280 /*
1281 * If we are an orphaned thread, then just die.
1282 */
1283 if (ithd->it_flags & IT_DEAD) {
1284 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
1285 p->p_pid, td->td_name);
1286 free(ithd, M_ITHREAD);
1287 kthread_exit();
1288 }
1289
1290 /*
1291 * Service interrupts. If another interrupt arrives while
1292 * we are running, it will set it_need to note that we
1293 * should make another pass.
1294 *
1295 * The load_acq part of the following cmpset ensures
1296 * that the load of ih_need in ithread_execute_handlers()
1297 * is ordered after the load of it_need here.
1298 */
1299 needs_epoch =
1300 (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
1301 if (needs_epoch) {
1302 epoch_count = 0;
1303 NET_EPOCH_ENTER(et);
1304 }
1305 while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
1306 ithread_execute_handlers(p, ie);
1307 if (needs_epoch &&
1308 ++epoch_count >= intr_epoch_batch) {
1309 NET_EPOCH_EXIT(et);
1310 epoch_count = 0;
1311 NET_EPOCH_ENTER(et);
1312 }
1313 }
1314 if (needs_epoch)
1315 NET_EPOCH_EXIT(et);
1316 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
1317 mtx_assert(&Giant, MA_NOTOWNED);
1318
1319 /*
1320 * Processed all our interrupts. Now get the sched
1321 * lock. This may take a while and it_need may get
1322 * set again, so we have to check it again.
1323 */
1324 thread_lock(td);
1325 if (atomic_load_acq_int(&ithd->it_need) == 0 &&
1326 (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
1327 TD_SET_IWAIT(td);
1328 ie->ie_count = 0;
1329 mi_switch(SW_VOL | SWT_IWAIT);
1330 } else {
1331 if (ithd->it_flags & IT_WAIT) {
1332 wake = 1;
1333 ithd->it_flags &= ~IT_WAIT;
1334 }
1335 thread_unlock(td);
1336 }
1337 if (wake) {
1338 wakeup(ithd);
1339 wake = 0;
1340 }
1341 }
1342}
1343
1344/*
1345 * Main interrupt handling body.
1346 *
1347 * Input:
1348 * o ie: the event connected to this interrupt.
1349 * o frame: some archs (e.g. i386) pass a frame to some
1350 * handlers as their main argument.
1351 * Return value:
1352 * o 0: everything ok.
1353 * o EINVAL: stray interrupt.
1354 */
1355int
1356intr_event_handle(struct intr_event *ie, struct trapframe *frame)
1357{
1358 struct intr_handler *ih;
1359 struct trapframe *oldframe;
1360 struct thread *td;
1361 int phase;
1362 int ret;
1363 bool filter, thread;
1364
1365 td = curthread;
1366
1367#ifdef KSTACK_USAGE_PROF
1368 intr_prof_stack_use(td, frame);
1369#endif
1370
1371 /* An interrupt with no event or handlers is a stray interrupt. */
1372 if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
1373 return (EINVAL);
1374
1375 /*
1376 * Execute fast interrupt handlers directly.
1377 * To support clock handlers, if a handler registers
1378 * with a NULL argument, then we pass it a pointer to
1379 * a trapframe as its argument.
1380 */
1381 td->td_intr_nesting_level++;
1382 filter = false;
1383 thread = false;
1384 ret = 0;
1385 critical_enter();
1386 oldframe = td->td_intr_frame;
1387 td->td_intr_frame = frame;
1388
1389 phase = ie->ie_phase;
1390 atomic_add_int(&ie->ie_active[phase], 1);
1391
1392 /*
1393 * This fence is required to ensure that no later loads are
1394 * re-ordered before the ie_active store.
1395 */
1396 atomic_thread_fence_seq_cst();
1397
1398 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
1399 if ((ih->ih_flags & IH_SUSP) != 0)
1400 continue;
1401 if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0)
1402 continue;
1403 if (ih->ih_filter == NULL) {
1404 thread = true;
1405 continue;
1406 }
1407 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
1408 ih->ih_filter, ih->ih_argument == NULL ? frame :
1409 ih->ih_argument, ih->ih_name);
1410 if (ih->ih_argument == NULL)
1411 ret = ih->ih_filter(frame);
1412 else
1413 ret = ih->ih_filter(ih->ih_argument);
1414#ifdef HWPMC_HOOKS
1415 PMC_SOFT_CALL_TF( , , intr, all, frame);
1416#endif
1417 KASSERT(ret == FILTER_STRAY ||
1418 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
1419 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
1420 ("%s: incorrect return value %#x from %s", __func__, ret,
1421 ih->ih_name));
1422 filter = filter || ret == FILTER_HANDLED;
1423#ifdef HWPMC_HOOKS
1424 if (ret & FILTER_SCHEDULE_THREAD)
1425 PMC_SOFT_CALL_TF( , , intr, ithread, frame);
1426 else if (ret & FILTER_HANDLED)
1427 PMC_SOFT_CALL_TF( , , intr, filter, frame);
1428 else if (ret == FILTER_STRAY)
1429 PMC_SOFT_CALL_TF( , , intr, stray, frame);
1430#endif
1431
1432 /*
1433 * Wrapper handler special handling:
1434 *
1435 * in some particular cases (like pccard and pccbb),
1436 * the _real_ device handler is wrapped in a couple of
1437 * functions - a filter wrapper and an ithread wrapper.
1438 * In this case (and just in this case), the filter wrapper
1439 * could ask the system to schedule the ithread and mask
1440 * the interrupt source if the wrapped handler is composed
1441 * of just an ithread handler.
1442 *
1443 * TODO: write a generic wrapper to avoid people rolling
1444 * their own.
1445 */
1446 if (!thread) {
1447 if (ret == FILTER_SCHEDULE_THREAD)
1448 thread = true;
1449 }
1450 }
1451 atomic_add_rel_int(&ie->ie_active[phase], -1);
1452
1453 td->td_intr_frame = oldframe;
1454
1455 if (thread) {
1456 if (ie->ie_pre_ithread != NULL)
1457 ie->ie_pre_ithread(ie->ie_source);
1458 } else {
1459 if (ie->ie_post_filter != NULL)
1460 ie->ie_post_filter(ie->ie_source);
1461 }
1462
1463 /* Schedule the ithread if needed. */
1464 if (thread) {
1465 int error __unused;
1466
1467 error = intr_event_schedule_thread(ie, frame);
1468 KASSERT(error == 0, ("bad stray interrupt"));
1469 }
1470 critical_exit();
1471 td->td_intr_nesting_level--;
1472#ifdef notyet
1473 /* The interrupt is not acknowledged by any filter and has no ithread. */
1474 if (!thread && !filter)
1475 return (EINVAL);
1476#endif
1477 return (0);
1478}
1479
1480#ifdef DDB
1481/*
1482 * Dump details about an interrupt handler
1483 */
1484static void
1485db_dump_intrhand(struct intr_handler *ih)
1486{
1487 int comma;
1488
1489 db_printf("\t%-10s ", ih->ih_name);
1490 switch (ih->ih_pri) {
1491 case PI_REALTIME:
1492 db_printf("CLK ");
1493 break;
1494 case PI_AV:
1495 db_printf("AV ");
1496 break;
1497 case PI_TTY:
1498 db_printf("TTY ");
1499 break;
1500 case PI_NET:
1501 db_printf("NET ");
1502 break;
1503 case PI_DISK:
1504 db_printf("DISK");
1505 break;
1506 case PI_DULL:
1507 db_printf("DULL");
1508 break;
1509 default:
1510 if (ih->ih_pri >= PI_SOFT)
1511 db_printf("SWI ");
1512 else
1513 db_printf("%4u", ih->ih_pri);
1514 break;
1515 }
1516 db_printf(" ");
1517 if (ih->ih_filter != NULL) {
1518 db_printf("[F]");
1519 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
1520 }
1521 if (ih->ih_handler != NULL) {
1522 if (ih->ih_filter != NULL)
1523 db_printf(",");
1524 db_printf("[H]");
1525 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1526 }
1527 db_printf("(%p)", ih->ih_argument);
1528 if (ih->ih_need ||
1529 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
1530 IH_MPSAFE)) != 0) {
1531 db_printf(" {");
1532 comma = 0;
1533 if (ih->ih_flags & IH_EXCLUSIVE) {
1534 if (comma)
1535 db_printf(", ");
1536 db_printf("EXCL");
1537 comma = 1;
1538 }
1539 if (ih->ih_flags & IH_ENTROPY) {
1540 if (comma)
1541 db_printf(", ");
1542 db_printf("ENTROPY");
1543 comma = 1;
1544 }
1545 if (ih->ih_flags & IH_DEAD) {
1546 if (comma)
1547 db_printf(", ");
1548 db_printf("DEAD");
1549 comma = 1;
1550 }
1551 if (ih->ih_flags & IH_MPSAFE) {
1552 if (comma)
1553 db_printf(", ");
1554 db_printf("MPSAFE");
1555 comma = 1;
1556 }
1557 if (ih->ih_need) {
1558 if (comma)
1559 db_printf(", ");
1560 db_printf("NEED");
1561 }
1562 db_printf("}");
1563 }
1564 db_printf("\n");
1565}
1566
1567/*
1568 * Dump details about an event.
1569 */
1570void
1571db_dump_intr_event(struct intr_event *ie, int handlers)
1572{
1573 struct intr_handler *ih;
1574 struct intr_thread *it;
1575 int comma;
1576
1577 db_printf("%s ", ie->ie_fullname);
1578 it = ie->ie_thread;
1579 if (it != NULL)
1580 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
1581 else
1582 db_printf("(no thread)");
1583 if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
1584 (it != NULL && it->it_need)) {
1585 db_printf(" {");
1586 comma = 0;
1587 if (ie->ie_flags & IE_SOFT) {
1588 db_printf("SOFT");
1589 comma = 1;
1590 }
1591 if (ie->ie_flags & IE_ADDING_THREAD) {
1592 if (comma)
1593 db_printf(", ");
1594 db_printf("ADDING_THREAD");
1595 comma = 1;
1596 }
1597 if (it != NULL && it->it_need) {
1598 if (comma)
1599 db_printf(", ");
1600 db_printf("NEED");
1601 }
1602 db_printf("}");
1603 }
1604 db_printf("\n");
1605
1606 if (handlers)
1607 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
1608 db_dump_intrhand(ih);
1609}
1610
1611/*
1612 * Dump data about interrupt handlers
1613 */
1614DB_SHOW_COMMAND(intr, db_show_intr)
1615{
1616 struct intr_event *ie;
1617 int all, verbose;
1618
1619 verbose = strchr(modif, 'v') != NULL;
1620 all = strchr(modif, 'a') != NULL;
1621 TAILQ_FOREACH(ie, &event_list, ie_list) {
1622 if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
1623 continue;
1624 db_dump_intr_event(ie, verbose);
1625 if (db_pager_quit)
1626 break;
1627 }
1628}
1629#endif /* DDB */
1630
1631/*
1632 * Start standard software interrupt threads
1633 */
1634static void
1635start_softintr(void *dummy)
1636{
1637
1638 if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK,
1639 INTR_MPSAFE, NULL))
1640 panic("died while creating clk swi ithread");
1641}
1642SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
1643 NULL);
1644
1645/*
1646 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
1647 * The data for this is machine dependent, and the declarations are in machine
1648 * dependent code. The layout of intrnames and intrcnt however is machine
1649 * independent.
1650 *
1651 * We do not know the length of intrcnt and intrnames at compile time, so
1652 * calculate things at run time.
1653 */
1654static int
1655sysctl_intrnames(SYSCTL_HANDLER_ARGS)
1656{
1657 return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
1658}
1659
1660SYSCTL_PROC(_hw, OID_AUTO, intrnames,
1661 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1662 sysctl_intrnames, "",
1663 "Interrupt Names");
1664
1665static int
1666sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
1667{
1668#ifdef SCTL_MASK32
1669 uint32_t *intrcnt32;
1670 unsigned i;
1671 int error;
1672
1673 if (req->flags & SCTL_MASK32) {
1674 if (!req->oldptr)
1675 return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
1676 intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
1677 if (intrcnt32 == NULL)
1678 return (ENOMEM);
1679 for (i = 0; i < sintrcnt / sizeof (u_long); i++)
1680 intrcnt32[i] = intrcnt[i];
1681 error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
1682 free(intrcnt32, M_TEMP);
1683 return (error);
1684 }
1685#endif
1686 return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
1687}
1688
1689SYSCTL_PROC(_hw, OID_AUTO, intrcnt,
1690 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1691 sysctl_intrcnt, "",
1692 "Interrupt Counts");
1693
1694#ifdef DDB
1695/*
1696 * DDB command to dump the interrupt statistics.
1697 */
1698DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
1699{
1700 u_long *i;
1701 char *cp;
1702 u_int j;
1703
1704 cp = intrnames;
1705 j = 0;
1706 for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
1707 i++, j++) {
1708 if (*cp == '\0')
1709 break;
1710 if (*i != 0)
1711 db_printf("%s\t%lu\n", cp, *i);
1712 cp += strlen(cp) + 1;
1713 }
1714}
1715#endif