kern_event.c
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
5 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org>
6 * Copyright (c) 2009 Apple, Inc.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include "opt_ktrace.h"
35#include "opt_kqueue.h"
36
37#ifdef COMPAT_FREEBSD11
38#define _WANT_FREEBSD11_KEVENT
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/capsicum.h>
44#include <sys/kernel.h>
45#include <sys/limits.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/rwlock.h>
49#include <sys/proc.h>
50#include <sys/malloc.h>
51#include <sys/unistd.h>
52#include <sys/file.h>
53#include <sys/filedesc.h>
54#include <sys/filio.h>
55#include <sys/fcntl.h>
56#include <sys/kthread.h>
57#include <sys/selinfo.h>
58#include <sys/queue.h>
59#include <sys/event.h>
60#include <sys/eventvar.h>
61#include <sys/poll.h>
62#include <sys/protosw.h>
63#include <sys/resourcevar.h>
64#include <sys/sigio.h>
65#include <sys/signalvar.h>
66#include <sys/socket.h>
67#include <sys/socketvar.h>
68#include <sys/stat.h>
69#include <sys/sysctl.h>
70#include <sys/sysproto.h>
71#include <sys/syscallsubr.h>
72#include <sys/taskqueue.h>
73#include <sys/uio.h>
74#include <sys/user.h>
75#ifdef KTRACE
76#include <sys/ktrace.h>
77#endif
78#include <machine/atomic.h>
79
80#include <vm/uma.h>
81
82static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
83
84/*
85 * This lock is used if multiple kq locks are required. This possibly
86 * should be made into a per proc lock.
87 */
88static struct mtx kq_global;
89MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
90#define KQ_GLOBAL_LOCK(lck, haslck) do { \
91 if (!haslck) \
92 mtx_lock(lck); \
93 haslck = 1; \
94} while (0)
95#define KQ_GLOBAL_UNLOCK(lck, haslck) do { \
96 if (haslck) \
97 mtx_unlock(lck); \
98 haslck = 0; \
99} while (0)
100
101TASKQUEUE_DEFINE_THREAD(kqueue_ctx);
102
103static int kevent_copyout(void *arg, struct kevent *kevp, int count);
104static int kevent_copyin(void *arg, struct kevent *kevp, int count);
105static int kqueue_register(struct kqueue *kq, struct kevent *kev,
106 struct thread *td, int mflag);
107static int kqueue_acquire(struct file *fp, struct kqueue **kqp);
108static void kqueue_release(struct kqueue *kq, int locked);
109static void kqueue_destroy(struct kqueue *kq);
110static void kqueue_drain(struct kqueue *kq, struct thread *td);
111static int kqueue_expand(struct kqueue *kq, struct filterops *fops,
112 uintptr_t ident, int mflag);
113static void kqueue_task(void *arg, int pending);
114static int kqueue_scan(struct kqueue *kq, int maxevents,
115 struct kevent_copyops *k_ops,
116 const struct timespec *timeout,
117 struct kevent *keva, struct thread *td);
118static void kqueue_wakeup(struct kqueue *kq);
119static struct filterops *kqueue_fo_find(int filt);
120static void kqueue_fo_release(int filt);
121struct g_kevent_args;
122static int kern_kevent_generic(struct thread *td,
123 struct g_kevent_args *uap,
124 struct kevent_copyops *k_ops, const char *struct_name);
125
126static fo_ioctl_t kqueue_ioctl;
127static fo_poll_t kqueue_poll;
128static fo_kqfilter_t kqueue_kqfilter;
129static fo_stat_t kqueue_stat;
130static fo_close_t kqueue_close;
131static fo_fill_kinfo_t kqueue_fill_kinfo;
132
133static struct fileops kqueueops = {
134 .fo_read = invfo_rdwr,
135 .fo_write = invfo_rdwr,
136 .fo_truncate = invfo_truncate,
137 .fo_ioctl = kqueue_ioctl,
138 .fo_poll = kqueue_poll,
139 .fo_kqfilter = kqueue_kqfilter,
140 .fo_stat = kqueue_stat,
141 .fo_close = kqueue_close,
142 .fo_chmod = invfo_chmod,
143 .fo_chown = invfo_chown,
144 .fo_sendfile = invfo_sendfile,
145 .fo_fill_kinfo = kqueue_fill_kinfo,
146};
147
148static int knote_attach(struct knote *kn, struct kqueue *kq);
149static void knote_drop(struct knote *kn, struct thread *td);
150static void knote_drop_detached(struct knote *kn, struct thread *td);
151static void knote_enqueue(struct knote *kn);
152static void knote_dequeue(struct knote *kn);
153static void knote_init(void);
154static struct knote *knote_alloc(int mflag);
155static void knote_free(struct knote *kn);
156
157static void filt_kqdetach(struct knote *kn);
158static int filt_kqueue(struct knote *kn, long hint);
159static int filt_procattach(struct knote *kn);
160static void filt_procdetach(struct knote *kn);
161static int filt_proc(struct knote *kn, long hint);
162static int filt_fileattach(struct knote *kn);
163static void filt_timerexpire(void *knx);
164static void filt_timerexpire_l(struct knote *kn, bool proc_locked);
165static int filt_timerattach(struct knote *kn);
166static void filt_timerdetach(struct knote *kn);
167static void filt_timerstart(struct knote *kn, sbintime_t to);
168static void filt_timertouch(struct knote *kn, struct kevent *kev,
169 u_long type);
170static int filt_timervalidate(struct knote *kn, sbintime_t *to);
171static int filt_timer(struct knote *kn, long hint);
172static int filt_userattach(struct knote *kn);
173static void filt_userdetach(struct knote *kn);
174static int filt_user(struct knote *kn, long hint);
175static void filt_usertouch(struct knote *kn, struct kevent *kev,
176 u_long type);
177
178static struct filterops file_filtops = {
179 .f_isfd = 1,
180 .f_attach = filt_fileattach,
181};
182static struct filterops kqread_filtops = {
183 .f_isfd = 1,
184 .f_detach = filt_kqdetach,
185 .f_event = filt_kqueue,
186};
187/* XXX - move to kern_proc.c? */
188static struct filterops proc_filtops = {
189 .f_isfd = 0,
190 .f_attach = filt_procattach,
191 .f_detach = filt_procdetach,
192 .f_event = filt_proc,
193};
194static struct filterops timer_filtops = {
195 .f_isfd = 0,
196 .f_attach = filt_timerattach,
197 .f_detach = filt_timerdetach,
198 .f_event = filt_timer,
199 .f_touch = filt_timertouch,
200};
201static struct filterops user_filtops = {
202 .f_attach = filt_userattach,
203 .f_detach = filt_userdetach,
204 .f_event = filt_user,
205 .f_touch = filt_usertouch,
206};
207
208static uma_zone_t knote_zone;
209static unsigned int __exclusive_cache_line kq_ncallouts;
210static unsigned int kq_calloutmax = 4 * 1024;
211SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
212 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
213
214/* XXX - ensure not influx ? */
215#define KNOTE_ACTIVATE(kn, islock) do { \
216 if ((islock)) \
217 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \
218 else \
219 KQ_LOCK((kn)->kn_kq); \
220 (kn)->kn_status |= KN_ACTIVE; \
221 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \
222 knote_enqueue((kn)); \
223 if (!(islock)) \
224 KQ_UNLOCK((kn)->kn_kq); \
225} while (0)
226#define KQ_LOCK(kq) do { \
227 mtx_lock(&(kq)->kq_lock); \
228} while (0)
229#define KQ_FLUX_WAKEUP(kq) do { \
230 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \
231 (kq)->kq_state &= ~KQ_FLUXWAIT; \
232 wakeup((kq)); \
233 } \
234} while (0)
235#define KQ_UNLOCK_FLUX(kq) do { \
236 KQ_FLUX_WAKEUP(kq); \
237 mtx_unlock(&(kq)->kq_lock); \
238} while (0)
239#define KQ_UNLOCK(kq) do { \
240 mtx_unlock(&(kq)->kq_lock); \
241} while (0)
242#define KQ_OWNED(kq) do { \
243 mtx_assert(&(kq)->kq_lock, MA_OWNED); \
244} while (0)
245#define KQ_NOTOWNED(kq) do { \
246 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \
247} while (0)
248
249static struct knlist *
250kn_list_lock(struct knote *kn)
251{
252 struct knlist *knl;
253
254 knl = kn->kn_knlist;
255 if (knl != NULL)
256 knl->kl_lock(knl->kl_lockarg);
257 return (knl);
258}
259
260static void
261kn_list_unlock(struct knlist *knl)
262{
263 bool do_free;
264
265 if (knl == NULL)
266 return;
267 do_free = knl->kl_autodestroy && knlist_empty(knl);
268 knl->kl_unlock(knl->kl_lockarg);
269 if (do_free) {
270 knlist_destroy(knl);
271 free(knl, M_KQUEUE);
272 }
273}
274
275static bool
276kn_in_flux(struct knote *kn)
277{
278
279 return (kn->kn_influx > 0);
280}
281
282static void
283kn_enter_flux(struct knote *kn)
284{
285
286 KQ_OWNED(kn->kn_kq);
287 MPASS(kn->kn_influx < INT_MAX);
288 kn->kn_influx++;
289}
290
291static bool
292kn_leave_flux(struct knote *kn)
293{
294
295 KQ_OWNED(kn->kn_kq);
296 MPASS(kn->kn_influx > 0);
297 kn->kn_influx--;
298 return (kn->kn_influx == 0);
299}
300
301#define KNL_ASSERT_LOCK(knl, islocked) do { \
302 if (islocked) \
303 KNL_ASSERT_LOCKED(knl); \
304 else \
305 KNL_ASSERT_UNLOCKED(knl); \
306} while (0)
307#ifdef INVARIANTS
308#define KNL_ASSERT_LOCKED(knl) do { \
309 knl->kl_assert_lock((knl)->kl_lockarg, LA_LOCKED); \
310} while (0)
311#define KNL_ASSERT_UNLOCKED(knl) do { \
312 knl->kl_assert_lock((knl)->kl_lockarg, LA_UNLOCKED); \
313} while (0)
314#else /* !INVARIANTS */
315#define KNL_ASSERT_LOCKED(knl) do {} while (0)
316#define KNL_ASSERT_UNLOCKED(knl) do {} while (0)
317#endif /* INVARIANTS */
318
319#ifndef KN_HASHSIZE
320#define KN_HASHSIZE 64 /* XXX should be tunable */
321#endif
322
323#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
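/*
 * Worked example of the fold above: with the default KN_HASHSIZE of 64 the
 * hash mask is 63, so an ident of 0x1234 hashes to
 * (0x1234 ^ 0x12) & 63 = 0x1226 & 0x3f = 0x26 (bucket 38).
 */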
324
325static int
326filt_nullattach(struct knote *kn)
327{
328
329 return (ENXIO);
330};
331
332struct filterops null_filtops = {
333 .f_isfd = 0,
334 .f_attach = filt_nullattach,
335};
336
337/* XXX - make SYSINIT to add these, and move into respective modules. */
338extern struct filterops sig_filtops;
339extern struct filterops fs_filtops;
340
341/*
342 * Table for all system-defined filters.
343 */
344static struct mtx filterops_lock;
345MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
346 MTX_DEF);
347static struct {
348 struct filterops *for_fop;
349 int for_nolock;
350 int for_refcnt;
351} sysfilt_ops[EVFILT_SYSCOUNT] = {
352 { &file_filtops, 1 }, /* EVFILT_READ */
353 { &file_filtops, 1 }, /* EVFILT_WRITE */
354 { &null_filtops }, /* EVFILT_AIO */
355 { &file_filtops, 1 }, /* EVFILT_VNODE */
356 { &proc_filtops, 1 }, /* EVFILT_PROC */
357 { &sig_filtops, 1 }, /* EVFILT_SIGNAL */
358 { &timer_filtops, 1 }, /* EVFILT_TIMER */
359 { &file_filtops, 1 }, /* EVFILT_PROCDESC */
360 { &fs_filtops, 1 }, /* EVFILT_FS */
361 { &null_filtops }, /* EVFILT_LIO */
362 { &user_filtops, 1 }, /* EVFILT_USER */
363 { &null_filtops }, /* EVFILT_SENDFILE */
364 { &file_filtops, 1 }, /* EVFILT_EMPTY */
365};
366
367/*
368 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
369 * method.
370 */
371static int
372filt_fileattach(struct knote *kn)
373{
374
375 return (fo_kqfilter(kn->kn_fp, kn));
376}
377
378/*ARGSUSED*/
379static int
380kqueue_kqfilter(struct file *fp, struct knote *kn)
381{
382 struct kqueue *kq = kn->kn_fp->f_data;
383
384 if (kn->kn_filter != EVFILT_READ)
385 return (EINVAL);
386
387 kn->kn_status |= KN_KQUEUE;
388 kn->kn_fop = &kqread_filtops;
389 knlist_add(&kq->kq_sel.si_note, kn, 0);
390
391 return (0);
392}
393
394static void
395filt_kqdetach(struct knote *kn)
396{
397 struct kqueue *kq = kn->kn_fp->f_data;
398
399 knlist_remove(&kq->kq_sel.si_note, kn, 0);
400}
401
402/*ARGSUSED*/
403static int
404filt_kqueue(struct knote *kn, long hint)
405{
406 struct kqueue *kq = kn->kn_fp->f_data;
407
408 kn->kn_data = kq->kq_count;
409 return (kn->kn_data > 0);
410}
411
412/* XXX - move to kern_proc.c? */
413static int
414filt_procattach(struct knote *kn)
415{
416 struct proc *p;
417 int error;
418 bool exiting, immediate;
419
420 exiting = immediate = false;
421 if (kn->kn_sfflags & NOTE_EXIT)
422 p = pfind_any(kn->kn_id);
423 else
424 p = pfind(kn->kn_id);
425 if (p == NULL)
426 return (ESRCH);
427 if (p->p_flag & P_WEXIT)
428 exiting = true;
429
430 if ((error = p_cansee(curthread, p))) {
431 PROC_UNLOCK(p);
432 return (error);
433 }
434
435 kn->kn_ptr.p_proc = p;
436 kn->kn_flags |= EV_CLEAR; /* automatically set */
437
438 /*
439 * Internal flag indicating registration done by kernel for the
440 * purposes of getting a NOTE_CHILD notification.
441 */
442 if (kn->kn_flags & EV_FLAG2) {
443 kn->kn_flags &= ~EV_FLAG2;
444 kn->kn_data = kn->kn_sdata; /* ppid */
445 kn->kn_fflags = NOTE_CHILD;
446 kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
447 immediate = true; /* Force immediate activation of child note. */
448 }
449 /*
450 * Internal flag indicating registration done by kernel (for other than
451 * NOTE_CHILD).
452 */
453 if (kn->kn_flags & EV_FLAG1) {
454 kn->kn_flags &= ~EV_FLAG1;
455 }
456
457 knlist_add(p->p_klist, kn, 1);
458
459 /*
460 * Immediately activate any child notes or, in the case of a zombie
461 * target process, exit notes. The latter is necessary to handle the
462 * case where the target process, e.g. a child, dies before the kevent
463 * is registered.
464 */
465 if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
466 KNOTE_ACTIVATE(kn, 0);
467
468 PROC_UNLOCK(p);
469
470 return (0);
471}
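/*
 * For illustration, a minimal userspace sketch of the registration that this
 * attach path services (`child_pid` is a hypothetical variable; error
 * handling is omitted).  The second kevent() call blocks until the child
 * exits, at which point kev.data carries the exit status recorded by
 * filt_proc() below:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *	EV_SET(&kev, child_pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &kev, 1, NULL);
 */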
472
473/*
474 * The knote may be attached to a different process, which may exit,
475 * leaving nothing for the knote to be attached to. So when the process
476 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
477 * it will be deleted when read out. However, as part of the knote deletion,
478 * this routine is called, so a check is needed to avoid actually performing
479 * a detach, because the original process does not exist any more.
480 */
481/* XXX - move to kern_proc.c? */
482static void
483filt_procdetach(struct knote *kn)
484{
485
486 knlist_remove(kn->kn_knlist, kn, 0);
487 kn->kn_ptr.p_proc = NULL;
488}
489
490/* XXX - move to kern_proc.c? */
491static int
492filt_proc(struct knote *kn, long hint)
493{
494 struct proc *p;
495 u_int event;
496
497 p = kn->kn_ptr.p_proc;
498 if (p == NULL) /* already activated, from attach filter */
499 return (0);
500
501 /* Mask off extra data. */
502 event = (u_int)hint & NOTE_PCTRLMASK;
503
504 /* If the user is interested in this event, record it. */
505 if (kn->kn_sfflags & event)
506 kn->kn_fflags |= event;
507
508 /* Process is gone, so flag the event as finished. */
509 if (event == NOTE_EXIT) {
510 kn->kn_flags |= EV_EOF | EV_ONESHOT;
511 kn->kn_ptr.p_proc = NULL;
512 if (kn->kn_fflags & NOTE_EXIT)
513 kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
514 if (kn->kn_fflags == 0)
515 kn->kn_flags |= EV_DROP;
516 return (1);
517 }
518
519 return (kn->kn_fflags != 0);
520}
521
522/*
523 * Called when a process forks. This mostly does the same as knote(),
524 * activating all knotes registered to be activated when the process
525 * forks. Additionally, for each knote attached to the parent, check
526 * whether the user wants to track the new process. If so, attach a
527 * new knote to it and immediately report an event with the child's
528 * pid.
529 */
530void
531knote_fork(struct knlist *list, int pid)
532{
533 struct kqueue *kq;
534 struct knote *kn;
535 struct kevent kev;
536 int error;
537
538 MPASS(list != NULL);
539 KNL_ASSERT_LOCKED(list);
540 if (SLIST_EMPTY(&list->kl_list))
541 return;
542
543 memset(&kev, 0, sizeof(kev));
544 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
545 kq = kn->kn_kq;
546 KQ_LOCK(kq);
547 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
548 KQ_UNLOCK(kq);
549 continue;
550 }
551
552 /*
553 * The same as knote(), activate the event.
554 */
555 if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
556 if (kn->kn_fop->f_event(kn, NOTE_FORK))
557 KNOTE_ACTIVATE(kn, 1);
558 KQ_UNLOCK(kq);
559 continue;
560 }
561
562 /*
563 * The NOTE_TRACK case. In addition to the activation
564 * of the event, we need to register new events to
565 * track the child. Drop the locks in preparation for
566 * the call to kqueue_register().
567 */
568 kn_enter_flux(kn);
569 KQ_UNLOCK(kq);
570 list->kl_unlock(list->kl_lockarg);
571
572 /*
573 * Activate existing knote and register tracking knotes with
574 * new process.
575 *
576 * First register a knote to get just the child notice. This
577 * must be a separate note from a potential NOTE_EXIT
578 * notification since both NOTE_CHILD and NOTE_EXIT are defined
579 * to use the data field (in conflicting ways).
580 */
581 kev.ident = pid;
582 kev.filter = kn->kn_filter;
583 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
584 EV_FLAG2;
585 kev.fflags = kn->kn_sfflags;
586 kev.data = kn->kn_id; /* parent */
587 kev.udata = kn->kn_kevent.udata;/* preserve udata */
588 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
589 if (error)
590 kn->kn_fflags |= NOTE_TRACKERR;
591
592 /*
593 * Then register another knote to track other potential events
594 * from the new process.
595 */
596 kev.ident = pid;
597 kev.filter = kn->kn_filter;
598 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
599 kev.fflags = kn->kn_sfflags;
600 kev.data = kn->kn_id; /* parent */
601 kev.udata = kn->kn_kevent.udata;/* preserve udata */
602 error = kqueue_register(kq, &kev, NULL, M_NOWAIT);
603 if (error)
604 kn->kn_fflags |= NOTE_TRACKERR;
605 if (kn->kn_fop->f_event(kn, NOTE_FORK))
606 KNOTE_ACTIVATE(kn, 0);
607 list->kl_lock(list->kl_lockarg);
608 KQ_LOCK(kq);
609 kn_leave_flux(kn);
610 KQ_UNLOCK_FLUX(kq);
611 }
612}
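/*
 * For illustration, the userspace side of the NOTE_TRACK machinery above, as
 * a minimal sketch (`pid` is the process being watched; error handling is
 * omitted):
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_TRACK | NOTE_EXIT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * When the watched process forks, the kqueue reports a NOTE_CHILD event whose
 * ident is the child's pid and whose data field is the parent's pid, exactly
 * as constructed by the EV_FLAG2 registration above.
 */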
613
614/*
615 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the
616 * interval timer support code.
617 */
618
619#define NOTE_TIMER_PRECMASK \
620 (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS)
621
622static sbintime_t
623timer2sbintime(int64_t data, int flags)
624{
625 int64_t secs;
626
627 /*
628 * Macros for converting to the fractional second portion of an
629 * sbintime_t using 64bit multiplication to improve precision.
630 */
631#define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32)
632#define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32)
633#define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32)
634 switch (flags & NOTE_TIMER_PRECMASK) {
635 case NOTE_SECONDS:
636#ifdef __LP64__
637 if (data > (SBT_MAX / SBT_1S))
638 return (SBT_MAX);
639#endif
640 return ((sbintime_t)data << 32);
641 case NOTE_MSECONDS: /* FALLTHROUGH */
642 case 0:
643 if (data >= 1000) {
644 secs = data / 1000;
645#ifdef __LP64__
646 if (secs > (SBT_MAX / SBT_1S))
647 return (SBT_MAX);
648#endif
649 return (secs << 32 | MS_TO_SBT(data % 1000));
650 }
651 return (MS_TO_SBT(data));
652 case NOTE_USECONDS:
653 if (data >= 1000000) {
654 secs = data / 1000000;
655#ifdef __LP64__
656 if (secs > (SBT_MAX / SBT_1S))
657 return (SBT_MAX);
658#endif
659 return (secs << 32 | US_TO_SBT(data % 1000000));
660 }
661 return (US_TO_SBT(data));
662 case NOTE_NSECONDS:
663 if (data >= 1000000000) {
664 secs = data / 1000000000;
665#ifdef __LP64__
666 if (secs > (SBT_MAX / SBT_1S))
667 return (SBT_MAX);
668#endif
669 return (secs << 32 | NS_TO_SBT(data % 1000000000));
670 }
671 return (NS_TO_SBT(data));
672 default:
673 break;
674 }
675 return (-1);
676}
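/*
 * Worked example of the conversion above (sbintime_t is 32.32 fixed point):
 * data = 1500 with NOTE_MSECONDS splits into secs = 1 plus a 500 ms
 * remainder, so the result is (1 << 32) | MS_TO_SBT(500), i.e. roughly
 * 1.5 * 2^32.  The 64-bit multiply by ((uint64_t)1 << 63) / 500 followed by
 * the 32-bit shift preserves sub-second precision that a plain integer
 * division would discard.
 */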
677
678struct kq_timer_cb_data {
679 struct callout c;
680 struct proc *p;
681 struct knote *kn;
682 int cpuid;
683 int flags;
684 TAILQ_ENTRY(kq_timer_cb_data) link;
685 sbintime_t next; /* next timer event fires at */
686 sbintime_t to; /* precalculated timer period, 0 for abs */
687};
688
689#define KQ_TIMER_CB_ENQUEUED 0x01
690
691static void
692kqtimer_sched_callout(struct kq_timer_cb_data *kc)
693{
694 callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kc->kn,
695 kc->cpuid, C_ABSOLUTE);
696}
697
698void
699kqtimer_proc_continue(struct proc *p)
700{
701 struct kq_timer_cb_data *kc, *kc1;
702 struct bintime bt;
703 sbintime_t now;
704
705 PROC_LOCK_ASSERT(p, MA_OWNED);
706
708 now = bttosbt(bt);
709
710 TAILQ_FOREACH_SAFE(kc, &p->p_kqtim_stop, link, kc1) {
711 TAILQ_REMOVE(&p->p_kqtim_stop, kc, link);
712 kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
713 if (kc->next <= now)
714 filt_timerexpire_l(kc->kn, true);
715 else
716 kqtimer_sched_callout(kc);
717 }
718}
719
720static void
721filt_timerexpire_l(struct knote *kn, bool proc_locked)
722{
723 struct kq_timer_cb_data *kc;
724 struct proc *p;
725 uint64_t delta;
726 sbintime_t now;
727
728 kc = kn->kn_ptr.p_v;
729
730 if ((kn->kn_flags & EV_ONESHOT) != 0 || kc->to == 0) {
731 kn->kn_data++;
732 KNOTE_ACTIVATE(kn, 0);
733 return;
734 }
735
736 now = sbinuptime();
737 if (now >= kc->next) {
738 delta = (now - kc->next) / kc->to;
739 if (delta == 0)
740 delta = 1;
741 kn->kn_data += delta;
742 kc->next += (delta + 1) * kc->to;
743 if (now >= kc->next) /* overflow */
744 kc->next = now + kc->to;
745 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */
746 }
747
748 /*
749 * Initial check for stopped kc->p is racy. It is fine to
750 * miss the set of the stop flags, at worst we would schedule
751 * one more callout. On the other hand, it is not fine to skip
752 * scheduling when we missed the clearing of the flags; we
753 * recheck them under the lock and observe a consistent state.
754 */
755 p = kc->p;
756 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
757 if (!proc_locked)
758 PROC_LOCK(p);
759 if (P_SHOULDSTOP(p) || P_KILLED(p)) {
760 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) == 0) {
761 kc->flags |= KQ_TIMER_CB_ENQUEUED;
762 TAILQ_INSERT_TAIL(&p->p_kqtim_stop, kc, link);
763 }
764 if (!proc_locked)
765 PROC_UNLOCK(p);
766 return;
767 }
768 if (!proc_locked)
769 PROC_UNLOCK(p);
770 }
771 kqtimer_sched_callout(kc);
772}
773
774static void
775filt_timerexpire(void *knx)
776{
777 filt_timerexpire_l(knx, false);
778}
779
780/*
781 * data contains amount of time to sleep
782 */
783static int
784filt_timervalidate(struct knote *kn, sbintime_t *to)
785{
786 struct bintime bt;
787 sbintime_t sbt;
788
789 if (kn->kn_sdata < 0)
790 return (EINVAL);
791 if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
792 kn->kn_sdata = 1;
793 /*
794 * The only fflags values supported are the timer unit
795 * (precision) and the absolute time indicator.
796 */
797 if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
798 return (EINVAL);
799
800 *to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
801 if (*to < 0)
802 return (EINVAL);
803 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
804 getboottimebin(&bt);
805 sbt = bttosbt(bt);
806 *to = MAX(0, *to - sbt);
807 }
808 return (0);
809}
810
811static int
812filt_timerattach(struct knote *kn)
813{
814 struct kq_timer_cb_data *kc;
815 sbintime_t to;
816 int error;
817
818 to = -1;
819 error = filt_timervalidate(kn, &to);
820 if (error != 0)
821 return (error);
822 KASSERT(to > 0 || (kn->kn_flags & EV_ONESHOT) != 0 ||
823 (kn->kn_sfflags & NOTE_ABSTIME) != 0,
824 ("%s: periodic timer has a calculated zero timeout", __func__));
825 KASSERT(to >= 0,
826 ("%s: timer has a calculated negative timeout", __func__));
827
828 if (atomic_fetchadd_int(&kq_ncallouts, 1) + 1 > kq_calloutmax) {
829 atomic_subtract_int(&kq_ncallouts, 1);
830 return (ENOMEM);
831 }
832
833 if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
834 kn->kn_flags |= EV_CLEAR; /* automatically set */
835 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */
836 kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
837 kc->kn = kn;
838 kc->p = curproc;
839 kc->cpuid = PCPU_GET(cpuid);
840 kc->flags = 0;
841 callout_init(&kc->c, 1);
842 filt_timerstart(kn, to);
843
844 return (0);
845}
846
847static void
848filt_timerstart(struct knote *kn, sbintime_t to)
849{
850 struct kq_timer_cb_data *kc;
851
852 kc = kn->kn_ptr.p_v;
853 if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
854 kc->next = to;
855 kc->to = 0;
856 } else {
857 kc->next = to + sbinuptime();
858 kc->to = to;
859 }
860 kqtimer_sched_callout(kc);
861}
862
863static void
864filt_timerdetach(struct knote *kn)
865{
866 struct kq_timer_cb_data *kc;
867 unsigned int old __unused;
868 bool pending;
869
870 kc = kn->kn_ptr.p_v;
871 do {
872 callout_drain(&kc->c);
873
874 /*
875 * kqtimer_proc_continue() might have rescheduled this callout.
876 * Double-check, using the process mutex as an interlock.
877 */
878 PROC_LOCK(kc->p);
879 if ((kc->flags & KQ_TIMER_CB_ENQUEUED) != 0) {
880 kc->flags &= ~KQ_TIMER_CB_ENQUEUED;
881 TAILQ_REMOVE(&kc->p->p_kqtim_stop, kc, link);
882 }
883 pending = callout_pending(&kc->c);
884 PROC_UNLOCK(kc->p);
885 } while (pending);
886 free(kc, M_KQUEUE);
887 old = atomic_fetchadd_int(&kq_ncallouts, -1);
888 KASSERT(old > 0, ("Number of callouts cannot become negative"));
889 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */
890}
891
892static void
893filt_timertouch(struct knote *kn, struct kevent *kev, u_long type)
894{
895 struct kq_timer_cb_data *kc;
896 struct kqueue *kq;
897 sbintime_t to;
898 int error;
899
900 switch (type) {
901 case EVENT_REGISTER:
902 /* Handle re-added timers that update data/fflags */
903 if (kev->flags & EV_ADD) {
904 kc = kn->kn_ptr.p_v;
905
906 /* Drain any existing callout. */
907 callout_drain(&kc->c);
908
909 /* Throw away any existing undelivered record
910 * of the timer expiration. This is done under
911 * the presumption that if a process is
912 * re-adding this timer with new parameters,
913 * it is no longer interested in what may have
914 * happened under the old parameters. If it is
915 * interested, it can wait for the expiration,
916 * delete the old timer definition, and then
917 * add the new one.
918 *
919 * This has to be done while the kq is locked:
920 * - if enqueued, dequeue
921 * - make it no longer active
922 * - clear the count of expiration events
923 */
924 kq = kn->kn_kq;
925 KQ_LOCK(kq);
926 if (kn->kn_status & KN_QUEUED)
927 knote_dequeue(kn);
928
929 kn->kn_status &= ~KN_ACTIVE;
930 kn->kn_data = 0;
931 KQ_UNLOCK(kq);
932
933 /* Reschedule timer based on new data/fflags */
934 kn->kn_sfflags = kev->fflags;
935 kn->kn_sdata = kev->data;
936 error = filt_timervalidate(kn, &to);
937 if (error != 0) {
938 kn->kn_flags |= EV_ERROR;
939 kn->kn_data = error;
940 } else
941 filt_timerstart(kn, to);
942 }
943 break;
944
945 case EVENT_PROCESS:
946 *kev = kn->kn_kevent;
947 if (kn->kn_flags & EV_CLEAR) {
948 kn->kn_data = 0;
949 kn->kn_fflags = 0;
950 }
951 break;
952
953 default:
954 panic("filt_timertouch() - invalid type (%ld)", type);
955 break;
956 }
957}
958
959static int
960filt_timer(struct knote *kn, long hint)
961{
962
963 return (kn->kn_data != 0);
964}
965
966static int
967filt_userattach(struct knote *kn)
968{
969
970 /*
971 * EVFILT_USER knotes are not attached to anything in the kernel.
972 */
973 kn->kn_hook = NULL;
974 if (kn->kn_fflags & NOTE_TRIGGER)
975 kn->kn_hookid = 1;
976 else
977 kn->kn_hookid = 0;
978 return (0);
979}
980
981static void
982filt_userdetach(__unused struct knote *kn)
983{
984
985 /*
986 * EVFILT_USER knotes are not attached to anything in the kernel.
987 */
988}
989
990static int
991filt_user(struct knote *kn, __unused long hint)
992{
993
994 return (kn->kn_hookid);
995}
996
997static void
998filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
999{
1000 u_int ffctrl;
1001
1002 switch (type) {
1003 case EVENT_REGISTER:
1004 if (kev->fflags & NOTE_TRIGGER)
1005 kn->kn_hookid = 1;
1006
1007 ffctrl = kev->fflags & NOTE_FFCTRLMASK;
1008 kev->fflags &= NOTE_FFLAGSMASK;
1009 switch (ffctrl) {
1010 case NOTE_FFNOP:
1011 break;
1012
1013 case NOTE_FFAND:
1014 kn->kn_sfflags &= kev->fflags;
1015 break;
1016
1017 case NOTE_FFOR:
1018 kn->kn_sfflags |= kev->fflags;
1019 break;
1020
1021 case NOTE_FFCOPY:
1022 kn->kn_sfflags = kev->fflags;
1023 break;
1024
1025 default:
1026 /* XXX Return error? */
1027 break;
1028 }
1029 kn->kn_sdata = kev->data;
1030 if (kev->flags & EV_CLEAR) {
1031 kn->kn_hookid = 0;
1032 kn->kn_data = 0;
1033 kn->kn_fflags = 0;
1034 }
1035 break;
1036
1037 case EVENT_PROCESS:
1038 *kev = kn->kn_kevent;
1039 kev->fflags = kn->kn_sfflags;
1040 kev->data = kn->kn_sdata;
1041 if (kn->kn_flags & EV_CLEAR) {
1042 kn->kn_hookid = 0;
1043 kn->kn_data = 0;
1044 kn->kn_fflags = 0;
1045 }
1046 break;
1047
1048 default:
1049 panic("filt_usertouch() - invalid type (%ld)", type);
1050 break;
1051 }
1052}
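/*
 * For illustration, a minimal userspace sketch of how EVFILT_USER is driven
 * against the register/touch logic above (`kq` is assumed to be an existing
 * kqueue descriptor; error handling is omitted):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The first call creates the note; the second sets NOTE_TRIGGER, which
 * filt_usertouch() turns into kn_hookid = 1 so that filt_user() reports the
 * event to any thread sleeping in kevent().
 */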
1053
1054int
1055sys_kqueue(struct thread *td, struct kqueue_args *uap)
1056{
1057
1058 return (kern_kqueue(td, 0, NULL));
1059}
1060
1061static void
1062kqueue_init(struct kqueue *kq)
1063{
1064
1065 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
1066 TAILQ_INIT(&kq->kq_head);
1067 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
1068 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
1069}
1070
1071int
1072kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps)
1073{
1074 struct filedesc *fdp;
1075 struct kqueue *kq;
1076 struct file *fp;
1077 struct ucred *cred;
1078 int fd, error;
1079
1080 fdp = td->td_proc->p_fd;
1081 cred = td->td_ucred;
1082 if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES)))
1083 return (ENOMEM);
1084
1085 error = falloc_caps(td, &fp, &fd, flags, fcaps);
1086 if (error != 0) {
1087 chgkqcnt(cred->cr_ruidinfo, -1, 0);
1088 return (error);
1089 }
1090
1091 /* An extra reference on `fp' has been held for us by falloc(). */
1092 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
1093 kqueue_init(kq);
1094 kq->kq_fdp = fdp;
1095 kq->kq_cred = crhold(cred);
1096
1097 FILEDESC_XLOCK(fdp);
1098 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
1099 FILEDESC_XUNLOCK(fdp);
1100
1101 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
1102 fdrop(fp, td);
1103
1104 td->td_retval[0] = fd;
1105 return (0);
1106}
1107
1108struct g_kevent_args {
1109 int fd;
1110 const void *changelist;
1111 int nchanges;
1112 void *eventlist;
1113 int nevents;
1114 const struct timespec *timeout;
1115};
1116
1117int
1118sys_kevent(struct thread *td, struct kevent_args *uap)
1119{
1120 struct kevent_copyops k_ops = {
1121 .arg = uap,
1122 .k_copyout = kevent_copyout,
1123 .k_copyin = kevent_copyin,
1124 .kevent_size = sizeof(struct kevent),
1125 };
1126 struct g_kevent_args gk_args = {
1127 .fd = uap->fd,
1128 .changelist = uap->changelist,
1129 .nchanges = uap->nchanges,
1130 .eventlist = uap->eventlist,
1131 .nevents = uap->nevents,
1132 .timeout = uap->timeout,
1133 };
1134
1135 return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
1136}
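/*
 * For illustration, a minimal userspace call that reaches this entry point:
 * register a read filter on standard input and wait up to one second for it
 * to become readable (error handling is omitted):
 *
 *	struct kevent change, event;
 *	struct timespec ts = { 1, 0 };
 *	int kq = kqueue();
 *	EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	int n = kevent(kq, &change, 1, &event, 1, &ts);
 *
 * A return of 0 means the timeout expired; otherwise event.data holds the
 * number of bytes available to read.
 */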
1137
1138static int
1139kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
1140 struct kevent_copyops *k_ops, const char *struct_name)
1141{
1142 struct timespec ts, *tsp;
1143#ifdef KTRACE
1144 struct kevent *eventlist = uap->eventlist;
1145#endif
1146 int error;
1147
1148 if (uap->timeout != NULL) {
1149 error = copyin(uap->timeout, &ts, sizeof(ts));
1150 if (error)
1151 return (error);
1152 tsp = &ts;
1153 } else
1154 tsp = NULL;
1155
1156#ifdef KTRACE
1157 if (KTRPOINT(td, KTR_STRUCT_ARRAY))
1158 ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
1159 uap->nchanges, k_ops->kevent_size);
1160#endif
1161
1162 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
1163 k_ops, tsp);
1164
1165#ifdef KTRACE
1166 if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
1167 ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
1168 td->td_retval[0], k_ops->kevent_size);
1169#endif
1170
1171 return (error);
1172}
1173
1174/*
1175 * Copy 'count' items into the destination list pointed to by uap->eventlist.
1176 */
1177static int
1178kevent_copyout(void *arg, struct kevent *kevp, int count)
1179{
1180 struct kevent_args *uap;
1181 int error;
1182
1183 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1184 uap = (struct kevent_args *)arg;
1185
1186 error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
1187 if (error == 0)
1188 uap->eventlist += count;
1189 return (error);
1190}
1191
1192/*
1193 * Copy 'count' items from the list pointed to by uap->changelist.
1194 */
1195static int
1196kevent_copyin(void *arg, struct kevent *kevp, int count)
1197{
1198 struct kevent_args *uap;
1199 int error;
1200
1201 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1202 uap = (struct kevent_args *)arg;
1203
1204 error = copyin(uap->changelist, kevp, count * sizeof *kevp);
1205 if (error == 0)
1206 uap->changelist += count;
1207 return (error);
1208}
1209
1210#ifdef COMPAT_FREEBSD11
1211static int
1212kevent11_copyout(void *arg, struct kevent *kevp, int count)
1213{
1214 struct freebsd11_kevent_args *uap;
1215 struct freebsd11_kevent kev11;
1216 int error, i;
1217
1218 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1219 uap = (struct freebsd11_kevent_args *)arg;
1220
1221 for (i = 0; i < count; i++) {
1222 kev11.ident = kevp->ident;
1223 kev11.filter = kevp->filter;
1224 kev11.flags = kevp->flags;
1225 kev11.fflags = kevp->fflags;
1226 kev11.data = kevp->data;
1227 kev11.udata = kevp->udata;
1228 error = copyout(&kev11, uap->eventlist, sizeof(kev11));
1229 if (error != 0)
1230 break;
1231 uap->eventlist++;
1232 kevp++;
1233 }
1234 return (error);
1235}
1236
1237/*
1238 * Copy 'count' items from the list pointed to by uap->changelist.
1239 */
1240static int
1241kevent11_copyin(void *arg, struct kevent *kevp, int count)
1242{
1243 struct freebsd11_kevent_args *uap;
1244 struct freebsd11_kevent kev11;
1245 int error, i;
1246
1247 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
1248 uap = (struct freebsd11_kevent_args *)arg;
1249
1250 for (i = 0; i < count; i++) {
1251 error = copyin(uap->changelist, &kev11, sizeof(kev11));
1252 if (error != 0)
1253 break;
1254 kevp->ident = kev11.ident;
1255 kevp->filter = kev11.filter;
1256 kevp->flags = kev11.flags;
1257 kevp->fflags = kev11.fflags;
1258 kevp->data = (uintptr_t)kev11.data;
1259 kevp->udata = kev11.udata;
1260 bzero(&kevp->ext, sizeof(kevp->ext));
1261 uap->changelist++;
1262 kevp++;
1263 }
1264 return (error);
1265}
1266
1267int
1268freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
1269{
1270 struct kevent_copyops k_ops = {
1271 .arg = uap,
1272 .k_copyout = kevent11_copyout,
1273 .k_copyin = kevent11_copyin,
1274 .kevent_size = sizeof(struct freebsd11_kevent),
1275 };
1276 struct g_kevent_args gk_args = {
1277 .fd = uap->fd,
1278 .changelist = uap->changelist,
1279 .nchanges = uap->nchanges,
1280 .eventlist = uap->eventlist,
1281 .nevents = uap->nevents,
1282 .timeout = uap->timeout,
1283 };
1284
1285 return (kern_kevent_generic(td, &gk_args, &k_ops, "freebsd11_kevent"));
1286}
1287#endif
1288
1289int
1290kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
1291 struct kevent_copyops *k_ops, const struct timespec *timeout)
1292{
1293 cap_rights_t rights;
1294 struct file *fp;
1295 int error;
1296
1297 cap_rights_init_zero(&rights);
1298 if (nchanges > 0)
1299 cap_rights_set_one(&rights, CAP_KQUEUE_CHANGE);
1300 if (nevents > 0)
1301 cap_rights_set_one(&rights, CAP_KQUEUE_EVENT);
1302 error = fget(td, fd, &rights, &fp);
1303 if (error != 0)
1304 return (error);
1305
1306 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
1307 fdrop(fp, td);
1308
1309 return (error);
1310}
1311
1312static int
1313kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
1314 struct kevent_copyops *k_ops, const struct timespec *timeout)
1315{
1316 struct kevent keva[KQ_NEVENTS];
1317 struct kevent *kevp, *changes;
1318 int i, n, nerrors, error;
1319
1320 if (nchanges < 0)
1321 return (EINVAL);
1322
1323 nerrors = 0;
1324 while (nchanges > 0) {
1325 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
1326 error = k_ops->k_copyin(k_ops->arg, keva, n);
1327 if (error)
1328 return (error);
1329 changes = keva;
1330 for (i = 0; i < n; i++) {
1331 kevp = &changes[i];
1332 if (!kevp->filter)
1333 continue;
1334 kevp->flags &= ~EV_SYSFLAGS;
1335 error = kqueue_register(kq, kevp, td, M_WAITOK);
1336 if (error || (kevp->flags & EV_RECEIPT)) {
1337 if (nevents == 0)
1338 return (error);
1339 kevp->flags = EV_ERROR;
1340 kevp->data = error;
1341 (void)k_ops->k_copyout(k_ops->arg, kevp, 1);
1342 nevents--;
1343 nerrors++;
1344 }
1345 }
1346 nchanges -= n;
1347 }
1348 if (nerrors) {
1349 td->td_retval[0] = nerrors;
1350 return (0);
1351 }
1352
1353 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
1354}
1355
1356int
1357kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
1358 struct kevent_copyops *k_ops, const struct timespec *timeout)
1359{
1360 struct kqueue *kq;
1361 int error;
1362
1363 error = kqueue_acquire(fp, &kq);
1364 if (error != 0)
1365 return (error);
1366 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
1367 kqueue_release(kq, 0);
1368 return (error);
1369}
1370
1371/*
1372 * Performs a kevent() call on a temporarily created kqueue. This can be
1373 * used to perform one-shot polling, similar to poll() and select().
1374 */
1375int
1376kern_kevent_anonymous(struct thread *td, int nevents,
1377 struct kevent_copyops *k_ops)
1378{
1379 struct kqueue kq = {};
1380 int error;
1381
1382 kqueue_init(&kq);
1383 kq.kq_refcnt = 1;
1384 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
1385 kqueue_drain(&kq, td);
1386 kqueue_destroy(&kq);
1387 return (error);
1388}
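/*
 * For illustration, a hypothetical in-kernel caller would supply copyops that
 * move kevents to and from kernel memory instead of userspace.  This is only
 * a sketch under that assumption; `kev_buf` and the copy helpers are not part
 * of this file:
 *
 *	static int
 *	my_kev_copyin(void *arg, struct kevent *kevp, int count)
 *	{
 *		memcpy(kevp, arg, count * sizeof(*kevp));
 *		return (0);
 *	}
 *
 *	struct kevent_copyops k_ops = {
 *		.arg = kev_buf,
 *		.k_copyin = my_kev_copyin,
 *		.k_copyout = my_kev_copyout,
 *		.kevent_size = sizeof(struct kevent),
 *	};
 *	error = kern_kevent_anonymous(td, 1, &k_ops);
 */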
1389
1390int
1391kqueue_add_filteropts(int filt, struct filterops *filtops)
1392{
1393 int error;
1394
1395 error = 0;
1396 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) {
1397 printf(
1398"trying to add a filterop that is out of range: %d is beyond %d\n",
1399 ~filt, EVFILT_SYSCOUNT);
1400 return EINVAL;
1401 }
1402 mtx_lock(&filterops_lock);
1403 if (sysfilt_ops[~filt].for_fop != &null_filtops &&
1404 sysfilt_ops[~filt].for_fop != NULL)
1405 error = EEXIST;
1406 else {
1407 sysfilt_ops[~filt].for_fop = filtops;
1408 sysfilt_ops[~filt].for_refcnt = 0;
1409 }
1410 mtx_unlock(&filterops_lock);
1411
1412 return (error);
1413}
1414
1415int
1416kqueue_del_filteropts(int filt)
1417{
1418 int error;
1419
1420 error = 0;
1421 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1422 return EINVAL;
1423
1424 mtx_lock(&filterops_lock);
1425 if (sysfilt_ops[~filt].for_fop == &null_filtops ||
1426 sysfilt_ops[~filt].for_fop == NULL)
1427 error = EINVAL;
1428 else if (sysfilt_ops[~filt].for_refcnt != 0)
1429 error = EBUSY;
1430 else {
1431 sysfilt_ops[~filt].for_fop = &null_filtops;
1432 sysfilt_ops[~filt].for_refcnt = 0;
1433 }
1434 mtx_unlock(&filterops_lock);
1435
1436 return error;
1437}
1438
1439static struct filterops *
1440kqueue_fo_find(int filt)
1441{
1442
1443 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1444 return NULL;
1445
1446 if (sysfilt_ops[~filt].for_nolock)
1447 return sysfilt_ops[~filt].for_fop;
1448
1449 mtx_lock(&filterops_lock);
1450 sysfilt_ops[~filt].for_refcnt++;
1451 if (sysfilt_ops[~filt].for_fop == NULL)
1452 sysfilt_ops[~filt].for_fop = &null_filtops;
1453 mtx_unlock(&filterops_lock);
1454
1455 return sysfilt_ops[~filt].for_fop;
1456}
1457
1458static void
1459kqueue_fo_release(int filt)
1460{
1461
1462 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
1463 return;
1464
1465 if (sysfilt_ops[~filt].for_nolock)
1466 return;
1467
1468 mtx_lock(&filterops_lock);
1469 KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
1470 ("filter object refcount not valid on release"));
1471 sysfilt_ops[~filt].for_refcnt--;
1472 mtx_unlock(&filterops_lock);
1473}
1474
1475/*
1476 * A ref to kq (obtained via kqueue_acquire) must be held.
1477 */
1478static int
1479kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td,
1480 int mflag)
1481{
1482 struct filterops *fops;
1483 struct file *fp;
1484 struct knote *kn, *tkn;
1485 struct knlist *knl;
1486 int error, filt, event;
1487 int haskqglobal, filedesc_unlock;
1488
1489 if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE))
1490 return (EINVAL);
1491
1492 fp = NULL;
1493 kn = NULL;
1494 knl = NULL;
1495 error = 0;
1496 haskqglobal = 0;
1497 filedesc_unlock = 0;
1498
1499 filt = kev->filter;
1500 fops = kqueue_fo_find(filt);
1501 if (fops == NULL)
1502 return EINVAL;
1503
1504 if (kev->flags & EV_ADD) {
1505 /* Reject an invalid flag pair early */
1506 if (kev->flags & EV_KEEPUDATA) {
1507 tkn = NULL;
1508 error = EINVAL;
1509 goto done;
1510 }
1511
1512 /*
1513 * Prevent waiting with locks. Non-sleepable
1514 * allocation failures are handled in the loop, only
1515 * if the spare knote appears to be actually required.
1516 */
1517 tkn = knote_alloc(mflag);
1518 } else {
1519 tkn = NULL;
1520 }
1521
1522findkn:
1523 if (fops->f_isfd) {
1524 KASSERT(td != NULL, ("td is NULL"));
1525 if (kev->ident > INT_MAX)
1526 error = EBADF;
1527 else
1528 error = fget(td, kev->ident, &cap_event_rights, &fp);
1529 if (error)
1530 goto done;
1531
1532 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1533 kev->ident, M_NOWAIT) != 0) {
1534 /* try again */
1535 fdrop(fp, td);
1536 fp = NULL;
1537 error = kqueue_expand(kq, fops, kev->ident, mflag);
1538 if (error)
1539 goto done;
1540 goto findkn;
1541 }
1542
1543 if (fp->f_type == DTYPE_KQUEUE) {
1544 /*
1545 * If we add some intelligence about what we are doing,
1546 * we should be able to support events on ourselves.
1547 * We need to know when we are doing this to prevent
1548 * getting both the knlist lock and the kq lock since
1549 * they are the same thing.
1550 */
1551 if (fp->f_data == kq) {
1552 error = EINVAL;
1553 goto done;
1554 }
1555
1556 /*
1557 * Pre-lock the filedesc before the global
1558 * lock mutex, see the comment in
1559 * kqueue_close().
1560 */
1561 FILEDESC_XLOCK(td->td_proc->p_fd);
1562 filedesc_unlock = 1;
1563 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1564 }
1565
1566 KQ_LOCK(kq);
1567 if (kev->ident < kq->kq_knlistsize) {
1568 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1569 if (kev->filter == kn->kn_filter)
1570 break;
1571 }
1572 } else {
1573 if ((kev->flags & EV_ADD) == EV_ADD) {
1574 error = kqueue_expand(kq, fops, kev->ident, mflag);
1575 if (error != 0)
1576 goto done;
1577 }
1578
1579 KQ_LOCK(kq);
1580
1581 /*
1582 * If possible, find an existing knote to use for this kevent.
1583 */
1584 if (kev->filter == EVFILT_PROC &&
1585 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) {
1586 /* This is an internal creation of a process tracking
1587 * note. Don't attempt to coalesce this with an
1588 * existing note.
1589 */
1590 ;
1591 } else if (kq->kq_knhashmask != 0) {
1592 struct klist *list;
1593
1594 list = &kq->kq_knhash[
1595 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1596 SLIST_FOREACH(kn, list, kn_link)
1597 if (kev->ident == kn->kn_id &&
1598 kev->filter == kn->kn_filter)
1599 break;
1600 }
1601 }
1602
1603 /* knote is in the process of changing, wait for it to stabilize. */
1604 if (kn != NULL && kn_in_flux(kn)) {
1605 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1606 if (filedesc_unlock) {
1607 FILEDESC_XUNLOCK(td->td_proc->p_fd);
1608 filedesc_unlock = 0;
1609 }
1610 kq->kq_state |= KQ_FLUXWAIT;
1611 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1612 if (fp != NULL) {
1613 fdrop(fp, td);
1614 fp = NULL;
1615 }
1616 goto findkn;
1617 }
1618
1619 /*
1620 * kn now contains the matching knote, or NULL if no match
1621 */
1622 if (kn == NULL) {
1623 if (kev->flags & EV_ADD) {
1624 kn = tkn;
1625 tkn = NULL;
1626 if (kn == NULL) {
1627 KQ_UNLOCK(kq);
1628 error = ENOMEM;
1629 goto done;
1630 }
1631 kn->kn_fp = fp;
1632 kn->kn_kq = kq;
1633 kn->kn_fop = fops;
1634 /*
1635 * apply reference counts to knote structure, and
1636 * do not release it at the end of this routine.
1637 */
1638 fops = NULL;
1639 fp = NULL;
1640
1641 kn->kn_sfflags = kev->fflags;
1642 kn->kn_sdata = kev->data;
1643 kev->fflags = 0;
1644 kev->data = 0;
1645 kn->kn_kevent = *kev;
1646 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE |
1647 EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT);
1648 kn->kn_status = KN_DETACHED;
1649 if ((kev->flags & EV_DISABLE) != 0)
1650 kn->kn_status |= KN_DISABLED;
1651 kn_enter_flux(kn);
1652
1653 error = knote_attach(kn, kq);
1654 KQ_UNLOCK(kq);
1655 if (error != 0) {
1656 tkn = kn;
1657 goto done;
1658 }
1659
1660 if ((error = kn->kn_fop->f_attach(kn)) != 0) {
1661 knote_drop_detached(kn, td);
1662 goto done;
1663 }
1664 knl = kn_list_lock(kn);
1665 goto done_ev_add;
1666 } else {
1667 /* No matching knote and the EV_ADD flag is not set. */
1668 KQ_UNLOCK(kq);
1669 error = ENOENT;
1670 goto done;
1671 }
1672 }
1673
1674 if (kev->flags & EV_DELETE) {
1675 kn_enter_flux(kn);
1676 KQ_UNLOCK(kq);
1677 knote_drop(kn, td);
1678 goto done;
1679 }
1680
1681 if (kev->flags & EV_FORCEONESHOT) {
1682 kn->kn_flags |= EV_ONESHOT;
1683 KNOTE_ACTIVATE(kn, 1);
1684 }
1685
1686 if ((kev->flags & EV_ENABLE) != 0)
1687 kn->kn_status &= ~KN_DISABLED;
1688 else if ((kev->flags & EV_DISABLE) != 0)
1689 kn->kn_status |= KN_DISABLED;
1690
1691 /*
1692 * The user may change some filter values after the initial EV_ADD,
1693 * but doing so will not reset any filter which has already been
1694 * triggered.
1695 */
1696 kn->kn_status |= KN_SCAN;
1697 kn_enter_flux(kn);
1698 KQ_UNLOCK(kq);
1699 knl = kn_list_lock(kn);
1700 if ((kev->flags & EV_KEEPUDATA) == 0)
1701 kn->kn_kevent.udata = kev->udata;
1702 if (!fops->f_isfd && fops->f_touch != NULL) {
1703 fops->f_touch(kn, kev, EVENT_REGISTER);
1704 } else {
1705 kn->kn_sfflags = kev->fflags;
1706 kn->kn_sdata = kev->data;
1707 }
1708
1709done_ev_add:
1710 /*
1711 * We can get here with kn->kn_knlist == NULL. This can happen when
1712 * the initial attach event decides that the event is "completed"
1713 * already, e.g., filt_procattach() is called on a zombie process. It
1714 * will call filt_proc() which will remove it from the list, and NULL
1715 * kn_knlist.
1716 *
1717 * KN_DISABLED will be stable while the knote is in flux, so the
1718 * unlocked read will not race with an update.
1719 */
1720 if ((kn->kn_status & KN_DISABLED) == 0)
1721 event = kn->kn_fop->f_event(kn, 0);
1722 else
1723 event = 0;
1724
1725 KQ_LOCK(kq);
1726 if (event)
1727 kn->kn_status |= KN_ACTIVE;
1728 if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) ==
1729 KN_ACTIVE)
1730 knote_enqueue(kn);
1731 kn->kn_status &= ~KN_SCAN;
1732 kn_leave_flux(kn);
1733 kn_list_unlock(knl);
1734 KQ_UNLOCK_FLUX(kq);
1735
1736done:
1737 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1738 if (filedesc_unlock)
1739 FILEDESC_XUNLOCK(td->td_proc->p_fd);
1740 if (fp != NULL)
1741 fdrop(fp, td);
1742 knote_free(tkn);
1743 if (fops != NULL)
1744 kqueue_fo_release(filt);
1745 return (error);
1746}
1747
1748static int
1749kqueue_acquire(struct file *fp, struct kqueue **kqp)
1750{
1751 int error;
1752 struct kqueue *kq;
1753
1754 error = 0;
1755
1756 kq = fp->f_data;
1757 if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1758 return (EBADF);
1759 *kqp = kq;
1760 KQ_LOCK(kq);
1761 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1762 KQ_UNLOCK(kq);
1763 return (EBADF);
1764 }
1765 kq->kq_refcnt++;
1766 KQ_UNLOCK(kq);
1767
1768 return error;
1769}
1770
1771static void
1772kqueue_release(struct kqueue *kq, int locked)
1773{
1774 if (locked)
1775 KQ_OWNED(kq);
1776 else
1777 KQ_LOCK(kq);
1778 kq->kq_refcnt--;
1779 if (kq->kq_refcnt == 1)
1780 wakeup(&kq->kq_refcnt);
1781 if (!locked)
1782 KQ_UNLOCK(kq);
1783}
1784
1785void
1786kqueue_drain_schedtask(void)
1787{
1788 taskqueue_quiesce(taskqueue_kqueue_ctx);
1789}
1790
1791static void
1792kqueue_schedtask(struct kqueue *kq)
1793{
1794 struct thread *td;
1795
1796 KQ_OWNED(kq);
1797 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1798 ("scheduling kqueue task while draining"));
1799
1800 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1801 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
1802 kq->kq_state |= KQ_TASKSCHED;
1803 td = curthread;
1804 thread_lock(td);
1805 td->td_flags |= TDF_ASTPENDING | TDF_KQTICKLED;
1806 thread_unlock(td);
1807 }
1808}
1809
1810/*
1811 * Expand the kq to make sure we have storage for fops/ident pair.
1812 *
1813 * Return 0 on success (or no work necessary), return errno on failure.
1814 */
1815static int
1816kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1817 int mflag)
1818{
1819 struct klist *list, *tmp_knhash, *to_free;
1820 u_long tmp_knhashmask;
1821 int error, fd, size;
1822
1823 KQ_NOTOWNED(kq);
1824
1825 error = 0;
1826 to_free = NULL;
1827 if (fops->f_isfd) {
1828 fd = ident;
1829 if (kq->kq_knlistsize <= fd) {
1830 size = kq->kq_knlistsize;
1831 while (size <= fd)
1832 size += KQEXTENT;
1833 list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
1834 if (list == NULL)
1835 return ENOMEM;
1836 KQ_LOCK(kq);
1837 if ((kq->kq_state & KQ_CLOSING) != 0) {
1838 to_free = list;
1839 error = EBADF;
1840 } else if (kq->kq_knlistsize > fd) {
1841 to_free = list;
1842 } else {
1843 if (kq->kq_knlist != NULL) {
1844 bcopy(kq->kq_knlist, list,
1845 kq->kq_knlistsize * sizeof(*list));
1846 to_free = kq->kq_knlist;
1847 kq->kq_knlist = NULL;
1848 }
1849 bzero((caddr_t)list +
1850 kq->kq_knlistsize * sizeof(*list),
1851 (size - kq->kq_knlistsize) * sizeof(*list));
1852 kq->kq_knlistsize = size;
1853 kq->kq_knlist = list;
1854 }
1855 KQ_UNLOCK(kq);
1856 }
1857 } else {
1858 if (kq->kq_knhashmask == 0) {
1859 tmp_knhash = hashinit_flags(KN_HASHSIZE, M_KQUEUE,
1860 &tmp_knhashmask, (mflag & M_WAITOK) != 0 ?
1861 HASH_WAITOK : HASH_NOWAIT);
1862 if (tmp_knhash == NULL)
1863 return (ENOMEM);
1864 KQ_LOCK(kq);
1865 if ((kq->kq_state & KQ_CLOSING) != 0) {
1866 to_free = tmp_knhash;
1867 error = EBADF;
1868 } else if (kq->kq_knhashmask == 0) {
1869 kq->kq_knhash = tmp_knhash;
1870 kq->kq_knhashmask = tmp_knhashmask;
1871 } else {
1872 to_free = tmp_knhash;
1873 }
1874 KQ_UNLOCK(kq);
1875 }
1876 }
1877 free(to_free, M_KQUEUE);
1878
1879 KQ_NOTOWNED(kq);
1880 return (error);
1881}
1882
1883static void
1884kqueue_task(void *arg, int pending)
1885{
1886 struct kqueue *kq;
1887 int haskqglobal;
1888
1889 haskqglobal = 0;
1890 kq = arg;
1891
1892 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
1893 KQ_LOCK(kq);
1894
1895 KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1896
1897 kq->kq_state &= ~KQ_TASKSCHED;
1898 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1899 wakeup(&kq->kq_state);
1900 }
1901 KQ_UNLOCK(kq);
1902 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
1903}
1904
1905/*
1906 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
1907 * We treat KN_MARKER knotes as if they are in flux.
1908 */
1909static int
1910kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1911 const struct timespec *tsp, struct kevent *keva, struct thread *td)
1912{
1913 struct kevent *kevp;
1914 struct knote *kn, *marker;
1915 struct knlist *knl;
1916 sbintime_t asbt, rsbt;
1917 int count, error, haskqglobal, influx, nkev, touch;
1918
1919 count = maxevents;
1920 nkev = 0;
1921 error = 0;
1922 haskqglobal = 0;
1923
1924 if (maxevents == 0)
1925 goto done_nl;
1926 if (maxevents < 0) {
1927 error = EINVAL;
1928 goto done_nl;
1929 }
1930
1931 rsbt = 0;
1932 if (tsp != NULL) {
1933 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
1934 tsp->tv_nsec >= 1000000000) {
1935 error = EINVAL;
1936 goto done_nl;
1937 }
1938 if (timespecisset(tsp)) {
1939 if (tsp->tv_sec <= INT32_MAX) {
1940 rsbt = tstosbt(*tsp);
1941 if (TIMESEL(&asbt, rsbt))
1942 asbt += tc_tick_sbt;
1943 if (asbt <= SBT_MAX - rsbt)
1944 asbt += rsbt;
1945 else
1946 asbt = 0;
1947 rsbt >>= tc_precexp;
1948 } else
1949 asbt = 0;
1950 } else
1951 asbt = -1;
1952 } else
1953 asbt = 0;
1954 marker = knote_alloc(M_WAITOK);
1955 marker->kn_status = KN_MARKER;
1956 KQ_LOCK(kq);
1957
1958retry:
1959 kevp = keva;
1960 if (kq->kq_count == 0) {
1961 if (asbt == -1) {
1962 error = EWOULDBLOCK;
1963 } else {
1964 kq->kq_state |= KQ_SLEEP;
1965 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1966 "kqread", asbt, rsbt, C_ABSOLUTE);
1967 }
1968 if (error == 0)
1969 goto retry;
1970 /* don't restart after signals... */
1971 if (error == ERESTART)
1972 error = EINTR;
1973 else if (error == EWOULDBLOCK)
1974 error = 0;
1975 goto done;
1976 }
1977
1978 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1979 influx = 0;
1980 while (count) {
1981 KQ_OWNED(kq);
1982 kn = TAILQ_FIRST(&kq->kq_head);
1983
1984 if ((kn->kn_status == KN_MARKER && kn != marker) ||
1985 kn_in_flux(kn)) {
1986 if (influx) {
1987 influx = 0;
1988 KQ_FLUX_WAKEUP(kq);
1989 }
1990 kq->kq_state |= KQ_FLUXWAIT;
1991 error = msleep(kq, &kq->kq_lock, PSOCK,
1992 "kqflxwt", 0);
1993 continue;
1994 }
1995
1996 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1997 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
1998 kn->kn_status &= ~KN_QUEUED;
1999 kq->kq_count--;
2000 continue;
2001 }
2002 if (kn == marker) {
2003 KQ_FLUX_WAKEUP(kq);
2004 if (count == maxevents)
2005 goto retry;
2006 goto done;
2007 }
2008 KASSERT(!kn_in_flux(kn),
2009 ("knote %p is unexpectedly in flux", kn));
2010
2011 if ((kn->kn_flags & EV_DROP) == EV_DROP) {
2012 kn->kn_status &= ~KN_QUEUED;
2013 kn_enter_flux(kn);
2014 kq->kq_count--;
2015 KQ_UNLOCK(kq);
2016 /*
2017 * We don't need to lock the list since we've
2018 * marked it as in flux.
2019 */
2020 knote_drop(kn, td);
2021 KQ_LOCK(kq);
2022 continue;
2023 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
2024 kn->kn_status &= ~KN_QUEUED;
2025 kn_enter_flux(kn);
2026 kq->kq_count--;
2027 KQ_UNLOCK(kq);
2028 /*
2029 * We don't need to lock the list since we've
2030 * marked the knote as being in flux.
2031 */
2032 *kevp = kn->kn_kevent;
2033 knote_drop(kn, td);
2034 KQ_LOCK(kq);
2035 kn = NULL;
2036 } else {
2037 kn->kn_status |= KN_SCAN;
2038 kn_enter_flux(kn);
2039 KQ_UNLOCK(kq);
2040 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
2041 KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
2042 knl = kn_list_lock(kn);
2043 if (kn->kn_fop->f_event(kn, 0) == 0) {
2044 KQ_LOCK(kq);
2045 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
2046 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
2047 KN_SCAN);
2048 kn_leave_flux(kn);
2049 kq->kq_count--;
2050 kn_list_unlock(knl);
2051 influx = 1;
2052 continue;
2053 }
2054 touch = (!kn->kn_fop->f_isfd &&
2055 kn->kn_fop->f_touch != NULL);
2056 if (touch)
2057 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
2058 else
2059 *kevp = kn->kn_kevent;
2060 KQ_LOCK(kq);
2061 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
2062 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
2063 /*
2064 * Manually clear knotes who weren't
2065 * 'touch'ed.
2066 */
2067 if (touch == 0 && kn->kn_flags & EV_CLEAR) {
2068 kn->kn_data = 0;
2069 kn->kn_fflags = 0;
2070 }
2071 if (kn->kn_flags & EV_DISPATCH)
2072 kn->kn_status |= KN_DISABLED;
2073 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
2074 kq->kq_count--;
2075 } else
2076 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2077
2078 kn->kn_status &= ~KN_SCAN;
2079 kn_leave_flux(kn);
2080 kn_list_unlock(knl);
2081 influx = 1;
2082 }
2083
2084 /* we are returning a copy to the user */
2085 kevp++;
2086 nkev++;
2087 count--;
2088
2089 if (nkev == KQ_NEVENTS) {
2090 influx = 0;
2091 KQ_UNLOCK_FLUX(kq);
2092 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2093 nkev = 0;
2094 kevp = keva;
2095 KQ_LOCK(kq);
2096 if (error)
2097 break;
2098 }
2099 }
2100 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
2101done:
2102 KQ_OWNED(kq);
2103 KQ_UNLOCK_FLUX(kq);
2104 knote_free(marker);
2105done_nl:
2106 KQ_NOTOWNED(kq);
2107 if (nkev != 0)
2108 error = k_ops->k_copyout(k_ops->arg, keva, nkev);
2109 td->td_retval[0] = maxevents - count;
2110 return (error);
2111}
2112
2113/*ARGSUSED*/
2114static int
2115kqueue_ioctl(struct file *fp, u_long cmd, void *data,
2116 struct ucred *active_cred, struct thread *td)
2117{
2118 /*
2119 * Enabling sigio causes two major problems:
2120 * 1) infinite recursion:
2121 * Synopsis: kevent is being used to track signals and has FIOASYNC
2122 * set. On receipt of a signal this will cause the kqueue to recurse
2123 * into itself over and over. Sending the sigio causes the kqueue
2124 * to become ready, which in turn posts sigio again, forever.
2125 * Solution: this can be solved by setting a flag in the kqueue that
2126 * we have a SIGIO in progress.
2127 * 2) locking problems:
2128 * Synopsis: Kqueue is a leaf subsystem, but adding signalling puts
2129 * us above the proc and pgrp locks.
2130 * Solution: Post a signal using an async mechanism, being sure to
2131 * record a generation count in the delivery so that we do not deliver
2132 * a signal to the wrong process.
2133 *
2134 * Note, these two mechanisms are somewhat mutually exclusive!
2135 */
2136#if 0
2137 struct kqueue *kq;
2138
2139 kq = fp->f_data;
2140 switch (cmd) {
2141 case FIOASYNC:
2142 if (*(int *)data) {
2143 kq->kq_state |= KQ_ASYNC;
2144 } else {
2145 kq->kq_state &= ~KQ_ASYNC;
2146 }
2147 return (0);
2148
2149 case FIOSETOWN:
2150 return (fsetown(*(int *)data, &kq->kq_sigio));
2151
2152 case FIOGETOWN:
2153 *(int *)data = fgetown(&kq->kq_sigio);
2154 return (0);
2155 }
2156#endif
2157
2158 return (ENOTTY);
2159}
2160
2161/*ARGSUSED*/
2162static int
2163kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
2164 struct thread *td)
2165{
2166 struct kqueue *kq;
2167 int revents = 0;
2168 int error;
2169
2170 if ((error = kqueue_acquire(fp, &kq)))
2171 return POLLERR;
2172
2173 KQ_LOCK(kq);
2174 if (events & (POLLIN | POLLRDNORM)) {
2175 if (kq->kq_count) {
2176 revents |= events & (POLLIN | POLLRDNORM);
2177 } else {
2178 selrecord(td, &kq->kq_sel);
2179 if (SEL_WAITING(&kq->kq_sel))
2180 kq->kq_state |= KQ_SEL;
2181 }
2182 }
2183 kqueue_release(kq, 1);
2184 KQ_UNLOCK(kq);
2185 return (revents);
2186}
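kqueue_poll() is what makes a kqueue descriptor itself selectable: it reports POLLIN/POLLRDNORM whenever kq_count is non-zero and otherwise selrecord()s the caller. A small userspace sketch (names and messages are illustrative) that exercises it by triggering an EVFILT_USER event and then polling the kqueue fd:

#include <sys/types.h>
#include <sys/event.h>
#include <err.h>
#include <poll.h>
#include <stdio.h>

int
main(void)
{
	struct kevent kev;
	struct pollfd pfd;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* Register a user event, then trigger it so kq_count becomes 1. */
	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: EV_ADD");
	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: NOTE_TRIGGER");

	pfd.fd = kq;
	pfd.events = POLLIN;
	pfd.revents = 0;
	if (poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLIN) != 0)
		printf("kqueue fd polls readable while an event is pending\n");
	return (0);
}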
2187
2188/*ARGSUSED*/
2189static int
2190kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred)
2191{
2192
2193 bzero((void *)st, sizeof *st);
2194 /*
2195 * We no longer return kq_count because the unlocked value is useless.
2196 * If you spent all this time getting the count, why not spend your
2197 * syscall better by calling kevent?
2198 *
2199 * XXX - This is needed for libc_r.
2200 */
2201 st->st_mode = S_IFIFO;
2202 return (0);
2203}
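The effect of kqueue_stat() is visible from userspace: the buffer comes back zeroed except for st_mode, which claims S_IFIFO for the benefit of libc_r-era callers. A tiny sketch (illustrative only):

#include <sys/event.h>
#include <sys/stat.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct stat sb;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	if (fstat(kq, &sb) == -1)
		err(1, "fstat");
	/* Everything except st_mode was bzero()ed by kqueue_stat(). */
	printf("S_ISFIFO=%d nlink=%ju size=%jd\n", (int)S_ISFIFO(sb.st_mode),
	    (uintmax_t)sb.st_nlink, (intmax_t)sb.st_size);
	return (0);
}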
2204
2205static void
2206kqueue_drain(struct kqueue *kq, struct thread *td)
2207{
2208 struct knote *kn;
2209 int i;
2210
2211 KQ_LOCK(kq);
2212
2213 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
2214 ("kqueue already closing"));
2215 kq->kq_state |= KQ_CLOSING;
2216 if (kq->kq_refcnt > 1)
2217 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
2218
2219 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
2220
2221 KASSERT(knlist_empty(&kq->kq_sel.si_note),
2222 ("kqueue's knlist not empty"));
2223
2224 for (i = 0; i < kq->kq_knlistsize; i++) {
2225 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
2226 if (kn_in_flux(kn)) {
2227 kq->kq_state |= KQ_FLUXWAIT;
2228 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
2229 continue;
2230 }
2231 kn_enter_flux(kn);
2232 KQ_UNLOCK(kq);
2233 knote_drop(kn, td);
2234 KQ_LOCK(kq);
2235 }
2236 }
2237 if (kq->kq_knhashmask != 0) {
2238 for (i = 0; i <= kq->kq_knhashmask; i++) {
2239 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
2240 if (kn_in_flux(kn)) {
2241 kq->kq_state |= KQ_FLUXWAIT;
2242 msleep(kq, &kq->kq_lock, PSOCK,
2243 "kqclo2", 0);
2244 continue;
2245 }
2246 kn_enter_flux(kn);
2247 KQ_UNLOCK(kq);
2248 knote_drop(kn, td);
2249 KQ_LOCK(kq);
2250 }
2251 }
2252 }
2253
2254 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
2255 kq->kq_state |= KQ_TASKDRAIN;
2256 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
2257 }
2258
2259 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2260 selwakeuppri(&kq->kq_sel, PSOCK);
2261 if (!SEL_WAITING(&kq->kq_sel))
2262 kq->kq_state &= ~KQ_SEL;
2263 }
2264
2265 KQ_UNLOCK(kq);
2266}
2267
2268static void
2269kqueue_destroy(struct kqueue *kq)
2270{
2271
2272 KASSERT(kq->kq_fdp == NULL,
2273 ("kqueue still attached to a file descriptor"));
2274 seldrain(&kq->kq_sel);
2275 knlist_destroy(&kq->kq_sel.si_note);
2276 mtx_destroy(&kq->kq_lock);
2277
2278 if (kq->kq_knhash != NULL)
2279 free(kq->kq_knhash, M_KQUEUE);
2280 if (kq->kq_knlist != NULL)
2281 free(kq->kq_knlist, M_KQUEUE);
2282
2283 funsetown(&kq->kq_sigio);
2284}
2285
2286/*ARGSUSED*/
2287static int
2288kqueue_close(struct file *fp, struct thread *td)
2289{
2290 struct kqueue *kq = fp->f_data;
2291 struct filedesc *fdp;
2292 int error;
2293 int filedesc_unlock;
2294
2295 if ((error = kqueue_acquire(fp, &kq)))
2296 return error;
2297 kqueue_drain(kq, td);
2298
2299 /*
2300 * We could be called due to knote_drop() doing fdrop(), itself
2301 * called from kqueue_register(). In that case the global
2302 * lock is owned, and the filedesc sx was locked beforehand so that
2303 * the sleepable lock is never taken after the non-sleepable one.
2304 */
2305 fdp = kq->kq_fdp;
2306 kq->kq_fdp = NULL;
2307 if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
2308 FILEDESC_XLOCK(fdp);
2309 filedesc_unlock = 1;
2310 } else
2311 filedesc_unlock = 0;
2312 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
2313 if (filedesc_unlock)
2314 FILEDESC_XUNLOCK(fdp);
2315
2316 kqueue_destroy(kq);
2317 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
2318 crfree(kq->kq_cred);
2319 free(kq, M_KQUEUE);
2320 fp->f_data = NULL;
2321
2322 return (0);
2323}
2324
2325static int
2326kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2327{
2328
2329 kif->kf_type = KF_TYPE_KQUEUE;
2330 return (0);
2331}
2332
2333static void
2334kqueue_wakeup(struct kqueue *kq)
2335{
2336 KQ_OWNED(kq);
2337
2338 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
2339 kq->kq_state &= ~KQ_SLEEP;
2340 wakeup(kq);
2341 }
2342 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
2343 selwakeuppri(&kq->kq_sel, PSOCK);
2344 if (!SEL_WAITING(&kq->kq_sel))
2345 kq->kq_state &= ~KQ_SEL;
2346 }
2347 if (!knlist_empty(&kq->kq_sel.si_note))
2348 kqueue_schedtask(kq);
2349 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
2350 pgsigio(&kq->kq_sigio, SIGIO, 0);
2351 }
2352}
2353
2354/*
2355 * Walk down a list of knotes, activating them if their event has triggered.
2356 *
2357 * There is a possibility to optimize in the case of one kq watching another.
2358 * Instead of scheduling a task to wake it up, you could pass enough state
2359 * down the chain to wake up the parent kqueue. Make this code functional
2360 * first.
2361 */
2362void
2363knote(struct knlist *list, long hint, int lockflags)
2364{
2365 struct kqueue *kq;
2366 struct knote *kn, *tkn;
2367 int error;
2368
2369 if (list == NULL)
2370 return;
2371
2372 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED);
2373
2374 if ((lockflags & KNF_LISTLOCKED) == 0)
2375 list->kl_lock(list->kl_lockarg);
2376
2377 /*
2378 * If we unlock the list lock (and enter influx), we can
2379 * eliminate the kqueue scheduling, but this will introduce
2380 * four lock/unlock pairs for each knote to test. Also, a marker
2381 * would be needed to keep the iteration position, since filters
2382 * or other threads could remove events.
2383 */
2384 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) {
2385 kq = kn->kn_kq;
2386 KQ_LOCK(kq);
2387 if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
2388 /*
2389 * Do not process the influx notes, except for
2390 * the influx coming from the kq unlock in the
2391 * kqueue_scan(). In the latter case, we do
2392 * not interfere with the scan, since the code
2393 * fragment in kqueue_scan() locks the knlist,
2394 * and cannot proceed until we have finished.
2395 */
2396 KQ_UNLOCK(kq);
2397 } else if ((lockflags & KNF_NOKQLOCK) != 0) {
2398 kn_enter_flux(kn);
2399 KQ_UNLOCK(kq);
2400 error = kn->kn_fop->f_event(kn, hint);
2401 KQ_LOCK(kq);
2402 kn_leave_flux(kn);
2403 if (error)
2404 KNOTE_ACTIVATE(kn, 1);
2405 KQ_UNLOCK_FLUX(kq);
2406 } else {
2407 if (kn->kn_fop->f_event(kn, hint))
2408 KNOTE_ACTIVATE(kn, 1);
2409 KQ_UNLOCK(kq);
2410 }
2411 }
2412 if ((lockflags & KNF_LISTLOCKED) == 0)
2413 list->kl_unlock(list->kl_lockarg);
2414}
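The usual producer of these notifications is a subsystem that owns a knlist and calls knote() whenever its state changes. The following is a hypothetical driver sketch (foo_softc, sc_mtx, sc_rsel and sc_bytes_ready are invented for illustration): the softc mutex doubles as the knlist lock (see knlist_init_mtx() below), so KNF_LISTLOCKED is passed.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/selinfo.h>
#include <sys/event.h>

struct foo_softc {
	struct mtx	sc_mtx;		/* also protects the knlist */
	struct selinfo	sc_rsel;	/* si_note holds our knotes */
	int		sc_bytes_ready;
};

/* Called from the driver's interrupt or completion path (hypothetical). */
static void
foo_data_ready(struct foo_softc *sc, int nbytes)
{

	mtx_lock(&sc->sc_mtx);
	sc->sc_bytes_ready += nbytes;
	/* Run each knote's f_event(); any that fire are activated. */
	knote(&sc->sc_rsel.si_note, 0, KNF_LISTLOCKED);
	mtx_unlock(&sc->sc_mtx);
}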
2415
2416/*
2417 * add a knote to a knlist
2418 */
2419void
2420knlist_add(struct knlist *knl, struct knote *kn, int islocked)
2421{
2422
2423 KNL_ASSERT_LOCK(knl, islocked);
2424 KQ_NOTOWNED(kn->kn_kq);
2425 KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn));
2426 KASSERT((kn->kn_status & KN_DETACHED) != 0,
2427 ("knote %p was not detached", kn));
2428 if (!islocked)
2429 knl->kl_lock(knl->kl_lockarg);
2430 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext);
2431 if (!islocked)
2432 knl->kl_unlock(knl->kl_lockarg);
2433 KQ_LOCK(kn->kn_kq);
2434 kn->kn_knlist = knl;
2435 kn->kn_status &= ~KN_DETACHED;
2436 KQ_UNLOCK(kn->kn_kq);
2437}
2438
2439static void
2440knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked,
2441 int kqislocked)
2442{
2443
2444 KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked"));
2445 KNL_ASSERT_LOCK(knl, knlislocked);
2446 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED);
2447 KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn));
2448 KASSERT((kn->kn_status & KN_DETACHED) == 0,
2449 ("knote %p was already detached", kn));
2450 if (!knlislocked)
2451 knl->kl_lock(knl->kl_lockarg);
2452 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext);
2453 kn->kn_knlist = NULL;
2454 if (!knlislocked)
2455 kn_list_unlock(knl);
2456 if (!kqislocked)
2457 KQ_LOCK(kn->kn_kq);
2458 kn->kn_status |= KN_DETACHED;
2459 if (!kqislocked)
2460 KQ_UNLOCK(kn->kn_kq);
2461}
2462
2463/*
2464 * remove knote from the specified knlist
2465 */
2466void
2467knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
2468{
2469
2470 knlist_remove_kq(knl, kn, islocked, 0);
2471}
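A filter backed by such a knlist typically hooks the knote on with knlist_add() at attach time and takes it back off with knlist_remove() in its f_detach method. Continuing the hypothetical foo_softc sketch above (the foo_* names and the d_kqfilter wiring are assumptions, not from this file):

#include <sys/conf.h>	/* struct cdev, in addition to the includes above */

static void
foo_kqdetach(struct knote *kn)
{
	struct foo_softc *sc = kn->kn_hook;

	knlist_remove(&sc->sc_rsel.si_note, kn, 0);
}

static int
foo_kqevent(struct knote *kn, long hint)
{
	struct foo_softc *sc = kn->kn_hook;

	/* Called with the knlist lock (sc_mtx) held when coming from knote(). */
	kn->kn_data = sc->sc_bytes_ready;
	return (kn->kn_data > 0);
}

static struct filterops foo_read_filtops = {
	.f_isfd = 1,
	.f_detach = foo_kqdetach,
	.f_event = foo_kqevent,
};

/* d_kqfilter entry point: attach the knote to the softc's knlist. */
static int
foo_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct foo_softc *sc = dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (EINVAL);
	kn->kn_fop = &foo_read_filtops;
	kn->kn_hook = sc;
	knlist_add(&sc->sc_rsel.si_note, kn, 0);
	return (0);
}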
2472
2473int
2474knlist_empty(struct knlist *knl)
2475{
2476
2477 KNL_ASSERT_LOCKED(knl);
2478 return (SLIST_EMPTY(&knl->kl_list));
2479}
2480
2481static struct mtx knlist_lock;
2482MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
2483 MTX_DEF);
2484static void knlist_mtx_lock(void *arg);
2485static void knlist_mtx_unlock(void *arg);
2486
2487static void
2489{
2490
2491 mtx_lock((struct mtx *)arg);
2492}
2493
2494static void
2496{
2497
2498 mtx_unlock((struct mtx *)arg);
2499}
2500
2501static void
2502knlist_mtx_assert_lock(void *arg, int what)
2503{
2504
2505 if (what == LA_LOCKED)
2506 mtx_assert((struct mtx *)arg, MA_OWNED);
2507 else
2508 mtx_assert((struct mtx *)arg, MA_NOTOWNED);
2509}
2510
2511static void
2513{
2514
2515 rw_rlock((struct rwlock *)arg);
2516}
2517
2518static void
2520{
2521
2522 rw_runlock((struct rwlock *)arg);
2523}
2524
2525static void
2526knlist_rw_assert_lock(void *arg, int what)
2527{
2528
2529 if (what == LA_LOCKED)
2530 rw_assert((struct rwlock *)arg, RA_LOCKED);
2531 else
2532 rw_assert((struct rwlock *)arg, RA_UNLOCKED);
2533}
2534
2535void
2536knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *),
2537 void (*kl_unlock)(void *),
2538 void (*kl_assert_lock)(void *, int))
2539{
2540
2541 if (lock == NULL)
2542 knl->kl_lockarg = &knlist_lock;
2543 else
2544 knl->kl_lockarg = lock;
2545
2546 if (kl_lock == NULL)
2547 knl->kl_lock = knlist_mtx_lock;
2548 else
2549 knl->kl_lock = kl_lock;
2550 if (kl_unlock == NULL)
2551 knl->kl_unlock = knlist_mtx_unlock;
2552 else
2553 knl->kl_unlock = kl_unlock;
2554 if (kl_assert_lock == NULL)
2555 knl->kl_assert_lock = knlist_mtx_assert_lock;
2556 else
2557 knl->kl_assert_lock = kl_assert_lock;
2558
2559 knl->kl_autodestroy = 0;
2560 SLIST_INIT(&knl->kl_list);
2561}
2562
2563void
2564knlist_init_mtx(struct knlist *knl, struct mtx *lock)
2565{
2566
2567 knlist_init(knl, lock, NULL, NULL, NULL);
2568}
2569
2570struct knlist *
2571knlist_alloc(struct mtx *lock)
2572{
2573 struct knlist *knl;
2574
2575 knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK);
2576 knlist_init_mtx(knl, lock);
2577 return (knl);
2578}
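In the common case a driver simply ties the knlist embedded in its selinfo to an existing mutex at attach time, so the default mtx callbacks chosen by knlist_init() apply. A sketch continuing the hypothetical foo_softc example (names are illustrative):

static void
foo_attach_events(struct foo_softc *sc)
{

	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
	/* sc_mtx becomes kl_lockarg; knlist_init() supplies the mtx
	 * lock/unlock/assert callbacks. */
	knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
}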
2579
2580void
2581knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock)
2582{
2583
2584 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock,
2585 knlist_rw_assert_lock);
2586 }
2587
2588void
2589knlist_destroy(struct knlist *knl)
2590{
2591
2592 KASSERT(KNLIST_EMPTY(knl),
2593 ("destroying knlist %p with knotes on it", knl));
2594}
2595
2596void
2597knlist_detach(struct knlist *knl)
2598{
2599
2600 KNL_ASSERT_LOCKED(knl);
2601 knl->kl_autodestroy = 1;
2602 if (knlist_empty(knl)) {
2603 knlist_destroy(knl);
2604 free(knl, M_KQUEUE);
2605 }
2606}
2607
2608/*
2609 * Even if we are locked, we may need to drop the lock to allow any influx
2610 * knotes time to "settle".
2611 */
2612void
2613knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
2614{
2615 struct knote *kn, *kn2;
2616 struct kqueue *kq;
2617
2618 KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
2619 if (islocked)
2620 KNL_ASSERT_LOCKED(knl);
2621 else {
2622 KNL_ASSERT_UNLOCKED(knl);
2623 again: /* need to reacquire lock since we have dropped it */
2624 knl->kl_lock(knl->kl_lockarg);
2625 }
2626
2627 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
2628 kq = kn->kn_kq;
2629 KQ_LOCK(kq);
2630 if (kn_in_flux(kn)) {
2631 KQ_UNLOCK(kq);
2632 continue;
2633 }
2634 knlist_remove_kq(knl, kn, 1, 1);
2635 if (killkn) {
2636 kn_enter_flux(kn);
2637 KQ_UNLOCK(kq);
2638 knote_drop_detached(kn, td);
2639 } else {
2640 /* Make sure cleared knotes disappear soon */
2641 kn->kn_flags |= EV_EOF | EV_ONESHOT;
2642 KQ_UNLOCK(kq);
2643 }
2644 kq = NULL;
2645 }
2646
2647 if (!SLIST_EMPTY(&knl->kl_list)) {
2648 /* there are still in flux knotes remaining */
2649 kn = SLIST_FIRST(&knl->kl_list);
2650 kq = kn->kn_kq;
2651 KQ_LOCK(kq);
2652 KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
2653 knl->kl_unlock(knl->kl_lockarg);
2654 kq->kq_state |= KQ_FLUXWAIT;
2655 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2656 kq = NULL;
2657 goto again;
2658 }
2659
2660 if (islocked)
2661 KNL_ASSERT_LOCKED(knl);
2662 else {
2663 knl->kl_unlock(knl->kl_lockarg);
2664 KNL_ASSERT_UNLOCKED(knl);
2665 }
2666}
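Teardown is the mirror image: detach paths normally use the knlist_clear() wrapper (knlist_cleardel() with killkn == 0) to shake any remaining knotes loose before destroying the list. Continuing the hypothetical foo_softc sketch (assumed names):

static void
foo_detach_events(struct foo_softc *sc)
{

	/* Mark remaining knotes EV_EOF|EV_ONESHOT and wait out influx ones. */
	knlist_clear(&sc->sc_rsel.si_note, 0);
	seldrain(&sc->sc_rsel);
	knlist_destroy(&sc->sc_rsel.si_note);
	mtx_destroy(&sc->sc_mtx);
}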
2667
2668/*
2669 * Remove all knotes referencing a specified fd. This must be called with the
2670 * FILEDESC lock held, which prevents a race where a new fd comes along,
2671 * occupies the entry, and we attach a knote to the wrong fd.
2672 */
2673void
2674knote_fdclose(struct thread *td, int fd)
2675{
2676 struct filedesc *fdp = td->td_proc->p_fd;
2677 struct kqueue *kq;
2678 struct knote *kn;
2679 int influx;
2680
2681 FILEDESC_XLOCK_ASSERT(fdp);
2682
2683 /*
2684 * We shouldn't have to worry about new kevents appearing on fd
2685 * since filedesc is locked.
2686 */
2687 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2688 KQ_LOCK(kq);
2689
2690again:
2691 influx = 0;
2692 while (kq->kq_knlistsize > fd &&
2693 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2694 if (kn_in_flux(kn)) {
2695 /* someone else might be waiting on our knote */
2696 if (influx)
2697 wakeup(kq);
2698 kq->kq_state |= KQ_FLUXWAIT;
2699 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2700 goto again;
2701 }
2702 kn_enter_flux(kn);
2703 KQ_UNLOCK(kq);
2704 influx = 1;
2705 knote_drop(kn, td);
2706 KQ_LOCK(kq);
2707 }
2708 KQ_UNLOCK_FLUX(kq);
2709 }
2710}
2711
2712static int
2713knote_attach(struct knote *kn, struct kqueue *kq)
2714{
2715 struct klist *list;
2716
2717 KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn));
2718 KQ_OWNED(kq);
2719
2720 if ((kq->kq_state & KQ_CLOSING) != 0)
2721 return (EBADF);
2722 if (kn->kn_fop->f_isfd) {
2723 if (kn->kn_id >= kq->kq_knlistsize)
2724 return (ENOMEM);
2725 list = &kq->kq_knlist[kn->kn_id];
2726 } else {
2727 if (kq->kq_knhash == NULL)
2728 return (ENOMEM);
2729 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2730 }
2731 SLIST_INSERT_HEAD(list, kn, kn_link);
2732 return (0);
2733}
2734
2735static void
2736knote_drop(struct knote *kn, struct thread *td)
2737{
2738
2739 if ((kn->kn_status & KN_DETACHED) == 0)
2740 kn->kn_fop->f_detach(kn);
2741 knote_drop_detached(kn, td);
2742}
2743
2744static void
2745knote_drop_detached(struct knote *kn, struct thread *td)
2746{
2747 struct kqueue *kq;
2748 struct klist *list;
2749
2750 kq = kn->kn_kq;
2751
2752 KASSERT((kn->kn_status & KN_DETACHED) != 0,
2753 ("knote %p still attached", kn));
2754 KQ_NOTOWNED(kq);
2755
2756 KQ_LOCK(kq);
2757 KASSERT(kn->kn_influx == 1,
2758 ("knote_drop called on %p with influx %d", kn, kn->kn_influx));
2759
2760 if (kn->kn_fop->f_isfd)
2761 list = &kq->kq_knlist[kn->kn_id];
2762 else
2763 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2764
2765 if (!SLIST_EMPTY(list))
2766 SLIST_REMOVE(list, kn, knote, kn_link);
2767 if (kn->kn_status & KN_QUEUED)
2768 knote_dequeue(kn);
2769 KQ_UNLOCK_FLUX(kq);
2770
2771 if (kn->kn_fop->f_isfd) {
2772 fdrop(kn->kn_fp, td);
2773 kn->kn_fp = NULL;
2774 }
2775 kqueue_fo_release(kn->kn_kevent.filter);
2776 kn->kn_fop = NULL;
2777 knote_free(kn);
2778}
2779
2780static void
2782{
2783 struct kqueue *kq = kn->kn_kq;
2784
2785 KQ_OWNED(kn->kn_kq);
2786 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));
2787
2788 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2789 kn->kn_status |= KN_QUEUED;
2790 kq->kq_count++;
2791 kqueue_wakeup(kq);
2792}
2793
2794static void
2796{
2797 struct kqueue *kq = kn->kn_kq;
2798
2799 KQ_OWNED(kn->kn_kq);
2800 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));
2801
2802 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2803 kn->kn_status &= ~KN_QUEUED;
2804 kq->kq_count--;
2805}
2806
2807static void
2809{
2810
2811 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
2812 NULL, NULL, UMA_ALIGN_PTR, 0);
2813}
2814SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
2815
2816static struct knote *
2817knote_alloc(int mflag)
2818{
2819
2820 return (uma_zalloc(knote_zone, mflag | M_ZERO));
2821}
2822
2823static void
2825{
2826
2827 uma_zfree(knote_zone, kn);
2828}
2829
2830/*
2831 * Register the kev w/ the kq specified by fd.
2832 */
2833int
2834kqfd_register(int fd, struct kevent *kev, struct thread *td, int mflag)
2835{
2836 struct kqueue *kq;
2837 struct file *fp;
2838 cap_rights_t rights;
2839 int error;
2840
2841 error = fget(td, fd, cap_rights_init_one(&rights, CAP_KQUEUE_CHANGE),
2842 &fp);
2843 if (error != 0)
2844 return (error);
2845 if ((error = kqueue_acquire(fp, &kq)) != 0)
2846 goto noacquire;
2847
2848 error = kqueue_register(kq, kev, td, mflag);
2849 kqueue_release(kq, 0);
2850
2851noacquire:
2852 fdrop(fp, td);
2853 return (error);
2854}
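kqfd_register() is the hook for kernel code that must deliver events into a kqueue the user handed over as a file descriptor, in the spirit of SIGEV_KEVENT-style notification. A hypothetical helper (foo_arm_timer, its parameters, and the M_WAITOK assumption are illustrative, not part of this file):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/event.h>
#include <sys/malloc.h>
#include <sys/proc.h>

/*
 * Arm a one-shot EVFILT_TIMER (milliseconds) on the kqueue the caller
 * referred to by kqfd; fd and capability validation happen inside
 * kqfd_register() via fget()/kqueue_acquire().
 */
static int
foo_arm_timer(struct thread *td, int kqfd, uintptr_t ident, int ms)
{
	struct kevent kev;

	memset(&kev, 0, sizeof(kev));
	kev.ident = ident;
	kev.filter = EVFILT_TIMER;
	kev.flags = EV_ADD | EV_ONESHOT;
	kev.data = ms;
	/* M_WAITOK: assumes a sleepable calling context. */
	return (kqfd_register(kqfd, &kev, td, M_WAITOK));
}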