FreeBSD kernel kern code
kern_ktrace.c
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California.
6 * Copyright (c) 2005 Robert N. M. Watson
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93
34 */
35
36#include <sys/cdefs.h>
37__FBSDID("$FreeBSD$");
38
39#include "opt_ktrace.h"
40
41#include <sys/param.h>
42#include <sys/capsicum.h>
43#include <sys/systm.h>
44#include <sys/fcntl.h>
45#include <sys/kernel.h>
46#include <sys/kthread.h>
47#include <sys/lock.h>
48#include <sys/mutex.h>
49#include <sys/malloc.h>
50#include <sys/mount.h>
51#include <sys/namei.h>
52#include <sys/priv.h>
53#include <sys/proc.h>
54#include <sys/resourcevar.h>
55#include <sys/unistd.h>
56#include <sys/vnode.h>
57#include <sys/socket.h>
58#include <sys/stat.h>
59#include <sys/ktrace.h>
60#include <sys/sx.h>
61#include <sys/sysctl.h>
62#include <sys/sysent.h>
63#include <sys/syslog.h>
64#include <sys/sysproto.h>
65
66#include <security/mac/mac_framework.h>
67
68/*
69 * The ktrace facility allows the tracing of certain key events in user space
70 * processes, such as system calls, signal delivery, context switches, and
71 * user generated events using utrace(2). It works by streaming event
72 * records and data to a vnode associated with the process using the
73 * ktrace(2) system call. In general, records can be written directly from
74 * the context that generates the event. One important exception to this is
75 * during a context switch, where sleeping is not permitted. To handle this
76 * case, trace events are generated using in-kernel ktr_request records, and
77 * then delivered to disk at a convenient moment -- either immediately, the
78 * next traceable event, at system call return, or at process exit.
79 *
80 * When dealing with multiple threads or processes writing to the same event
81 * log, ordering guarantees are weak: specifically, if an event has multiple
82 * records (i.e., system call enter and return), they may be interlaced with
83 * records from another event. Process and thread ID information is provided
84 * in the record, and user applications can de-interlace events if required.
85 */
86
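As a rough userspace illustration of the facility described in the comment above, here is a minimal sketch (assuming a kernel built with options KTRACE; the trace file name, trace-point set, and payload are arbitrary choices) that enables tracing on the calling process with ktrace(2) and emits one application-defined record with utrace(2):

#include <sys/param.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	const char msg[] = "example utrace record";
	int facs = KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_USER;

	/* Attach a trace file to this process and enable a few trace points. */
	if (ktrace("ktrace.out", KTROP_SET, facs, getpid()) == -1)
		err(1, "ktrace");

	/* Emit an application-defined record; it is logged as a KTR_USER event. */
	if (utrace(msg, sizeof(msg)) == -1)
		err(1, "utrace");

	/* Detach from the trace file; the log can then be decoded with kdump(1). */
	if (ktrace(NULL, KTROP_CLEAR, facs, getpid()) == -1)
		err(1, "ktrace clear");
	return (0);
}

When decoded with kdump(1), the system-call, return, and user records appear interleaved subject to the ordering caveats above.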
87static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");
88
89#ifdef KTRACE
90
91FEATURE(ktrace, "Kernel support for system-call tracing");
92
93#ifndef KTRACE_REQUEST_POOL
94#define KTRACE_REQUEST_POOL 100
95#endif
96
97struct ktr_request {
98 struct ktr_header ktr_header;
99 void *ktr_buffer;
100 union {
101 struct ktr_proc_ctor ktr_proc_ctor;
102 struct ktr_cap_fail ktr_cap_fail;
103 struct ktr_syscall ktr_syscall;
104 struct ktr_sysret ktr_sysret;
105 struct ktr_genio ktr_genio;
106 struct ktr_psig ktr_psig;
107 struct ktr_csw ktr_csw;
108 struct ktr_fault ktr_fault;
109 struct ktr_faultend ktr_faultend;
110 struct ktr_struct_array ktr_struct_array;
111 } ktr_data;
112 STAILQ_ENTRY(ktr_request) ktr_list;
113};
114
115static int data_lengths[] = {
116 [KTR_SYSCALL] = offsetof(struct ktr_syscall, ktr_args),
117 [KTR_SYSRET] = sizeof(struct ktr_sysret),
118 [KTR_NAMEI] = 0,
119 [KTR_GENIO] = sizeof(struct ktr_genio),
120 [KTR_PSIG] = sizeof(struct ktr_psig),
121 [KTR_CSW] = sizeof(struct ktr_csw),
122 [KTR_USER] = 0,
123 [KTR_STRUCT] = 0,
124 [KTR_SYSCTL] = 0,
125 [KTR_PROCCTOR] = sizeof(struct ktr_proc_ctor),
126 [KTR_PROCDTOR] = 0,
127 [KTR_CAPFAIL] = sizeof(struct ktr_cap_fail),
128 [KTR_FAULT] = sizeof(struct ktr_fault),
129 [KTR_FAULTEND] = sizeof(struct ktr_faultend),
130 [KTR_STRUCT_ARRAY] = sizeof(struct ktr_struct_array),
131};
132
133static STAILQ_HEAD(, ktr_request) ktr_free;
134
135static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
136 "KTRACE options");
137
138static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
139TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);
140
141u_int ktr_geniosize = PAGE_SIZE;
142SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RWTUN, &ktr_geniosize,
143 0, "Maximum size of genio event payload");
144
145/*
146 * Allow not sending SIGXFSZ to the traced process, in whose context the
147 * ktr record is written. The limit is taken from the process that set
148 * up ktrace, so killing the traced process is not completely fair.
149 */
150int ktr_filesize_limit_signal = 0;
151SYSCTL_INT(_kern_ktrace, OID_AUTO, filesize_limit_signal, CTLFLAG_RWTUN,
152 &ktr_filesize_limit_signal, 0,
153 "Send SIGXFSZ to the traced process when the log size limit is exceeded");
154
155static int print_message = 1;
156static struct mtx ktrace_mtx;
157static struct sx ktrace_sx;
158
159struct ktr_io_params {
160 struct vnode *vp;
161 struct ucred *cr;
162 off_t lim;
163 u_int refs;
164};
165
166static void ktrace_init(void *dummy);
167static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
168static u_int ktrace_resize_pool(u_int oldsize, u_int newsize);
169static struct ktr_request *ktr_getrequest_entered(struct thread *td, int type);
170static struct ktr_request *ktr_getrequest(int type);
171static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
172static struct ktr_io_params *ktr_freeproc(struct proc *p);
173static void ktr_freerequest(struct ktr_request *req);
174static void ktr_freerequest_locked(struct ktr_request *req);
175static void ktr_writerequest(struct thread *td, struct ktr_request *req);
176static int ktrcanset(struct thread *,struct proc *);
177static int ktrsetchildren(struct thread *, struct proc *, int, int,
178 struct ktr_io_params *);
179static int ktrops(struct thread *, struct proc *, int, int,
180 struct ktr_io_params *);
181static void ktrprocctor_entered(struct thread *, struct proc *);
182
183/*
184 * ktrace itself generates events, such as context switches, which we do not
185 * wish to trace. Maintain a flag, TDP_INKTRACE, on each thread to determine
186 * whether or not it is in a region where tracing of events should be
187 * suppressed.
188 */
189static void
190ktrace_enter(struct thread *td)
191{
192
193 KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
194 td->td_pflags |= TDP_INKTRACE;
195}
196
197static void
198ktrace_exit(struct thread *td)
199{
200
201 KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
202 td->td_pflags &= ~TDP_INKTRACE;
203}
204
205static void
206ktrace_assert(struct thread *td)
207{
208
209 KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
210}
211
212static void
213ktrace_init(void *dummy)
214{
215 struct ktr_request *req;
216 int i;
217
218 mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
219 sx_init(&ktrace_sx, "ktrace_sx");
220 STAILQ_INIT(&ktr_free);
221 for (i = 0; i < ktr_requestpool; i++) {
222 req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK |
223 M_ZERO);
224 STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
225 }
226}
227SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
228
229static int
230sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
231{
232 struct thread *td;
233 u_int newsize, oldsize, wantsize;
234 int error;
235
236 /* Handle easy read-only case first to avoid warnings from GCC. */
237 if (!req->newptr) {
238 oldsize = ktr_requestpool;
239 return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
240 }
241
242 error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
243 if (error)
244 return (error);
245 td = curthread;
246 ktrace_enter(td);
247 oldsize = ktr_requestpool;
248 newsize = ktrace_resize_pool(oldsize, wantsize);
249 ktrace_exit(td);
250 error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
251 if (error)
252 return (error);
253 if (wantsize > oldsize && newsize < wantsize)
254 return (ENOSPC);
255 return (0);
256}
257SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool,
258 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &ktr_requestpool, 0,
259 sysctl_kern_ktrace_request_pool, "IU",
260 "Pool buffer size for ktrace(1)");
261
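The handler above copies out the previous pool size and reports ENOSPC when a requested enlargement could not be fully satisfied. A minimal userspace sketch of exercising it through sysctlbyname(3) follows (the new size of 256 is an arbitrary choice, and setting the value requires sufficient privilege):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	u_int oldpool, newpool = 256;
	size_t oldlen = sizeof(oldpool);

	/* Read the current pool size and request a new one in a single call. */
	if (sysctlbyname("kern.ktrace.request_pool", &oldpool, &oldlen,
	    &newpool, sizeof(newpool)) == -1)
		err(1, "kern.ktrace.request_pool");	/* e.g. ENOSPC, EPERM */
	printf("request pool was %u entries, asked for %u\n", oldpool, newpool);
	return (0);
}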
262static u_int
263ktrace_resize_pool(u_int oldsize, u_int newsize)
264{
265 STAILQ_HEAD(, ktr_request) ktr_new;
266 struct ktr_request *req;
267 int bound;
268
269 print_message = 1;
270 bound = newsize - oldsize;
271 if (bound == 0)
272 return (ktr_requestpool);
273 if (bound < 0) {
274 mtx_lock(&ktrace_mtx);
275 /* Shrink pool down to newsize if possible. */
276 while (bound++ < 0) {
277 req = STAILQ_FIRST(&ktr_free);
278 if (req == NULL)
279 break;
280 STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
281 ktr_requestpool--;
282 free(req, M_KTRACE);
283 }
284 } else {
285 /* Grow pool up to newsize. */
286 STAILQ_INIT(&ktr_new);
287 while (bound-- > 0) {
288 req = malloc(sizeof(struct ktr_request), M_KTRACE,
289 M_WAITOK | M_ZERO);
290 STAILQ_INSERT_HEAD(&ktr_new, req, ktr_list);
291 }
292 mtx_lock(&ktrace_mtx);
293 STAILQ_CONCAT(&ktr_free, &ktr_new);
294 ktr_requestpool += (newsize - oldsize);
295 }
296 mtx_unlock(&ktrace_mtx);
297 return (ktr_requestpool);
298}
299
300/* ktr_getrequest() assumes that ktr_comm[] is the same size as td_name[]. */
301CTASSERT(sizeof(((struct ktr_header *)NULL)->ktr_comm) ==
302 (sizeof((struct thread *)NULL)->td_name));
303
304static struct ktr_request *
305ktr_getrequest_entered(struct thread *td, int type)
306{
307 struct ktr_request *req;
308 struct proc *p = td->td_proc;
309 int pm;
310
311 mtx_lock(&ktrace_mtx);
312 if (!KTRCHECK(td, type)) {
313 mtx_unlock(&ktrace_mtx);
314 return (NULL);
315 }
316 req = STAILQ_FIRST(&ktr_free);
317 if (req != NULL) {
318 STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
319 req->ktr_header.ktr_type = type;
320 if (p->p_traceflag & KTRFAC_DROP) {
321 req->ktr_header.ktr_type |= KTR_DROP;
322 p->p_traceflag &= ~KTRFAC_DROP;
323 }
324 mtx_unlock(&ktrace_mtx);
325 microtime(&req->ktr_header.ktr_time);
326 req->ktr_header.ktr_pid = p->p_pid;
327 req->ktr_header.ktr_tid = td->td_tid;
328 bcopy(td->td_name, req->ktr_header.ktr_comm,
329 sizeof(req->ktr_header.ktr_comm));
330 req->ktr_buffer = NULL;
331 req->ktr_header.ktr_len = 0;
332 } else {
333 p->p_traceflag |= KTRFAC_DROP;
334 pm = print_message;
335 print_message = 0;
336 mtx_unlock(&ktrace_mtx);
337 if (pm)
338 printf("Out of ktrace request objects.\n");
339 }
340 return (req);
341}
342
343static struct ktr_request *
344ktr_getrequest(int type)
345{
346 struct thread *td = curthread;
347 struct ktr_request *req;
348
349 ktrace_enter(td);
350 req = ktr_getrequest_entered(td, type);
351 if (req == NULL)
352 ktrace_exit(td);
353
354 return (req);
355}
356
357/*
358 * Some trace generation environments don't permit direct access to VFS,
359 * such as during a context switch where sleeping is not allowed. Under these
360 * circumstances, queue a request to the thread to be written asynchronously
361 * later.
362 */
363static void
364ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
365{
366
367 mtx_lock(&ktrace_mtx);
368 STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
369 mtx_unlock(&ktrace_mtx);
370 thread_lock(td);
371 td->td_flags |= TDF_ASTPENDING;
372 thread_unlock(td);
373}
374
375/*
376 * Drain any pending ktrace records from the per-thread queue to disk. This
377 * is used both internally before committing other records, and also on
378 * system call return. We drain all the ones we can find at the time when
379 * drain is requested, but don't keep draining after that as those events
380 * may be approximately "after" the current event.
381 */
382static void
383ktr_drain(struct thread *td)
384{
385 struct ktr_request *queued_req;
386 STAILQ_HEAD(, ktr_request) local_queue;
387
388 ktrace_assert(td);
389 sx_assert(&ktrace_sx, SX_XLOCKED);
390
391 STAILQ_INIT(&local_queue);
392
393 if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
394 mtx_lock(&ktrace_mtx);
395 STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
396 mtx_unlock(&ktrace_mtx);
397
398 while ((queued_req = STAILQ_FIRST(&local_queue))) {
399 STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
400 ktr_writerequest(td, queued_req);
401 ktr_freerequest(queued_req);
402 }
403 }
404}
405
406/*
407 * Submit a trace record for immediate commit to disk -- to be used only
408 * where entering VFS is OK. First drain any pending records that may have
409 * been cached in the thread.
410 */
411static void
412ktr_submitrequest(struct thread *td, struct ktr_request *req)
413{
414
415 ktrace_assert(td);
416
417 sx_xlock(&ktrace_sx);
418 ktr_drain(td);
419 ktr_writerequest(td, req);
420 ktr_freerequest(req);
421 sx_xunlock(&ktrace_sx);
422 ktrace_exit(td);
423}
424
425static void
426ktr_freerequest(struct ktr_request *req)
427{
428
429 mtx_lock(&ktrace_mtx);
430 ktr_freerequest_locked(req);
431 mtx_unlock(&ktrace_mtx);
432}
433
434static void
435ktr_freerequest_locked(struct ktr_request *req)
436{
437
438 mtx_assert(&ktrace_mtx, MA_OWNED);
439 if (req->ktr_buffer != NULL)
440 free(req->ktr_buffer, M_KTRACE);
441 STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
442}
443
444static void
445ktr_io_params_ref(struct ktr_io_params *kiop)
446{
447 mtx_assert(&ktrace_mtx, MA_OWNED);
448 kiop->refs++;
449}
450
451static struct ktr_io_params *
452ktr_io_params_rele(struct ktr_io_params *kiop)
453{
454 mtx_assert(&ktrace_mtx, MA_OWNED);
455 if (kiop == NULL)
456 return (NULL);
457 KASSERT(kiop->refs > 0, ("kiop ref == 0 %p", kiop));
458 return (--(kiop->refs) == 0 ? kiop : NULL);
459}
460
461void
462ktr_io_params_free(struct ktr_io_params *kiop)
463{
464 if (kiop == NULL)
465 return;
466
467 MPASS(kiop->refs == 0);
468 vn_close(kiop->vp, FWRITE, kiop->cr, curthread);
469 crfree(kiop->cr);
470 free(kiop, M_KTRACE);
471}
472
473static struct ktr_io_params *
474ktr_io_params_alloc(struct thread *td, struct vnode *vp)
475{
476 struct ktr_io_params *res;
477
478 res = malloc(sizeof(struct ktr_io_params), M_KTRACE, M_WAITOK);
479 res->vp = vp;
480 res->cr = crhold(td->td_ucred);
481 res->lim = lim_cur(td, RLIMIT_FSIZE);
482 res->refs = 1;
483 return (res);
484}
485
486/*
487 * Disable tracing for a process and release all associated resources.
488 * The caller is responsible for releasing a reference on the returned
489 * vnode and credentials.
490 */
491static struct ktr_io_params *
492ktr_freeproc(struct proc *p)
493{
494 struct ktr_io_params *kiop;
495 struct ktr_request *req;
496
497 PROC_LOCK_ASSERT(p, MA_OWNED);
498 mtx_assert(&ktrace_mtx, MA_OWNED);
499 kiop = ktr_io_params_rele(p->p_ktrioparms);
500 p->p_ktrioparms = NULL;
501 p->p_traceflag = 0;
502 while ((req = STAILQ_FIRST(&p->p_ktr)) != NULL) {
503 STAILQ_REMOVE_HEAD(&p->p_ktr, ktr_list);
504 ktr_freerequest_locked(req);
505 }
506 return (kiop);
507}
508
509struct vnode *
510ktr_get_tracevp(struct proc *p, bool ref)
511{
512 struct vnode *vp;
513
514 PROC_LOCK_ASSERT(p, MA_OWNED);
515
516 if (p->p_ktrioparms != NULL) {
517 vp = p->p_ktrioparms->vp;
518 if (ref)
519 vrefact(vp);
520 } else {
521 vp = NULL;
522 }
523 return (vp);
524}
525
526void
527ktrsyscall(int code, int narg, register_t args[])
528{
529 struct ktr_request *req;
530 struct ktr_syscall *ktp;
531 size_t buflen;
532 char *buf = NULL;
533
534 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
535 return;
536
537 buflen = sizeof(register_t) * narg;
538 if (buflen > 0) {
539 buf = malloc(buflen, M_KTRACE, M_WAITOK);
540 bcopy(args, buf, buflen);
541 }
542 req = ktr_getrequest(KTR_SYSCALL);
543 if (req == NULL) {
544 if (buf != NULL)
545 free(buf, M_KTRACE);
546 return;
547 }
548 ktp = &req->ktr_data.ktr_syscall;
549 ktp->ktr_code = code;
550 ktp->ktr_narg = narg;
551 if (buflen > 0) {
552 req->ktr_header.ktr_len = buflen;
553 req->ktr_buffer = buf;
554 }
555 ktr_submitrequest(curthread, req);
556}
557
558void
559ktrsysret(int code, int error, register_t retval)
560{
561 struct ktr_request *req;
562 struct ktr_sysret *ktp;
563
564 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
565 return;
566
567 req = ktr_getrequest(KTR_SYSRET);
568 if (req == NULL)
569 return;
570 ktp = &req->ktr_data.ktr_sysret;
571 ktp->ktr_code = code;
572 ktp->ktr_error = error;
573 ktp->ktr_retval = ((error == 0) ? retval: 0); /* what about val2 ? */
574 ktr_submitrequest(curthread, req);
575}
576
577/*
578 * When a setuid process execs, disable tracing.
579 *
580 * XXX: We toss any pending asynchronous records.
581 */
582struct ktr_io_params *
583ktrprocexec(struct proc *p)
584{
585 struct ktr_io_params *kiop;
586
587 PROC_LOCK_ASSERT(p, MA_OWNED);
588
589 kiop = p->p_ktrioparms;
590 if (kiop == NULL || priv_check_cred(kiop->cr, PRIV_DEBUG_DIFFCRED))
591 return (NULL);
592
593 mtx_lock(&ktrace_mtx);
594 kiop = ktr_freeproc(p);
595 mtx_unlock(&ktrace_mtx);
596 return (kiop);
597}
598
599/*
600 * When a process exits, drain per-process asynchronous trace records
601 * and disable tracing.
602 */
603void
604ktrprocexit(struct thread *td)
605{
606 struct ktr_request *req;
607 struct proc *p;
608 struct ktr_io_params *kiop;
609
610 p = td->td_proc;
611 if (p->p_traceflag == 0)
612 return;
613
614 ktrace_enter(td);
615 req = ktr_getrequest_entered(td, KTR_PROCDTOR);
616 if (req != NULL)
617 ktr_enqueuerequest(td, req);
618 sx_xlock(&ktrace_sx);
619 ktr_drain(td);
620 sx_xunlock(&ktrace_sx);
621 PROC_LOCK(p);
622 mtx_lock(&ktrace_mtx);
623 kiop = ktr_freeproc(p);
624 mtx_unlock(&ktrace_mtx);
625 PROC_UNLOCK(p);
626 ktr_io_params_free(kiop);
627 ktrace_exit(td);
628}
629
630static void
631ktrprocctor_entered(struct thread *td, struct proc *p)
632{
633 struct ktr_proc_ctor *ktp;
634 struct ktr_request *req;
635 struct thread *td2;
636
637 ktrace_assert(td);
638 td2 = FIRST_THREAD_IN_PROC(p);
639 req = ktr_getrequest_entered(td2, KTR_PROCCTOR);
640 if (req == NULL)
641 return;
642 ktp = &req->ktr_data.ktr_proc_ctor;
643 ktp->sv_flags = p->p_sysent->sv_flags;
644 ktr_enqueuerequest(td2, req);
645}
646
647void
648ktrprocctor(struct proc *p)
649{
650 struct thread *td = curthread;
651
652 if ((p->p_traceflag & KTRFAC_MASK) == 0)
653 return;
654
655 ktrace_enter(td);
656 ktrprocctor_entered(td, p);
657 ktrace_exit(td);
658}
659
660/*
661 * When a process forks, enable tracing in the new process if needed.
662 */
663void
664ktrprocfork(struct proc *p1, struct proc *p2)
665{
666
667 MPASS(p2->p_ktrioparms == NULL);
668 MPASS(p2->p_traceflag == 0);
669
670 if (p1->p_traceflag == 0)
671 return;
672
673 PROC_LOCK(p1);
674 mtx_lock(&ktrace_mtx);
675 if (p1->p_traceflag & KTRFAC_INHERIT) {
676 p2->p_traceflag = p1->p_traceflag;
677 if ((p2->p_ktrioparms = p1->p_ktrioparms) != NULL)
678 p1->p_ktrioparms->refs++;
679 }
680 mtx_unlock(&ktrace_mtx);
681 PROC_UNLOCK(p1);
682
683 ktrprocctor(p2);
684}
685
686/*
687 * When a thread returns, drain any asynchronous records generated by the
688 * system call.
689 */
690void
691ktruserret(struct thread *td)
692{
693
694 ktrace_enter(td);
695 sx_xlock(&ktrace_sx);
696 ktr_drain(td);
697 sx_xunlock(&ktrace_sx);
698 ktrace_exit(td);
699}
700
701void
702ktrnamei(const char *path)
703{
704 struct ktr_request *req;
705 int namelen;
706 char *buf = NULL;
707
708 namelen = strlen(path);
709 if (namelen > 0) {
710 buf = malloc(namelen, M_KTRACE, M_WAITOK);
711 bcopy(path, buf, namelen);
712 }
713 req = ktr_getrequest(KTR_NAMEI);
714 if (req == NULL) {
715 if (buf != NULL)
716 free(buf, M_KTRACE);
717 return;
718 }
719 if (namelen > 0) {
720 req->ktr_header.ktr_len = namelen;
721 req->ktr_buffer = buf;
722 }
723 ktr_submitrequest(curthread, req);
724}
725
726void
727ktrsysctl(int *name, u_int namelen)
728{
729 struct ktr_request *req;
730 u_int mib[CTL_MAXNAME + 2];
731 char *mibname;
732 size_t mibnamelen;
733 int error;
734
735 /* Lookup name of mib. */
736 KASSERT(namelen <= CTL_MAXNAME, ("sysctl MIB too long"));
737 mib[0] = 0;
738 mib[1] = 1;
739 bcopy(name, mib + 2, namelen * sizeof(*name));
740 mibnamelen = 128;
741 mibname = malloc(mibnamelen, M_KTRACE, M_WAITOK);
742 error = kernel_sysctl(curthread, mib, namelen + 2, mibname, &mibnamelen,
743 NULL, 0, &mibnamelen, 0);
744 if (error) {
745 free(mibname, M_KTRACE);
746 return;
747 }
748 req = ktr_getrequest(KTR_SYSCTL);
749 if (req == NULL) {
750 free(mibname, M_KTRACE);
751 return;
752 }
753 req->ktr_header.ktr_len = mibnamelen;
754 req->ktr_buffer = mibname;
755 ktr_submitrequest(curthread, req);
756}
757
758void
759ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
760{
761 struct ktr_request *req;
762 struct ktr_genio *ktg;
763 int datalen;
764 char *buf;
765
766 if (error) {
767 free(uio, M_IOV);
768 return;
769 }
770 uio->uio_offset = 0;
771 uio->uio_rw = UIO_WRITE;
772 datalen = MIN(uio->uio_resid, ktr_geniosize);
773 buf = malloc(datalen, M_KTRACE, M_WAITOK);
774 error = uiomove(buf, datalen, uio);
775 free(uio, M_IOV);
776 if (error) {
777 free(buf, M_KTRACE);
778 return;
779 }
780 req = ktr_getrequest(KTR_GENIO);
781 if (req == NULL) {
782 free(buf, M_KTRACE);
783 return;
784 }
785 ktg = &req->ktr_data.ktr_genio;
786 ktg->ktr_fd = fd;
787 ktg->ktr_rw = rw;
788 req->ktr_header.ktr_len = datalen;
789 req->ktr_buffer = buf;
790 ktr_submitrequest(curthread, req);
791}
792
793void
794ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
795{
796 struct thread *td = curthread;
797 struct ktr_request *req;
798 struct ktr_psig *kp;
799
800 req = ktr_getrequest(KTR_PSIG);
801 if (req == NULL)
802 return;
803 kp = &req->ktr_data.ktr_psig;
804 kp->signo = (char)sig;
805 kp->action = action;
806 kp->mask = *mask;
807 kp->code = code;
808 ktr_enqueuerequest(td, req);
809 ktrace_exit(td);
810}
811
812void
813ktrcsw(int out, int user, const char *wmesg)
814{
815 struct thread *td = curthread;
816 struct ktr_request *req;
817 struct ktr_csw *kc;
818
819 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
820 return;
821
822 req = ktr_getrequest(KTR_CSW);
823 if (req == NULL)
824 return;
825 kc = &req->ktr_data.ktr_csw;
826 kc->out = out;
827 kc->user = user;
828 if (wmesg != NULL)
829 strlcpy(kc->wmesg, wmesg, sizeof(kc->wmesg));
830 else
831 bzero(kc->wmesg, sizeof(kc->wmesg));
832 ktr_enqueuerequest(td, req);
833 ktrace_exit(td);
834}
835
836void
837ktrstruct(const char *name, const void *data, size_t datalen)
838{
839 struct ktr_request *req;
840 char *buf;
841 size_t buflen, namelen;
842
843 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
844 return;
845
846 if (data == NULL)
847 datalen = 0;
848 namelen = strlen(name) + 1;
849 buflen = namelen + datalen;
850 buf = malloc(buflen, M_KTRACE, M_WAITOK);
851 strcpy(buf, name);
852 bcopy(data, buf + namelen, datalen);
853 if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
854 free(buf, M_KTRACE);
855 return;
856 }
857 req->ktr_buffer = buf;
858 req->ktr_header.ktr_len = buflen;
859 ktr_submitrequest(curthread, req);
860}
861
862void
863ktrstruct_error(const char *name, const void *data, size_t datalen, int error)
864{
865
866 if (error == 0)
867 ktrstruct(name, data, datalen);
868}
869
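ktrstruct() and its ktrstruct_error() wrapper are the hooks other kernel code uses to log a named structure as a KTR_STRUCT record. A hypothetical call site is sketched below (the helper function, its arguments, and the struct stat buffer are illustrative only; KTRPOINT() is the usual guard so the allocation and copy are skipped when the trace point is disabled):

/*
 * Hypothetical call site: record a struct stat that is about to be
 * copied out to user space.  "td" is curthread and "sb" has already
 * been filled in by the caller.
 */
static void
example_ktrstat(struct thread *td, const struct stat *sb)
{
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrstruct("stat", sb, sizeof(*sb));
#endif
}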
870void
871ktrstructarray(const char *name, enum uio_seg seg, const void *data,
872 int num_items, size_t struct_size)
873{
874 struct ktr_request *req;
875 struct ktr_struct_array *ksa;
876 char *buf;
877 size_t buflen, datalen, namelen;
878 int max_items;
879
880 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
881 return;
882 if (num_items < 0)
883 return;
884
885 /* Trim array length to genio size. */
886 max_items = ktr_geniosize / struct_size;
887 if (num_items > max_items) {
888 if (max_items == 0)
889 num_items = 1;
890 else
891 num_items = max_items;
892 }
893 datalen = num_items * struct_size;
894
895 if (data == NULL)
896 datalen = 0;
897
898 namelen = strlen(name) + 1;
899 buflen = namelen + datalen;
900 buf = malloc(buflen, M_KTRACE, M_WAITOK);
901 strcpy(buf, name);
902 if (seg == UIO_SYSSPACE)
903 bcopy(data, buf + namelen, datalen);
904 else {
905 if (copyin(data, buf + namelen, datalen) != 0) {
906 free(buf, M_KTRACE);
907 return;
908 }
909 }
910 if ((req = ktr_getrequest(KTR_STRUCT_ARRAY)) == NULL) {
911 free(buf, M_KTRACE);
912 return;
913 }
914 ksa = &req->ktr_data.ktr_struct_array;
915 ksa->struct_size = struct_size;
916 req->ktr_buffer = buf;
917 req->ktr_header.ktr_len = buflen;
918 ktr_submitrequest(curthread, req);
919}
920
921void
922ktrcapfail(enum ktr_cap_fail_type type, const cap_rights_t *needed,
923 const cap_rights_t *held)
924{
925 struct thread *td = curthread;
926 struct ktr_request *req;
927 struct ktr_cap_fail *kcf;
928
929 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
930 return;
931
932 req = ktr_getrequest(KTR_CAPFAIL);
933 if (req == NULL)
934 return;
935 kcf = &req->ktr_data.ktr_cap_fail;
936 kcf->cap_type = type;
937 if (needed != NULL)
938 kcf->cap_needed = *needed;
939 else
940 cap_rights_init(&kcf->cap_needed);
941 if (held != NULL)
942 kcf->cap_held = *held;
943 else
944 cap_rights_init(&kcf->cap_held);
945 ktr_enqueuerequest(td, req);
946 ktrace_exit(td);
947}
948
949void
950ktrfault(vm_offset_t vaddr, int type)
951{
952 struct thread *td = curthread;
953 struct ktr_request *req;
954 struct ktr_fault *kf;
955
956 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
957 return;
958
959 req = ktr_getrequest(KTR_FAULT);
960 if (req == NULL)
961 return;
962 kf = &req->ktr_data.ktr_fault;
963 kf->vaddr = vaddr;
964 kf->type = type;
965 ktr_enqueuerequest(td, req);
966 ktrace_exit(td);
967}
968
969void
970ktrfaultend(int result)
971{
972 struct thread *td = curthread;
973 struct ktr_request *req;
974 struct ktr_faultend *kf;
975
976 if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
977 return;
978
979 req = ktr_getrequest(KTR_FAULTEND);
980 if (req == NULL)
981 return;
982 kf = &req->ktr_data.ktr_faultend;
983 kf->result = result;
984 ktr_enqueuerequest(td, req);
985 ktrace_exit(td);
986}
987#endif /* KTRACE */
988
989/* Interface and common routines */
990
991#ifndef _SYS_SYSPROTO_H_
992struct ktrace_args {
993 char *fname;
994 int ops;
995 int facs;
996 int pid;
997};
998#endif
999/* ARGSUSED */
1000int
1001sys_ktrace(struct thread *td, struct ktrace_args *uap)
1002{
1003#ifdef KTRACE
1004 struct vnode *vp = NULL;
1005 struct proc *p;
1006 struct pgrp *pg;
1007 int facs = uap->facs & ~KTRFAC_ROOT;
1008 int ops = KTROP(uap->ops);
1009 int descend = uap->ops & KTRFLAG_DESCEND;
1010 int ret = 0;
1011 int flags, error = 0;
1012 struct nameidata nd;
1013 struct ktr_io_params *kiop, *old_kiop;
1014
1015 /*
1016 * Need something to (un)trace.
1017 */
1018 if (ops != KTROP_CLEARFILE && facs == 0)
1019 return (EINVAL);
1020
1021 kiop = NULL;
1022 if (ops != KTROP_CLEAR) {
1023 /*
1024 * an operation which requires a file argument.
1025 */
1026 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->fname);
1027 flags = FREAD | FWRITE | O_NOFOLLOW;
1028 error = vn_open(&nd, &flags, 0, NULL);
1029 if (error)
1030 return (error);
1031 NDFREE(&nd, NDF_ONLY_PNBUF);
1032 vp = nd.ni_vp;
1033 VOP_UNLOCK(vp);
1034 if (vp->v_type != VREG) {
1035 (void)vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
1036 return (EACCES);
1037 }
1038 kiop = ktr_io_params_alloc(td, vp);
1039 }
1040
1041 /*
1042 * Clear all uses of the tracefile.
1043 */
1044 ktrace_enter(td);
1045 if (ops == KTROP_CLEARFILE) {
1046restart:
1047 sx_slock(&allproc_lock);
1048 FOREACH_PROC_IN_SYSTEM(p) {
1049 old_kiop = NULL;
1050 PROC_LOCK(p);
1051 if (p->p_ktrioparms != NULL &&
1052 p->p_ktrioparms->vp == vp) {
1053 if (ktrcanset(td, p)) {
1054 mtx_lock(&ktrace_mtx);
1055 old_kiop = ktr_freeproc(p);
1056 mtx_unlock(&ktrace_mtx);
1057 } else
1058 error = EPERM;
1059 }
1060 PROC_UNLOCK(p);
1061 if (old_kiop != NULL) {
1062 sx_sunlock(&allproc_lock);
1063 ktr_io_params_free(old_kiop);
1064 goto restart;
1065 }
1066 }
1067 sx_sunlock(&allproc_lock);
1068 goto done;
1069 }
1070 /*
1071 * do it
1072 */
1073 sx_slock(&proctree_lock);
1074 if (uap->pid < 0) {
1075 /*
1076 * by process group
1077 */
1078 pg = pgfind(-uap->pid);
1079 if (pg == NULL) {
1080 sx_sunlock(&proctree_lock);
1081 error = ESRCH;
1082 goto done;
1083 }
1084
1085 /*
1086 * ktrops() may call vrele(). Lock pg_members
1087 * by the proctree_lock rather than pg_mtx.
1088 */
1089 PGRP_UNLOCK(pg);
1090 if (LIST_EMPTY(&pg->pg_members)) {
1091 sx_sunlock(&proctree_lock);
1092 error = ESRCH;
1093 goto done;
1094 }
1095 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1096 PROC_LOCK(p);
1097 if (descend)
1098 ret |= ktrsetchildren(td, p, ops, facs, kiop);
1099 else
1100 ret |= ktrops(td, p, ops, facs, kiop);
1101 }
1102 } else {
1103 /*
1104 * by pid
1105 */
1106 p = pfind(uap->pid);
1107 if (p == NULL) {
1108 error = ESRCH;
1109 sx_sunlock(&proctree_lock);
1110 goto done;
1111 }
1112 if (descend)
1113 ret |= ktrsetchildren(td, p, ops, facs, kiop);
1114 else
1115 ret |= ktrops(td, p, ops, facs, kiop);
1116 }
1117 sx_sunlock(&proctree_lock);
1118 if (!ret)
1119 error = EPERM;
1120done:
1121 if (kiop != NULL) {
1122 mtx_lock(&ktrace_mtx);
1123 kiop = ktr_io_params_rele(kiop);
1124 mtx_unlock(&ktrace_mtx);
1125 ktr_io_params_free(kiop);
1126 }
1127 ktrace_exit(td);
1128 return (error);
1129#else /* !KTRACE */
1130 return (ENOSYS);
1131#endif /* KTRACE */
1132}
1133
1134/* ARGSUSED */
1135int
1136sys_utrace(struct thread *td, struct utrace_args *uap)
1137{
1138
1139#ifdef KTRACE
1140 struct ktr_request *req;
1141 void *cp;
1142 int error;
1143
1144 if (!KTRPOINT(td, KTR_USER))
1145 return (0);
1146 if (uap->len > KTR_USER_MAXLEN)
1147 return (EINVAL);
1148 cp = malloc(uap->len, M_KTRACE, M_WAITOK);
1149 error = copyin(uap->addr, cp, uap->len);
1150 if (error) {
1151 free(cp, M_KTRACE);
1152 return (error);
1153 }
1154 req = ktr_getrequest(KTR_USER);
1155 if (req == NULL) {
1156 free(cp, M_KTRACE);
1157 return (ENOMEM);
1158 }
1159 req->ktr_buffer = cp;
1160 req->ktr_header.ktr_len = uap->len;
1161 ktr_submitrequest(td, req);
1162 return (0);
1163#else /* !KTRACE */
1164 return (ENOSYS);
1165#endif /* KTRACE */
1166}
1167
1168#ifdef KTRACE
1169static int
1170ktrops(struct thread *td, struct proc *p, int ops, int facs,
1171 struct ktr_io_params *new_kiop)
1172{
1173 struct ktr_io_params *old_kiop;
1174
1175 PROC_LOCK_ASSERT(p, MA_OWNED);
1176 if (!ktrcanset(td, p)) {
1177 PROC_UNLOCK(p);
1178 return (0);
1179 }
1180 if ((ops == KTROP_SET && p->p_state == PRS_NEW) ||
1181 p_cansee(td, p) != 0) {
1182 /*
1183 * Disallow setting trace points if the process is being born.
1184 * This avoids races with trace point inheritance in
1185 * ktrprocfork().
1186 */
1187 PROC_UNLOCK(p);
1188 return (0);
1189 }
1190 if ((p->p_flag & P_WEXIT) != 0) {
1191 /*
1192 * There's nothing to do if the process is exiting, but avoid
1193 * signaling an error.
1194 */
1195 PROC_UNLOCK(p);
1196 return (1);
1197 }
1198 old_kiop = NULL;
1199 mtx_lock(&ktrace_mtx);
1200 if (ops == KTROP_SET) {
1201 if (p->p_ktrioparms != NULL &&
1202 p->p_ktrioparms->vp != new_kiop->vp) {
1203 /* if trace file already in use, relinquish below */
1204 old_kiop = ktr_io_params_rele(p->p_ktrioparms);
1205 p->p_ktrioparms = NULL;
1206 }
1207 if (p->p_ktrioparms == NULL) {
1208 p->p_ktrioparms = new_kiop;
1209 ktr_io_params_ref(new_kiop);
1210 }
1211 p->p_traceflag |= facs;
1212 if (priv_check(td, PRIV_KTRACE) == 0)
1213 p->p_traceflag |= KTRFAC_ROOT;
1214 } else {
1215 /* KTROP_CLEAR */
1216 if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0)
1217 /* no more tracing */
1218 old_kiop = ktr_freeproc(p);
1219 }
1220 mtx_unlock(&ktrace_mtx);
1221 if ((p->p_traceflag & KTRFAC_MASK) != 0)
1222 ktrprocctor_entered(td, p);
1223 PROC_UNLOCK(p);
1224 ktr_io_params_free(old_kiop);
1225
1226 return (1);
1227}
1228
1229static int
1230ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
1231 struct ktr_io_params *new_kiop)
1232{
1233 struct proc *p;
1234 int ret = 0;
1235
1236 p = top;
1237 PROC_LOCK_ASSERT(p, MA_OWNED);
1238 sx_assert(&proctree_lock, SX_LOCKED);
1239 for (;;) {
1240 ret |= ktrops(td, p, ops, facs, new_kiop);
1241 /*
1242 * If this process has children, descend to them next,
1243 * otherwise do any siblings, and if done with this level,
1244 * follow back up the tree (but not past top).
1245 */
1246 if (!LIST_EMPTY(&p->p_children))
1247 p = LIST_FIRST(&p->p_children);
1248 else for (;;) {
1249 if (p == top)
1250 return (ret);
1251 if (LIST_NEXT(p, p_sibling)) {
1252 p = LIST_NEXT(p, p_sibling);
1253 break;
1254 }
1255 p = p->p_pptr;
1256 }
1257 PROC_LOCK(p);
1258 }
1259 /*NOTREACHED*/
1260}
1261
1262static void
1263ktr_writerequest(struct thread *td, struct ktr_request *req)
1264{
1265 struct ktr_io_params *kiop, *kiop1;
1266 struct ktr_header *kth;
1267 struct vnode *vp;
1268 struct proc *p;
1269 struct ucred *cred;
1270 struct uio auio;
1271 struct iovec aiov[3];
1272 struct mount *mp;
1273 off_t lim;
1274 int datalen, buflen;
1275 int error;
1276
1277 p = td->td_proc;
1278
1279 /*
1280 * We reference the kiop for use in I/O in case ktrace is
1281 * disabled on the process as we write out the request.
1282 */
1283 mtx_lock(&ktrace_mtx);
1284 kiop = p->p_ktrioparms;
1285
1286 /*
1287 * If kiop is NULL, it has been cleared out from under this
1288 * request, so just drop it.
1289 */
1290 if (kiop == NULL) {
1291 mtx_unlock(&ktrace_mtx);
1292 return;
1293 }
1294
1295 ktr_io_params_ref(kiop);
1296 vp = kiop->vp;
1297 cred = kiop->cr;
1298 lim = kiop->lim;
1299
1300 KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));
1301 mtx_unlock(&ktrace_mtx);
1302
1303 kth = &req->ktr_header;
1304 KASSERT(((u_short)kth->ktr_type & ~KTR_DROP) < nitems(data_lengths),
1305 ("data_lengths array overflow"));
1306 datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
1307 buflen = kth->ktr_len;
1308 auio.uio_iov = &aiov[0];
1309 auio.uio_offset = 0;
1310 auio.uio_segflg = UIO_SYSSPACE;
1311 auio.uio_rw = UIO_WRITE;
1312 aiov[0].iov_base = (caddr_t)kth;
1313 aiov[0].iov_len = sizeof(struct ktr_header);
1314 auio.uio_resid = sizeof(struct ktr_header);
1315 auio.uio_iovcnt = 1;
1316 auio.uio_td = td;
1317 if (datalen != 0) {
1318 aiov[1].iov_base = (caddr_t)&req->ktr_data;
1319 aiov[1].iov_len = datalen;
1320 auio.uio_resid += datalen;
1321 auio.uio_iovcnt++;
1322 kth->ktr_len += datalen;
1323 }
1324 if (buflen != 0) {
1325 KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
1326 aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
1327 aiov[auio.uio_iovcnt].iov_len = buflen;
1328 auio.uio_resid += buflen;
1329 auio.uio_iovcnt++;
1330 }
1331
1332 vn_start_write(vp, &mp, V_WAIT);
1333 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1334 td->td_ktr_io_lim = lim;
1335#ifdef MAC
1336 error = mac_vnode_check_write(cred, NOCRED, vp);
1337 if (error == 0)
1338#endif
1339 error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
1340 VOP_UNLOCK(vp);
1341 vn_finished_write(mp);
1342 if (error == 0) {
1343 mtx_lock(&ktrace_mtx);
1344 kiop = ktr_io_params_rele(kiop);
1345 mtx_unlock(&ktrace_mtx);
1346 ktr_io_params_free(kiop);
1347 return;
1348 }
1349
1350 /*
1351 * If error encountered, give up tracing on this vnode on this
1352 * process. Other processes might still be suitable for
1353 * writes to this vnode.
1354 */
1355 log(LOG_NOTICE,
1356 "ktrace write failed, errno %d, tracing stopped for pid %d\n",
1357 error, p->p_pid);
1358
1359 kiop1 = NULL;
1360 PROC_LOCK(p);
1361 mtx_lock(&ktrace_mtx);
1362 if (p->p_ktrioparms != NULL && p->p_ktrioparms->vp == vp)
1363 kiop1 = ktr_freeproc(p);
1364 kiop = ktr_io_params_rele(kiop);
1365 mtx_unlock(&ktrace_mtx);
1366 PROC_UNLOCK(p);
1367 ktr_io_params_free(kiop1);
1368 ktr_io_params_free(kiop);
1369}
1370
1371/*
1372 * Return true if caller has permission to set the ktracing state
1373 * of target. Essentially, the target can't possess any
1374 * more permissions than the caller. KTRFAC_ROOT signifies that
1375 * root previously set the tracing status on the target process, and
1376 * so, only root may further change it.
1377 */
1378static int
1379ktrcanset(struct thread *td, struct proc *targetp)
1380{
1381
1382 PROC_LOCK_ASSERT(targetp, MA_OWNED);
1383 if (targetp->p_traceflag & KTRFAC_ROOT &&
1384 priv_check(td, PRIV_KTRACE))
1385 return (0);
1386
1387 if (p_candebug(td, targetp) != 0)
1388 return (0);
1389
1390 return (1);
1391}
1392
1393#endif /* KTRACE */