FreeBSD kernel kern code
subr_trap.c
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007 The FreeBSD Foundation
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/capsicum.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/pmckern.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <security/audit/audit.h>

#include <machine/cpu.h>

#ifdef VIMAGE
#include <net/vnet.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/mac/mac_framework.h>

void (*softdep_ast_cleanup)(struct thread *);

/*
 * Define the code needed before returning to user mode, for trap and
 * syscall.
 */
void
userret(struct thread *td, struct trapframe *frame)
{
	struct proc *p = td->td_proc;

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
	    td->td_name);
	KASSERT((p->p_flag & P_WEXIT) == 0,
	    ("Exiting process returns to usermode"));
#ifdef DIAGNOSTIC
	/*
	 * Check that we called signotify() enough.  For
	 * multi-threaded processes, where signal distribution might
	 * change due to other threads changing sigmask, the check is
	 * racy and cannot be performed reliably.
	 * If the current process is a vfork child, indicated by P_PPWAIT,
	 * then issignal() ignores stops, so we skip the check to avoid
	 * misclassifying pending signals.
	 */
	if (p->p_numthreads == 1) {
		PROC_LOCK(p);
		thread_lock(td);
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && (td->td_flags &
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
				thread_unlock(td);
				panic(
				    "failed to set signal flags for ast p %p td %p fl %x",
				    p, td, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Charge system time if profiling.
	 */
	if (__predict_false(p->p_flag & P_PROFIL))
		addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);

#ifdef HWPMC_HOOKS
	if (PMC_THREAD_HAS_SAMPLES(td))
		PMC_CALL_HOOK(td, PMC_FN_THR_USERRET, NULL);
#endif
#ifdef TCPHPTS
	/*
	 * @gallatin is adamant that this needs to go here; I
	 * am not so sure.  Running hpts is a lot like
	 * a lro_flush() that happens while a user process
	 * is running.  But he may know best, so I will go
	 * with his view of accounting. :-)
	 */
	tcp_run_hpts();
#endif
	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

	/*
	 * Check for misbehavior.
	 *
	 * If callchain tracing is ongoing because of hwpmc(4), skip the
	 * scheduler pinning check: the hwpmc(4) subsystem, in fact,
	 * collects the callchain information at the ast() checkpoint,
	 * which is past userret().
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
	KASSERT(td->td_critnest == 0,
	    ("userret: Returning in a critical section"));
	KASSERT(td->td_locks == 0,
	    ("userret: Returning with %d locks held", td->td_locks));
	KASSERT(td->td_rw_rlocks == 0,
	    ("userret: Returning with %d rwlocks held in read mode",
	    td->td_rw_rlocks));
	KASSERT(td->td_sx_slocks == 0,
	    ("userret: Returning with %d sx locks held in shared mode",
	    td->td_sx_slocks));
	KASSERT(td->td_lk_slocks == 0,
	    ("userret: Returning with %d lockmanager locks held in shared mode",
	    td->td_lk_slocks));
	KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
	    ("userret: Returning with pagefaults disabled"));
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0, ("userret: Returning with sleep disabled"));
	}
	KASSERT(td->td_pinned == 0 || (td->td_pflags & TDP_CALLCHAIN) != 0,
	    ("userret: Returning with pinned thread"));
	KASSERT(td->td_vp_reserved == NULL,
	    ("userret: Returning with preallocated vnode"));
	KASSERT((td->td_flags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
	    ("userret: Returning with stop signals deferred"));
	KASSERT(td->td_vslock_sz == 0,
	    ("userret: Returning with vslock-wired space"));
#ifdef VIMAGE
	/* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
	VNET_ASSERT(curvnet == NULL,
	    ("%s: Returning on td %p (pid %d, %s) with vnet %p set in %s",
	    __func__, td, p->p_pid, td->td_name, curvnet,
	    (td->td_vnet_lpush != NULL) ? td->td_vnet_lpush : "N/A"));
#endif
}
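
/*
 * Context note: userret() is the common machine-independent tail of
 * every return to user space.  Each architecture's trap and syscall
 * return paths end with it, and ast() below likewise finishes by
 * calling it.
 */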

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	int flags, sig;
	bool resched_sigs;

	kmsan_mark(framep, sizeof(*framep), KMSAN_STATE_INITED);

	td = curthread;
	p = td->td_proc;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	td->td_frame = framep;
	td->td_pticks = 0;

	/*
	 * This updates td_flags for the checks below in one
	 * "atomic" operation with turning off the astpending flag.
	 * If another AST is triggered while we are handling the
	 * ASTs saved in flags, the astpending flag will be set and
	 * ast() will be called again.
	 */
	thread_lock(td);
	flags = td->td_flags;
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
	    TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND |
	    TDF_KQTICKLED);
	thread_unlock(td);
	VM_CNT_INC(v_trap);

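	/*
	 * From here on, the handlers consult the snapshot taken in
	 * "flags"; td_flags itself may already be accumulating new AST
	 * requests, which will be serviced on a later pass.
	 */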
	if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
		thread_cow_update(td);
	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
		td->td_profil_ticks = 0;
		td->td_pflags &= ~TDP_OWEUPC;
	}
#ifdef HWPMC_HOOKS
	/* Handle Software PMC callchain capture. */
	if (PMC_IS_PENDING_CALLCHAIN(td))
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT,
		    (void *)framep);
#endif
	if ((td->td_pflags & TDP_RFPPWAIT) != 0)
		fork_rfppwait(td);
	if (flags & TDF_ALRMPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
	if (flags & TDF_PROFPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
#ifdef MAC
	if (flags & TDF_MACPEND)
		mac_thread_userret(td);
#endif
	if (flags & TDF_NEEDRESCHED) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(1, 1, __func__);
#endif
		thread_lock(td);
		sched_prio(td, td->td_user_pri);
		mi_switch(SW_INVOL | SWT_NEEDRESCHED);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(0, 1, __func__);
#endif
	}

	td_softdep_cleanup(td);
	MPASS(td->td_su == NULL);

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland
	 */
	if (__predict_false(td->td_pflags & TDP_GEOM))
		g_waitidle();

#ifdef DIAGNOSTIC
	if (p->p_numthreads == 1 && (flags & TDF_NEEDSIGCHK) == 0) {
		PROC_LOCK(p);
		thread_lock(td);
		/*
		 * Note that TDF_NEEDSIGCHK should be re-read from
		 * td_flags, since a signal might have been delivered
		 * after we cleared td_flags above.  This is one of
		 * the reasons for the looping check for the AST
		 * condition.  See the comment in userret() about P_PPWAIT.
		 */
		if ((p->p_flag & P_PPWAIT) == 0 &&
		    (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
			if (SIGPENDING(td) && (td->td_flags &
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
			    (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
				thread_unlock(td); /* fix dumps */
				panic(
				    "failed2 to set signal flags for ast p %p td %p fl %x %x",
				    p, td, flags, td->td_flags);
			}
		}
		thread_unlock(td);
		PROC_UNLOCK(p);
	}
#endif

	/*
	 * Check for signals.  Unlocked reads of p_pendingcnt or
	 * p_siglist might cause a process-directed signal to be handled
	 * later.
	 */
	if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		sigfastblock_fetch(td);
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0) {
			KASSERT(sig >= 0, ("sig %d", sig));
			postsig(sig);
		}
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
		resched_sigs = true;
	} else {
		resched_sigs = false;
	}

	if ((flags & TDF_KQTICKLED) != 0)
		kqueue_drain_schedtask();

	/*
	 * Handle deferred update of the fast sigblock value, after
	 * the postsig() loop was performed.
	 */
	sigfastblock_setpend(td, resched_sigs);

#ifdef KTRACE
	KTRUSERRET(td);
#endif

	/*
	 * We need to check whether we have to exit or wait due to a
	 * single-threading requirement or some other STOP condition.
	 */
	if (flags & TDF_NEEDSUSPCHK) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_OLDMASK) {
		td->td_pflags &= ~TDP_OLDMASK;
		kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
	}

#ifdef RACCT
	if (__predict_false(racct_enable && p->p_throttled != 0))
		racct_proc_throttled(p);
#endif

	userret(td, framep);
}

const char *
syscallname(struct proc *p, u_int code)
{
	static const char unknown[] = "unknown";
	struct sysentvec *sv;

	sv = p->p_sysent;
	if (sv->sv_syscallnames == NULL || code >= sv->sv_size)
		return (unknown);
	return (sv->sv_syscallnames[code]);
}
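
How ast() is reached is machine-dependent. The sketch below is a minimal,
hypothetical illustration of the pattern the MD trap and syscall epilogues
follow; md_return_to_usermode() and md_ast_pending() are names invented for
this sketch, not interfaces defined by this file or by any particular
architecture's trap.c.

/*
 * Hypothetical MD epilogue: service ASTs until none are pending, then
 * return to user mode.  The re-check after ast() returns matters
 * because, as the comment in ast() notes, a new AST may be posted
 * while the previously saved "flags" are being handled.  Real
 * implementations perform this check with interrupts disabled so a
 * late AST cannot be missed.
 */
static void
md_return_to_usermode(struct trapframe *framep)
{
	struct thread *td = curthread;

	while (md_ast_pending(td))	/* e.g. td_flags & TDF_ASTPENDING */
		ast(framep);
	/* ... restore registers from framep and return to user mode ... */
}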