49#include "opt_hwpmc_hooks.h"
50#include "opt_ktrace.h"
55#include <sys/capsicum.h>
57#include <sys/kernel.h>
61#include <sys/pmckern.h>
64#include <sys/ptrace.h>
66#include <sys/resourcevar.h>
68#include <sys/signalvar.h>
69#include <sys/syscall.h>
70#include <sys/syscallsubr.h>
71#include <sys/sysent.h>
73#include <sys/vmmeter.h>
76#include <sys/ktrace.h>
78#include <security/audit/audit.h>
80#include <machine/cpu.h>
87#include <sys/pmckern.h>
90#include <security/mac/mac_framework.h>
/*
 * userret() -- common code run on every return from the kernel to user
 * mode (after a syscall, trap, or interrupt).
 *
 * NOTE(review): this view of the file is fragmentary -- the embedded
 * original line numbers jump (e.g. 106 -> 117), so interior statements
 * are missing here.  Comments describe only what is visible.
 */
99userret(
struct thread *td,
struct trapframe *frame)
101 struct proc *p = td->td_proc;
/* Trace the return for KTR_SYSC consumers. */
103 CTR3(KTR_SYSC,
"userret: thread %p (pid %d, %s)", td, p->p_pid,
/* An exiting process must never make it back to user mode. */
105 KASSERT((p->p_flag & P_WEXIT) == 0,
106 (
"Exiting process returns to usermode"));
/*
 * Single-threaded fast path: re-check that a pending signal has the
 * AST/signal-check flags set, unless vfork (P_PPWAIT) or the
 * sigfastblock protocol defers signal delivery.
 */
117 if (p->p_numthreads == 1) {
120 if ((p->p_flag & P_PPWAIT) == 0 &&
121 (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
122 if (SIGPENDING(td) && (td->td_flags &
123 (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
124 (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
127 "failed to set signal flags for ast p %p td %p fl %x",
128 p, td, td->td_flags);
/* Statistical profiling charge on return (P_PROFIL). */
139 if (__predict_false(p->p_flag & P_PROFIL))
/* Flush any buffered HWPMC samples for this thread. */
143 if (PMC_THREAD_HAS_SAMPLES(td))
144 PMC_CALL_HOOK(td, PMC_FN_THR_USERRET, NULL);
/*
 * Sanity battery: the thread must cross into userland holding no
 * locks, outside any critical section, with faulting and sleeping
 * permitted.  Each violation panics via KASSERT/WITNESS.
 */
169 WITNESS_WARN(WARN_PANIC, NULL,
"userret: returning");
170 KASSERT(td->td_critnest == 0,
171 (
"userret: Returning in a critical section"));
172 KASSERT(td->td_locks == 0,
173 (
"userret: Returning with %d locks held", td->td_locks));
174 KASSERT(td->td_rw_rlocks == 0,
175 (
"userret: Returning with %d rwlocks held in read mode",
177 KASSERT(td->td_sx_slocks == 0,
178 (
"userret: Returning with %d sx locks held in shared mode",
180 KASSERT(td->td_lk_slocks == 0,
181 (
"userret: Returning with %d lockmanager locks held in shared mode",
183 KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
184 (
"userret: Returning with pagefaults disabled"));
/* Dump the epoch trace before panicking if sleeping is disallowed. */
185 if (__predict_false(!THREAD_CAN_SLEEP())) {
187 epoch_trace_list(curthread);
189 KASSERT(0, (
"userret: Returning with sleep disabled"));
/* Pinned is tolerated only while a PMC callchain capture is pending. */
191 KASSERT(td->td_pinned == 0 || (td->td_pflags & TDP_CALLCHAIN) != 0,
192 (
"userret: Returning with with pinned thread"));
193 KASSERT(td->td_vp_reserved == NULL,
194 (
"userret: Returning with preallocated vnode"));
195 KASSERT((td->td_flags & (TDF_SBDRY | TDF_SEINTR | TDF_SERESTART)) == 0,
196 (
"userret: Returning with stop signals deferred"));
197 KASSERT(td->td_vslock_sz == 0,
198 (
"userret: Returning with vslock-wired space"));
/* VNET context must have been popped before returning to user mode. */
201 VNET_ASSERT(curvnet == NULL,
202 (
"%s: Returning on td %p (pid %d, %s) with vnet %p set in %s",
203 __func__, td, p->p_pid, td->td_name, curvnet,
204 (td->td_vnet_lpush != NULL) ? td->td_vnet_lpush :
"N/A"));
/*
 * ast() -- handle an Asynchronous System Trap: process all deferred
 * work flagged on the thread (profiling ticks, PMC callchains, alarms,
 * MAC hooks, rescheduling, signal delivery, suspension, racct
 * throttling) before the thread returns to user mode.
 *
 * NOTE(review): fragmentary view -- many original lines are elided
 * (embedded numbering jumps, e.g. 232 -> 243); local declarations for
 * td/p/flags/sig are among the missing lines.  Comments cover only the
 * visible statements.
 */
214ast(
struct trapframe *framep)
/* Mark the just-saved trapframe as initialized for KMSAN. */
221 kmsan_mark(framep,
sizeof(*framep), KMSAN_STATE_INITED);
226 CTR3(KTR_SYSC,
"ast: thread %p (pid %d, %s)", td, p->p_pid,
/* ASTs are only taken on the way back to user mode. */
228 KASSERT(TRAPF_USERMODE(framep), (
"ast in kernel mode"));
229 WITNESS_WARN(WARN_PANIC, NULL,
"Returning to user mode");
230 mtx_assert(&
Giant, MA_NOTOWNED);
231 THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
232 td->td_frame = framep;
/*
 * Snapshot td_flags, then clear every AST-related flag in one store;
 * the rest of the function dispatches on the snapshot in `flags`.
 */
243 flags = td->td_flags;
244 td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
245 TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND |
/* Refresh copy-on-write per-thread state if the generation moved. */
250 if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
/* Charge deferred profiling ticks (TDP_OWEUPC) to the profile buffer. */
252 if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
253 addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
254 td->td_profil_ticks = 0;
255 td->td_pflags &= ~TDP_OWEUPC;
/* Deliver a deferred HWPMC user-callchain capture. */
259 if (PMC_IS_PENDING_CALLCHAIN(td))
260 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT, (
void *) framep);
/* Finish rfork(RFPPWAIT) parent-wait bookkeeping if pending. */
262 if ((td->td_pflags & TDP_RFPPWAIT) != 0)
264 if (
flags & TDF_ALRMPEND) {
269 if (
flags & TDF_PROFPEND) {
/* Give the MAC framework its per-return hook when flagged. */
275 if (
flags & TDF_MACPEND)
276 mac_thread_userret(td);
/* Voluntary context switch when rescheduling was requested,
 * bracketed by ktrace context-switch records. */
278 if (
flags & TDF_NEEDRESCHED) {
280 if (KTRPOINT(td, KTR_CSW))
281 ktrcsw(1, 1, __func__);
287 if (KTRPOINT(td, KTR_CSW))
288 ktrcsw(0, 1, __func__);
/* Softdep cleanup must leave no pending per-thread state behind. */
292 td_softdep_cleanup(td);
293 MPASS(td->td_su == NULL);
/* Drain deferred GEOM events queued by this thread. */
299 if (__predict_false(td->td_pflags & TDP_GEOM))
/*
 * Single-threaded re-check mirroring the one in userret(): verify the
 * signal flags were set when a signal is pending, unless vfork or
 * sigfastblock defers delivery.
 */
303 if (p->p_numthreads == 1 && (
flags & TDF_NEEDSIGCHK) == 0) {
313 if ((p->p_flag & P_PPWAIT) == 0 &&
314 (td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
315 if (SIGPENDING(td) && (td->td_flags &
316 (TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
317 (TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
320 "failed2 to set signal flags for ast p %p td %p fl %x %x",
321 p, td,
flags, td->td_flags);
/*
 * Signal delivery loop: with ps_mtx held, pop and post each
 * deliverable signal returned by cursig() until none remain.
 */
334 if (
flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
335 !SIGISEMPTY(p->p_siglist)) {
338 mtx_lock(&p->p_sigacts->ps_mtx);
339 while ((sig =
cursig(td)) != 0) {
340 KASSERT(sig >= 0, (
"sig %d", sig));
343 mtx_unlock(&p->p_sigacts->ps_mtx);
347 resched_sigs =
false;
/* kqueue wakeups that were tickled while in kernel. */
350 if ((
flags & TDF_KQTICKLED) != 0)
/* Honor a pending single-threading / suspension request. */
367 if (
flags & TDF_NEEDSUSPCHK) {
/* Restore the pre-sigsuspend signal mask (TDP_OLDMASK). */
373 if (td->td_pflags & TDP_OLDMASK) {
374 td->td_pflags &= ~TDP_OLDMASK;
/* Apply racct CPU throttling if the process is over its quota. */
379 if (__predict_false(racct_enable && p->p_throttled != 0))
380 racct_proc_throttled(p);
/*
 * Body fragment of syscallname() (signature visible only in the
 * prototype residue below: const char *syscallname(struct proc *, u_int)).
 * Maps a syscall number to its name via the process's sysentvec table,
 * falling back to "unknown" when the table is absent or the code is out
 * of range.  NOTE(review): the lines assigning `sv` and returning
 * `unknown` are missing from this fragmentary view -- confirm against
 * the full file.
 */
389 static const char unknown[] =
"unknown";
390 struct sysentvec *sv;
/* Out-of-range or nameless ABI: presumably returns `unknown` here. */
393 if (sv->sv_syscallnames == NULL || code >= sv->sv_size)
395 return (sv->sv_syscallnames[code]);
void kqueue_drain_schedtask(void)
void fork_rfppwait(struct thread *td)
struct mtx __exclusive_cache_line Giant
void panic(const char *fmt,...)
void sigfastblock_fetch(struct thread *td)
int cursig(struct thread *td)
void kern_psignal(struct proc *p, int sig)
void sigfastblock_setpend(struct thread *td, bool resched)
int kern_sigprocmask(struct thread *td, int how, sigset_t *set, sigset_t *oset, int flags)
void mi_switch(int flags)
void thread_cow_update(struct thread *td)
int thread_suspend_check(int return_instead)
void sched_prio(struct thread *td, u_char prio)
void kmsan_mark(const void *addr, size_t size, uint8_t c)
void addupc_task(struct thread *td, uintfptr_t pc, u_int ticks)
void(* softdep_ast_cleanup)(struct thread *)
void ast(struct trapframe *framep)
void userret(struct thread *td, struct trapframe *frame)
const char * syscallname(struct proc *p, u_int code)