FreeBSD kernel kern code
kern_clock.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/epoch.h>
#include <sys/eventhandler.h>
#include <sys/gtaskqueue.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , clock, hard);
PMC_SOFT_DEFINE( , , clock, stat);
PMC_SOFT_DEFINE_EX( , , clock, prof, \
    cpu_startprofclock, cpu_stopprofclock);
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE2(sched, , , tick, "struct thread *", "struct proc *");

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_time, "LU", "CPU time statistics");
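
/*
 * Example (not part of kern_clock.c): the handler above backs the
 * kern.cp_time sysctl.  A minimal userland sketch that reads it through
 * the documented sysctlbyname(3) interface; CPUSTATES and the CP_*
 * indices come from <sys/resource.h>.
 */
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	long cp_time[CPUSTATES];
	size_t len = sizeof(cp_time);

	if (sysctlbyname("kern.cp_time", cp_time, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
	    cp_time[CP_USER], cp_time[CP_NICE], cp_time[CP_SYS],
	    cp_time[CP_INTR], cp_time[CP_IDLE]);
	return (0);
}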

static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");

#ifdef DEADLKRES
static const char *blessed[] = {
	"getblk",
	"so_snd_sx",
	"so_rcv_sx",
	NULL
};
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;

static void
deadlres_td_on_lock(struct proc *p, struct thread *td, int blkticks)
{
	int tticks;

	sx_assert(&allproc_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * The thread should be blocked on a turnstile; simply check
	 * whether the turnstile channel is in good state.
	 */
	MPASS(td->td_blocked != NULL);

	tticks = ticks - td->td_blktick;
	if (tticks > blkticks)
		/*
		 * According to the provided thresholds, this thread has
		 * been stuck on a turnstile for too long.
		 */
		panic("%s: possible deadlock detected for %p (%s), "
		    "blocked for %d ticks\n", __func__,
		    td, sched_tdname(td), tticks);
}

static void
deadlres_td_sleep_q(struct proc *p, struct thread *td, int slpticks)
{
	const void *wchan;
	int i, slptype, tticks;

	sx_assert(&allproc_lock, SX_LOCKED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * Check if the thread is sleeping on a lock, otherwise skip the
	 * check.  Drop the thread lock in order to avoid a LOR with the
	 * sleepqueue spinlock.
	 */
	wchan = td->td_wchan;
	tticks = ticks - td->td_slptick;
	slptype = sleepq_type(wchan);
	if ((slptype == SLEEPQ_SX || slptype == SLEEPQ_LK) &&
	    tticks > slpticks) {
		/*
		 * According to the provided thresholds, this thread has
		 * been asleep on a sleepqueue for too long.  However,
		 * being on a sleepqueue, we might still check for the
		 * blessed list.
		 */
		for (i = 0; blessed[i] != NULL; i++)
			if (!strcmp(blessed[i], td->td_wmesg))
				return;

		panic("%s: possible deadlock detected for %p (%s), "
		    "blocked for %d ticks\n", __func__,
		    td, sched_tdname(td), tticks);
	}
}

static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	int blkticks, slpticks, tryl;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the sx lock in order to avoid a
		 * possible priority inversion problem leading to
		 * starvation.
		 * If the lock can't be held after 100 tries, panic.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
				panic("%s: possible deadlock detected "
				    "on allproc_lock\n", __func__);
			tryl++;
			pause("allproc", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NEW) {
				PROC_UNLOCK(p);
				continue;
			}
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (TD_ON_LOCK(td))
					deadlres_td_on_lock(p, td,
					    blkticks);
				else if (TD_IS_SLEEPING(td))
					deadlres_td_sleep_q(p, td,
					    slpticks);
				thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds. */
		pause("-", sleepfreq * hz);
	}
}

static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);

static SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Number of seconds a thread may sleep on a sleepqueue before it is "
    "considered stuck");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Number of seconds a thread may block on a turnstile before it is "
    "considered deadlocked");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver runs");
#endif	/* DEADLKRES */

void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);

static void
watchdog_attach(void)
{
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
}

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently
 * of each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling, and does resource
 * use estimation.  If the second timer is programmable, it is randomized
 * to avoid aliasing between the two clocks.  For example, the
 * randomization prevents an adversary from always giving up the CPU just
 * before its quantum expires; otherwise it would never accumulate CPU
 * ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 *
 * Time-of-day is maintained using a "timecounter", which may or may not be
 * related to the hardware generating the above mentioned interrupts.
 */
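
/*
 * Worked example (not from the source): with the values commonly reported
 * by kern.clockrate on FreeBSD (hz = 1000, stathz = 127, profhz = 8128),
 * initclocks() below computes psratio = 8128 / 127 = 64, so statistics
 * gathered while the fast profile clock is running are divided by 64.
 * A standalone sketch of that computation:
 */
static int
compute_psratio(int hz, int stathz, int profhz)
{
	int i;

	i = stathz ? stathz : hz;	/* fall back to hz if no stat clock */
	if (profhz == 0)
		profhz = i;		/* profhz defaults to the stat rate */
	return (profhz / i);		/* profhz is a multiple of stathz */
}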

int	stathz;			/* statistics clock's frequency */
int	profhz;			/* profiling clock's frequency */
int	profprocs;		/* number of processes in profiling */
volatile int	ticks;		/* approximate number of hardclock ticks */
int	psratio;		/* ratio: prof / stat */

DPCPU_DEFINE_STATIC(int, pcputicks);	/* Per-CPU version of ticks. */
#ifdef DEVICE_POLLING
static int devpoll_run = 0;
#endif

/*
 * Initialize clock frequencies and start both clocks running.
 */
static void
initclocks(void *dummy __unused)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_DEF);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

#ifdef SW_WATCHDOG
	/* Enable hardclock watchdog now, even if a hardware watchdog exists. */
	watchdog_attach();
#else
	/* Volunteer to run a software watchdog. */
	if (wdog_software_attach == NULL)
		wdog_software_attach = watchdog_attach;
#endif
}
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

static __noinline void
hardclock_itimer(struct thread *td, struct pstats *pstats, int cnt, int usermode)
{
	struct proc *p;
	int flags;

	flags = 0;
	p = td->td_proc;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_ITIMLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
		    tick * cnt) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_ITIMUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_ITIMLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
		    tick * cnt) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_ITIMUNLOCK(p);
	}
	if (flags != 0) {
		thread_lock(td);
		td->td_flags |= flags;
		thread_unlock(td);
	}
}
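
/*
 * Example (not part of kern_clock.c): hardclock_itimer() is the kernel
 * half of setitimer(2).  A sketch of the userland side, arming
 * ITIMER_VIRTUAL and spinning in user mode until SIGVTALRM arrives:
 */
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t fired;

static void
on_vtalrm(int sig)
{
	(void)sig;
	fired = 1;
}

int
main(void)
{
	struct itimerval itv = {
		.it_interval = { 0, 0 },	/* one-shot */
		.it_value = { 0, 100000 }	/* 100 ms of user CPU time */
	};

	signal(SIGVTALRM, on_vtalrm);
	setitimer(ITIMER_VIRTUAL, &itv, NULL);
	while (!fired)
		;	/* burn user-mode CPU so the virtual timer counts */
	printf("ITIMER_VIRTUAL expired\n");
	return (0);
}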

void
hardclock(int cnt, int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int *t = DPCPU_PTR(pcputicks);
	int global, i, newticks;

	/*
	 * Update per-CPU and possibly global ticks values.
	 */
	*t += cnt;
	global = ticks;
	do {
		newticks = *t - global;
		if (newticks <= 0) {
			if (newticks < -1)
				*t = global - 1;
			newticks = 0;
			break;
		}
	} while (!atomic_fcmpset_int(&ticks, &global, *t));

	/*
	 * Run the current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	if (__predict_false(
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) ||
	    timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)))
		hardclock_itimer(td, pstats, cnt, usermode);

#ifdef HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, hard, td->td_intr_frame);
#endif
	/* We are in charge of handling this tick's duties. */
	if (newticks > 0) {
		tc_ticktock(newticks);
#ifdef DEVICE_POLLING
		/* Dangerous, and there is no need to call these things
		 * concurrently. */
		if (atomic_cmpset_acq_int(&devpoll_run, 0, 1)) {
			/* This is very short and quick. */
			hardclock_device_poll();
			atomic_store_rel_int(&devpoll_run, 0);
		}
#endif /* DEVICE_POLLING */
		if (watchdog_enabled > 0) {
			i = atomic_fetchadd_int(&watchdog_ticks, -newticks);
			if (i > 0 && i <= newticks)
				watchdog_fire();
		}
		intr_event_handle(clk_intr_event, NULL);
	}
	if (curcpu == CPU_FIRST())
		cpu_tick_calibration();
	if (__predict_false(DPCPU_GET(epoch_cb_count)))
		GROUPTASK_ENQUEUE(DPCPU_PTR(epoch_cb_task));
}
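
/*
 * Sketch (not from the source): the lockless merge of the per-CPU tick
 * count into the global count above, restated with C11 atomics.  Each
 * caller advances its private counter; only the winner of the
 * compare-exchange publishes the new global value and takes on the
 * per-tick duties, and a counter that has fallen far behind is pulled up
 * to just under the global, much as hardclock() does with
 * atomic_fcmpset_int().
 */
#include <stdatomic.h>

static _Atomic int global_ticks;

static int
merge_ticks(int *my_ticks, int cnt)
{
	int global, newticks;

	*my_ticks += cnt;
	global = atomic_load(&global_ticks);
	do {
		newticks = *my_ticks - global;
		if (newticks <= 0) {	/* another CPU already published */
			if (newticks < -1)
				*my_ticks = global - 1;
			return (0);
		}
	} while (!atomic_compare_exchange_weak(&global_ticks, &global,
	    *my_ticks));
	return (newticks);	/* caller owns this many new ticks */
}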

void
hardclock_sync(int cpu)
{
	int *t;

	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
	t = DPCPU_ID_PTR(cpu, pcputicks);

	*t = ticks;
}

/*
 * Compute the number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = howmany(sec * 1000000 + (unsigned long)usec, tick) + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
		    + howmany((unsigned long)usec, tick) + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}
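
/*
 * Worked example (not from the source): with hz = 1000 the tick length
 * is 1000 us, so a timeval of 1.5 s converts as
 *
 *	howmany(1 * 1000000 + 500000, 1000) + 1 = 1500 + 1 = 1501
 *
 * ticks; the extra tick allows for the partially elapsed current tick.
 */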

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			while (p->p_profthreads != 0) {
				p->p_flag |= P_STOPPROF;
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			}
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock(&time_lock);
	}
}

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust the priority of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int cnt, int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;
	uint64_t runtime, new_switchtime;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks += cnt;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE] += cnt;
		else
			cp_time[CP_USER] += cnt;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks += cnt;
			cp_time[CP_INTR] += cnt;
		} else {
			td->td_pticks += cnt;
			td->td_sticks += cnt;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS] += cnt;
			else
				cp_time[CP_IDLE] += cnt;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize) * cnt;
	ru->ru_idrss += pgtok(vm->vm_dsize) * cnt;
	ru->ru_isrss += pgtok(vm->vm_ssize) * cnt;
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
	SDT_PROBE2(sched, , , tick, td, td->td_proc);
	thread_lock_flags(td, MTX_QUIET);

	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);

	sched_clock(td, cnt);
	thread_unlock(td);
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, stat, td->td_intr_frame);
#endif
}
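
/*
 * Example (not part of kern_clock.c): the rusage integrals that
 * statclock() maintains surface in userland through getrusage(2).
 */
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == -1) {
		perror("getrusage");
		return (1);
	}
	printf("maxrss %ld kB, user %ld.%06ld s, sys %ld.%06ld s\n",
	    ru.ru_maxrss,
	    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	    (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	return (0);
}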

void
profclock(int cnt, int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, cnt);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i) += cnt;
			}
		}
	}
#endif
#ifdef HWPMC_HOOKS
	if (td->td_intr_frame != NULL)
		PMC_SOFT_CALL_TF( , , clock, prof, td->td_intr_frame);
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
    CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_clockrate, "S,clockinfo",
    "Rate and period of various kernel clocks");

static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}
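
/*
 * Worked example (not from the source): WD_INTERVAL encodes the watchdog
 * timeout as the base-2 logarithm of a nanosecond count, with WD_TO_1SEC
 * as the exponent closest to one second (2^30 ns).  For a timeout of
 * WD_TO_1SEC + 3 (about 8 s), watchdog_config() above arms
 *
 *	watchdog_ticks = (1 << 3) * hz = 8 * hz
 *
 * hardclock ticks before watchdog_fire() runs.
 */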

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	uint64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = sintrcnt / sizeof(u_long);

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total        %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}