FreeBSD kernel kern code
kern_clocksource.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

int cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
int cpu_disable_c3_sleep = 0; /* Timer dies in C3. */

static void setuptimer(void);
static void loadtimer(sbintime_t now, int first);
static int doconfigtimer(void);
static void configtimer(int start);
static int round_freq(struct eventtimer *et, int freq);

static sbintime_t getnextcpuevent(int idle);
static sbintime_t getnextevent(void);
static int handleevents(sbintime_t now, int fake);

static struct mtx et_hw_mtx;

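/*
 * A per-CPU event timer takes the owning CPU's timerstate mutex;
 * a single global timer serializes all CPUs on et_hw_mtx instead.
 */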
#define ET_HW_LOCK(state)                                               \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_lock_spin(&(state)->et_hw_mtx);             \
                else                                                    \
                        mtx_lock_spin(&et_hw_mtx);                      \
        }

#define ET_HW_UNLOCK(state)                                             \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_unlock_spin(&(state)->et_hw_mtx);           \
                else                                                    \
                        mtx_unlock_spin(&et_hw_mtx);                    \
        }

static struct eventtimer *timer = NULL;
static sbintime_t timerperiod;  /* Timer period for periodic mode. */
static sbintime_t statperiod;   /* statclock() events period. */
static sbintime_t profperiod;   /* profclock() events period. */
static sbintime_t nexttick;     /* Next global timer tick time. */
static u_int busy = 1;          /* Reconfiguration is in progress. */
static int profiling;           /* Profiling events enabled. */

static char timername[32];      /* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int singlemul;           /* Multiplier for periodic mode. */
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RWTUN, &singlemul,
    0, "Multiplier for periodic mode");

static u_int idletick;          /* Run periodic events when idle. */
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RWTUN, &idletick,
    0, "Run periodic events when idle");

static int periodic;            /* Periodic or one-shot mode. */
static int want_periodic;       /* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
        struct mtx et_hw_mtx;   /* Per-CPU timer mutex. */
        u_int action;           /* Reconfiguration requests. */
        u_int handle;           /* Immediate handle requests. */
        sbintime_t now;         /* Last tick time. */
        sbintime_t nextevent;   /* Next scheduled event on this CPU. */
        sbintime_t nexttick;    /* Next timer tick time. */
        sbintime_t nexthard;    /* Next hardclock() event. */
        sbintime_t nextstat;    /* Next statclock() event. */
        sbintime_t nextprof;    /* Next profclock() event. */
        sbintime_t nextcall;    /* Next callout event. */
        sbintime_t nextcallopt; /* Next optional callout event. */
        int ipi;                /* This CPU needs IPI. */
        int idle;               /* This CPU is in idle mode. */
};

DPCPU_DEFINE_STATIC(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
        sbintime_t now;
        struct pcpu_state *state;
        int done;

        if (doconfigtimer() || busy)
                return (FILTER_HANDLED);
        state = DPCPU_PTR(timerstate);
        now = state->now;
        CTR3(KTR_SPARE2, "ipi at %d: now %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        done = handleevents(now, 0);
        return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(sbintime_t now, int fake)
{
        sbintime_t t, *hct;
        struct trapframe *frame;
        struct pcpu_state *state;
        int usermode;
        int done, runs;

        CTR3(KTR_SPARE2, "handle at %d: now %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        done = 0;
        if (fake) {
                frame = NULL;
                usermode = 0;
        } else {
                frame = curthread->td_intr_frame;
                usermode = TRAPF_USERMODE(frame);
        }

        state = DPCPU_PTR(timerstate);

        runs = 0;
        while (now >= state->nexthard) {
                state->nexthard += tick_sbt;
                runs++;
        }
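        /*
         * Each period that passed since the last run counted once above,
         * so a single hardclock(runs, ...) call below catches up on all
         * missed ticks at once.
         */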
        if (runs) {
                hct = DPCPU_PTR(hardclocktime);
                *hct = state->nexthard - tick_sbt;
                if (fake < 2) {
                        hardclock(runs, usermode);
                        done = 1;
                }
        }
        runs = 0;
        while (now >= state->nextstat) {
                state->nextstat += statperiod;
                runs++;
        }
        if (runs && fake < 2) {
                statclock(runs, usermode);
                done = 1;
        }
        if (profiling) {
                runs = 0;
                while (now >= state->nextprof) {
                        state->nextprof += profperiod;
                        runs++;
                }
                if (runs && !fake) {
                        profclock(runs, usermode, TRAPF_PC(frame));
                        done = 1;
                }
        } else
                state->nextprof = state->nextstat;
        if (now >= state->nextcallopt || now >= state->nextcall) {
                state->nextcall = state->nextcallopt = SBT_MAX;
                callout_process(now);
        }

        t = getnextcpuevent(0);
        ET_HW_LOCK(state);
        if (!busy) {
                state->idle = 0;
                state->nextevent = t;
                loadtimer(now, (fake == 2) &&
                    (timer->et_flags & ET_FLAGS_PERCPU));
        }
        ET_HW_UNLOCK(state);
        return (done);
}

/*
 * Schedule binuptime of the next event on the current CPU.
 */
static sbintime_t
getnextcpuevent(int idle)
{
        sbintime_t event;
        struct pcpu_state *state;
        u_int hardfreq;

        state = DPCPU_PTR(timerstate);
        /* Handle hardclock() events, skipping some if CPU is idle. */
        event = state->nexthard;
        if (idle) {
                if (tc_min_ticktock_freq > 1
#ifdef SMP
                    && curcpu == CPU_FIRST()
#endif
                    )
                        hardfreq = hz / tc_min_ticktock_freq;
                else
                        hardfreq = hz;
                if (hardfreq > 1)
                        event += tick_sbt * (hardfreq - 1);
        }
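        /*
         * E.g. with hz = 1000 an idle CPU defers hardclock() by up to
         * 999 ticks; only the first CPU keeps firing every
         * hz / tc_min_ticktock_freq ticks, often enough for
         * timecounter maintenance.
         */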
        /* Handle callout events. */
        if (event > state->nextcall)
                event = state->nextcall;
        if (!idle) { /* If CPU is active - handle other types of events. */
                if (event > state->nextstat)
                        event = state->nextstat;
                if (profiling && event > state->nextprof)
                        event = state->nextprof;
        }
        return (event);
}

/*
 * Schedule binuptime of the next event on all CPUs.
 */
static sbintime_t
getnextevent(void)
{
        struct pcpu_state *state;
        sbintime_t event;
#ifdef SMP
        int cpu;
#endif
#ifdef KTR
        int c;

        c = -1;
#endif
        state = DPCPU_PTR(timerstate);
        event = state->nextevent;
#ifdef SMP
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (event > state->nextevent) {
                                event = state->nextevent;
#ifdef KTR
                                c = cpu;
#endif
                        }
                }
        }
#endif
        CTR4(KTR_SPARE2, "next at %d: next %d.%08x by %d",
            curcpu, (int)(event >> 32), (u_int)(event & 0xffffffff), c);
        return (event);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
        sbintime_t now;
        sbintime_t *next;
        struct pcpu_state *state;
#ifdef SMP
        int cpu, bcast;
#endif

        /* Do not touch anything if somebody is reconfiguring timers. */
        if (busy)
                return;
        /* Update present and next tick times. */
        state = DPCPU_PTR(timerstate);
        if (et->et_flags & ET_FLAGS_PERCPU) {
                next = &state->nexttick;
        } else
                next = &nexttick;
        now = sbinuptime();
        if (periodic)
                *next = now + timerperiod;
        else
                *next = -1;     /* Next tick is not scheduled yet. */
        state->now = now;
        CTR3(KTR_SPARE2, "intr at %d: now %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
#ifdef EARLY_AP_STARTUP
        MPASS(mp_ncpus == 1 || smp_started);
#endif
        /* Prepare broadcasting to other CPUs for non-per-CPU timers. */
        bcast = 0;
#ifdef EARLY_AP_STARTUP
        if ((et->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
        if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
#endif
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        ET_HW_LOCK(state);
                        state->now = now;
                        if (now >= state->nextevent) {
                                state->nextevent += SBT_1S;
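                                /*
                                 * The bump above parks nextevent well in
                                 * the future so this CPU is not flagged
                                 * again before its IPI is handled;
                                 * handleevents() then recomputes the
                                 * real next event time.
                                 */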
                                if (curcpu != cpu) {
                                        state->ipi = 1;
                                        bcast = 1;
                                }
                        }
                        ET_HW_UNLOCK(state);
                }
        }
#endif

        /* Handle events for this time on this CPU. */
        handleevents(now, 0);

#ifdef SMP
        /* Broadcast interrupt to other CPUs for non-per-CPU timers. */
        if (bcast) {
                CPU_FOREACH(cpu) {
                        if (curcpu == cpu)
                                continue;
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (state->ipi) {
                                state->ipi = 0;
                                ipi_cpu(cpu, IPI_HARDCLOCK);
                        }
                }
        }
#endif
}

/*
 * Load new value into hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
        struct pcpu_state *state;
        sbintime_t new;
        sbintime_t *next;
        uint64_t tmp;
        int eq;

        if (timer->et_flags & ET_FLAGS_PERCPU) {
                state = DPCPU_PTR(timerstate);
                next = &state->nexttick;
        } else
                next = &nexttick;
        if (periodic) {
                if (start) {
                        /*
                         * Try to start all periodic timers aligned
                         * to period to make events synchronous.
                         */
                        tmp = now % timerperiod;
                        new = timerperiod - tmp;
                        if (new < tmp)  /* Left less than passed. */
                                new += timerperiod;
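                        /*
                         * Example: with timerperiod = 10ms and now 123ms
                         * into uptime, tmp = 3ms and new = 7ms, so the
                         * first event fires at 130ms, a period boundary
                         * shared by every timer started this way.
                         */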
                        CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
                            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
                            (int)(new >> 32), (u_int)(new & 0xffffffff));
                        *next = new + now;
                        et_start(timer, new, timerperiod);
                }
        } else {
                new = getnextevent();
                eq = (new == *next);
                CTR4(KTR_SPARE2, "load at %d: next %d.%08x eq %d",
                    curcpu, (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
                if (!eq) {
                        *next = new;
                        et_start(timer, new - now, 0);
                }
        }
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
        int freq;

        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        singlemul = MIN(MAX(singlemul, 1), 20);
        freq = hz * singlemul;
        while (freq < (profiling ? profhz : stathz))
                freq += hz;
        freq = round_freq(timer, freq);
        timerperiod = SBT_1S / freq;
}

/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from the
 * IPI handler.
 */
static int
doconfigtimer(void)
{
        sbintime_t now;
        struct pcpu_state *state;

        state = DPCPU_PTR(timerstate);
        switch (atomic_load_acq_int(&state->action)) {
        case 1:
                now = sbinuptime();
                ET_HW_LOCK(state);
                loadtimer(now, 1);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        case 2:
                ET_HW_LOCK(state);
                et_stop(timer);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        }
        if (atomic_readandclear_int(&state->handle) && !busy) {
                now = sbinuptime();
                handleevents(now, 0);
                return (1);
        }
        return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int start)
{
        sbintime_t now, next;
        struct pcpu_state *state;
        int cpu;

        if (start) {
                setuptimer();
                now = sbinuptime();
        } else
                now = 0;
        critical_enter();
        ET_HW_LOCK(DPCPU_PTR(timerstate));
        if (start) {
                /* Initialize time machine parameters. */
                next = now + timerperiod;
                if (periodic)
                        nexttick = next;
                else
                        nexttick = -1;
#ifdef EARLY_AP_STARTUP
                MPASS(mp_ncpus == 1 || smp_started);
#endif
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        state->now = now;
#ifndef EARLY_AP_STARTUP
                        if (!smp_started && cpu != CPU_FIRST())
                                state->nextevent = SBT_MAX;
                        else
#endif
                                state->nextevent = next;
                        if (periodic)
                                state->nexttick = next;
                        else
                                state->nexttick = -1;
                        state->nexthard = next;
                        state->nextstat = next;
                        state->nextprof = next;
                        state->nextcall = next;
                        state->nextcallopt = next;
                        hardclock_sync(cpu);
                }
                busy = 0;
                /* Start global timer or per-CPU timer of this CPU. */
                loadtimer(now, 1);
        } else {
                busy = 1;
                /* Stop global timer or per-CPU timer of this CPU. */
                et_stop(timer);
        }
        ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
#ifdef EARLY_AP_STARTUP
        /* If timer is global we are done. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
        /* If timer is global or there are no other CPUs yet - we are done. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
#endif
                critical_exit();
                return;
        }
        /* Set reconfigure flags for other CPUs. */
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                atomic_store_rel_int(&state->action,
                    (cpu == curcpu) ? 0 : (start ? 1 : 2));
        }
        /* Broadcast reconfigure IPI. */
        ipi_all_but_self(IPI_HARDCLOCK);
        /* Wait for reconfiguration to complete. */
restart:
        cpu_spinwait();
        CPU_FOREACH(cpu) {
                if (cpu == curcpu)
                        continue;
                state = DPCPU_ID_PTR(cpu, timerstate);
                if (atomic_load_acq_int(&state->action))
                        goto restart;
        }
#endif
        critical_exit();
}

/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
        uint64_t div;

        if (et->et_frequency != 0) {
                div = lmax((et->et_frequency + freq / 2) / freq, 1);
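                /*
                 * For power-of-two dividers round div to the nearest
                 * power of two: e.g. div = 5 gives flsl(7) - 1 = 2,
                 * i.e. div = 4, while div = 6 gives flsl(9) - 1 = 3,
                 * i.e. div = 8.
                 */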
                if (et->et_flags & ET_FLAGS_POW2DIV)
                        div = 1 << (flsl(div + div / 2) - 1);
                freq = (et->et_frequency + div / 2) / div;
        }
        if (et->et_min_period > SBT_1S)
                panic("Event timer \"%s\" doesn't support sub-second periods!",
                    et->et_name);
        else if (et->et_min_period != 0)
                freq = min(freq, SBT2FREQ(et->et_min_period));
        if (et->et_max_period < SBT_1S && et->et_max_period != 0)
                freq = max(freq, SBT2FREQ(et->et_max_period));
        return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
        struct pcpu_state *state;
        int base, div, cpu;

        mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
                state->nextcall = SBT_MAX;
                state->nextcallopt = SBT_MAX;
        }
        periodic = want_periodic;
        /* Grab the requested timer or the best one present. */
        if (timername[0])
                timer = et_find(timername, 0, 0);
        if (timer == NULL && periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL) {
                timer = et_find(NULL,
                    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
        }
        if (timer == NULL && !periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL)
                panic("No usable event timer found!");
        et_init(timer, timercb, NULL, NULL);

        /* Adapt to timer capabilities. */
        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_c3_sleep++;

        /*
         * We honor the requested 'hz' value.
         * We want to run stathz in the neighborhood of 128hz.
         * We would like profhz to run as often as possible.
         */
        if (singlemul <= 0 || singlemul > 20) {
                if (hz >= 1500 || (hz % 128) == 0)
                        singlemul = 1;
                else if (hz >= 750)
                        singlemul = 2;
                else
                        singlemul = 4;
        }
        if (periodic) {
                base = round_freq(timer, hz * singlemul);
                singlemul = max((base + hz / 2) / hz, 1);
                hz = (base + singlemul / 2) / singlemul;
                if (base <= 128)
                        stathz = base;
                else {
                        div = base / 128;
                        if (div >= singlemul && (div % singlemul) == 0)
                                div++;
                        stathz = base / div;
                }
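                /*
                 * Example (assuming the hardware can do 1000 Hz exactly):
                 * hz = 1000 and singlemul = 1 give base = 1000 and
                 * div = 1000 / 128 = 7; since 7 % 1 == 0 the divider is
                 * bumped to 8, so stathz = 125, close to the 128 Hz
                 * target.
                 */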
                profhz = stathz;
                while ((profhz + stathz) <= 128 * 64)
                        profhz += stathz;
                profhz = round_freq(timer, profhz);
        } else {
                hz = round_freq(timer, hz);
                stathz = round_freq(timer, 127);
                profhz = round_freq(timer, stathz * 64);
        }
        tick = 1000000 / hz;
        tick_sbt = SBT_1S / hz;
        tick_bt = sbttobt(tick_sbt);
        statperiod = SBT_1S / stathz;
        profperiod = SBT_1S / profhz;
        ET_LOCK();
        configtimer(1);
        ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
        sbintime_t now;
        struct pcpu_state *state;
        struct thread *td;

        state = DPCPU_PTR(timerstate);
        now = sbinuptime();
        ET_HW_LOCK(state);
        state->now = now;
        hardclock_sync(curcpu);
        spinlock_enter();
        ET_HW_UNLOCK(state);
        td = curthread;
        td->td_intr_nesting_level++;
        handleevents(state->now, 2);
        td->td_intr_nesting_level--;
        spinlock_exit();
}

void
suspendclock(void)
{
        ET_LOCK();
        configtimer(0);
        ET_UNLOCK();
}

void
resumeclock(void)
{
        ET_LOCK();
        configtimer(1);
        ET_UNLOCK();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

        ET_LOCK();
        if (profiling == 0) {
                if (periodic) {
                        configtimer(0);
                        profiling = 1;
                        configtimer(1);
                } else
                        profiling = 1;
        } else
                profiling++;
        ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

        ET_LOCK();
        if (profiling == 1) {
                if (periodic) {
                        configtimer(0);
                        profiling = 0;
                        configtimer(1);
                } else
                        profiling = 0;
        } else
                profiling--;
        ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
sbintime_t
cpu_idleclock(void)
{
        sbintime_t now, t;
        struct pcpu_state *state;

        if (idletick || busy ||
            (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
            || curcpu == CPU_FIRST()
#endif
            )
                return (-1);
        state = DPCPU_PTR(timerstate);
        if (periodic)
                now = state->now;
        else
                now = sbinuptime();
        CTR3(KTR_SPARE2, "idle at %d: now %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        t = getnextcpuevent(1);
        ET_HW_LOCK(state);
        state->idle = 1;
        state->nextevent = t;
        if (!periodic)
                loadtimer(now, 0);
        ET_HW_UNLOCK(state);
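        /*
         * The idle code presumably sleeps up to the returned time;
         * 0 means an event is already overdue.
         */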
        return (MAX(t - now, 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
        sbintime_t now;
        struct pcpu_state *state;
        struct thread *td;

        state = DPCPU_PTR(timerstate);
        if (state->idle == 0 || busy)
                return;
        if (periodic)
                now = state->now;
        else
                now = sbinuptime();
        CTR3(KTR_SPARE2, "active at %d: now %d.%08x",
            curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
        spinlock_enter();
        td = curthread;
        td->td_intr_nesting_level++;
        handleevents(now, 1);
        td->td_intr_nesting_level--;
        spinlock_exit();
}

/*
 * Change the frequency of the given timer.  This changes et->et_frequency and
 * if et is the active timer it reconfigures the timer on all CPUs.  This is
 * intended to be a private interface for the use of et_change_frequency() only.
 */
void
cpu_et_frequency(struct eventtimer *et, uint64_t newfreq)
{

        ET_LOCK();
        if (et == timer) {
                configtimer(0);
                et->et_frequency = newfreq;
                configtimer(1);
        } else
                et->et_frequency = newfreq;
        ET_UNLOCK();
}

void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
        struct pcpu_state *state;

        /* Do not touch anything if somebody is reconfiguring timers. */
        if (busy)
                return;
        CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
            curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
            (int)(bt >> 32), (u_int)(bt & 0xffffffff));

        KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
        state = DPCPU_ID_PTR(cpu, timerstate);
        ET_HW_LOCK(state);

        /*
         * If there is a callout time already set earlier -- do nothing.
         * This check may appear redundant because we check already in
         * callout_process() but this double check guarantees we're safe
         * with respect to race conditions between interrupt execution
         * and scheduling.
         */
        state->nextcallopt = bt_opt;
        if (bt >= state->nextcall)
                goto done;
        state->nextcall = bt;
        /* If there is some other event set earlier -- do nothing. */
        if (bt >= state->nextevent)
                goto done;
        state->nextevent = bt;
        /* If the timer is periodic -- there is nothing to reprogram. */
        if (periodic)
                goto done;
        /* If the timer is global or owned by the current CPU -- reprogram it. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
                loadtimer(sbinuptime(), 0);
done:
                ET_HW_UNLOCK(state);
                return;
        }
        /* Otherwise make the other CPU reprogram it. */
        state->handle = 1;
        ET_HW_UNLOCK(state);
#ifdef SMP
        ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
        char buf[32];
        struct eventtimer *et;
        int error;

        ET_LOCK();
        et = timer;
        snprintf(buf, sizeof(buf), "%s", et->et_name);
        ET_UNLOCK();
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        ET_LOCK();
        et = timer;
        if (error != 0 || req->newptr == NULL ||
            strcasecmp(buf, et->et_name) == 0) {
                ET_UNLOCK();
                return (error);
        }
        et = et_find(buf, 0, 0);
        if (et == NULL) {
                ET_UNLOCK();
                return (ENOENT);
        }
        configtimer(0);
        et_free(timer);
        if (et->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_c3_sleep++;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_c3_sleep--;
        timer = et;
        et_init(timer, timercb, NULL, NULL);
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
        int error, val;

        val = periodic;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        ET_LOCK();
        configtimer(0);
        periodic = want_periodic = val;
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");

#include "opt_ddb.h"

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(clocksource, db_show_clocksource)
{
        struct pcpu_state *st;
        int c;

        CPU_FOREACH(c) {
                st = DPCPU_ID_PTR(c, timerstate);
                db_printf(
                    "CPU %2d: action %d handle %d ipi %d idle %d\n"
                    " now %#jx nevent %#jx (%jd)\n"
                    " ntick %#jx (%jd) nhard %#jx (%jd)\n"
                    " nstat %#jx (%jd) nprof %#jx (%jd)\n"
                    " ncall %#jx (%jd) ncallopt %#jx (%jd)\n",
                    c, st->action, st->handle, st->ipi, st->idle,
                    (uintmax_t)st->now,
                    (uintmax_t)st->nextevent,
                    (uintmax_t)(st->nextevent - st->now) / tick_sbt,
                    (uintmax_t)st->nexttick,
                    (uintmax_t)(st->nexttick - st->now) / tick_sbt,
                    (uintmax_t)st->nexthard,
                    (uintmax_t)(st->nexthard - st->now) / tick_sbt,
                    (uintmax_t)st->nextstat,
                    (uintmax_t)(st->nextstat - st->now) / tick_sbt,
                    (uintmax_t)st->nextprof,
                    (uintmax_t)(st->nextprof - st->now) / tick_sbt,
                    (uintmax_t)st->nextcall,
                    (uintmax_t)(st->nextcall - st->now) / tick_sbt,
                    (uintmax_t)st->nextcallopt,
                    (uintmax_t)(st->nextcallopt - st->now) / tick_sbt);
        }
}

#endif