FreeBSD kernel kern code
kern_ntptime.c
1/*-
2 ***********************************************************************
3 * *
4 * Copyright (c) David L. Mills 1993-2001 *
5 * *
6 * Permission to use, copy, modify, and distribute this software and *
7 * its documentation for any purpose and without fee is hereby *
8 * granted, provided that the above copyright notice appears in all *
9 * copies and that both the copyright notice and this permission *
10 * notice appear in supporting documentation, and that the name *
11 * University of Delaware not be used in advertising or publicity *
12 * pertaining to distribution of the software without specific, *
13 * written prior permission. The University of Delaware makes no *
14 * representations about the suitability this software for any *
15 * purpose. It is provided "as is" without express or implied *
16 * warranty. *
17 * *
18 **********************************************************************/
19
20/*
21 * Adapted from the original sources for FreeBSD and timecounters by:
22 * Poul-Henning Kamp <phk@FreeBSD.org>.
23 *
24 * The 32bit version of the "LP" macros seems a bit past its "sell by"
25 * date so I have retained only the 64bit version and included it directly
26 * in this file.
27 *
28 * Only minor changes done to interface with the timecounters over in
29 * sys/kern/kern_clock.c. Some of the comments below may be (even more)
30 * confusing and/or plain wrong in that context.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD$");
35
36#include "opt_ntp.h"
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/sysproto.h>
41#include <sys/eventhandler.h>
42#include <sys/kernel.h>
43#include <sys/priv.h>
44#include <sys/proc.h>
45#include <sys/lock.h>
46#include <sys/mutex.h>
47#include <sys/time.h>
48#include <sys/timex.h>
49#include <sys/timetc.h>
50#include <sys/timepps.h>
51#include <sys/syscallsubr.h>
52#include <sys/sysctl.h>
53
54#ifdef PPS_SYNC
55FEATURE(pps_sync, "Support usage of external PPS signal by kernel PLL");
56#endif
57
58/*
59 * Single-precision macros for 64-bit machines
60 */
61typedef int64_t l_fp;
62#define L_ADD(v, u) ((v) += (u))
63#define L_SUB(v, u) ((v) -= (u))
64#define L_ADDHI(v, a) ((v) += (int64_t)(a) << 32)
65#define L_NEG(v) ((v) = -(v))
66#define L_RSHIFT(v, n) \
67 do { \
68 if ((v) < 0) \
69 (v) = -(-(v) >> (n)); \
70 else \
71 (v) = (v) >> (n); \
72 } while (0)
73#define L_MPY(v, a) ((v) *= (a))
74#define L_CLR(v) ((v) = 0)
75#define L_ISNEG(v) ((v) < 0)
76#define L_LINT(v, a) ((v) = (int64_t)(a) << 32)
77#define L_GINT(v) ((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
78
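These macros implement a signed 32.32 fixed-point type on top of int64_t: L_LINT loads an integer into the upper half, L_GINT reads it back, and L_RSHIFT divides by a power of two while keeping the rounding of negative values toward zero. A minimal userland sketch (not part of the kernel source; the three macros are copied from above) of how they behave:

/* Illustrative userland demo of the 32.32 fixed-point macros above. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t l_fp;
#define L_LINT(v, a)	((v) = (int64_t)(a) << 32)
#define L_GINT(v)	((v) < 0 ? -(-(v) >> 32) : (v) >> 32)
#define L_RSHIFT(v, n)				\
	do {					\
		if ((v) < 0)			\
			(v) = -(-(v) >> (n));	\
		else				\
			(v) = (v) >> (n);	\
	} while (0)

int
main(void)
{
	l_fp x;

	L_LINT(x, -500000);			/* load -500000 ns */
	L_RSHIFT(x, 4);				/* divide by 16, rounding toward zero */
	printf("%lld\n", (long long)L_GINT(x));	/* prints -31250 */
	return (0);
}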
79/*
80 * Generic NTP kernel interface
81 *
82 * These routines constitute the Network Time Protocol (NTP) interfaces
83 * for user and daemon application programs. The ntp_gettime() routine
84 * provides the time, maximum error (synch distance) and estimated error
85 * (dispersion) to client user application programs. The ntp_adjtime()
86 * routine is used by the NTP daemon to adjust the system clock to an
87 * externally derived time. The time offset and related variables set by
88 * this routine are used by other routines in this module to adjust the
89 * phase and frequency of the clock discipline loop which controls the
90 * system clock.
91 *
92 * When the kernel time is reckoned directly in nanoseconds (NTP_NANO
93 * defined), the time at each tick interrupt is derived directly from
94 * the kernel time variable. When the kernel time is reckoned in
95 * microseconds, (NTP_NANO undefined), the time is derived from the
96 * kernel time variable together with a variable representing the
97 * leftover nanoseconds at the last tick interrupt. In either case, the
98 * current nanosecond time is reckoned from these values plus an
99 * interpolated value derived by the clock routines in another
100 * architecture-specific module. The interpolation can use either a
101 * dedicated counter or a processor cycle counter (PCC) implemented in
102 * some architectures.
103 *
104 * Note that all routines must run at priority splclock or higher.
105 */
106/*
107 * Phase/frequency-lock loop (PLL/FLL) definitions
108 *
109 * The nanosecond clock discipline uses two variable types, time
110 * variables and frequency variables. Both types are represented as 64-
111 * bit fixed-point quantities with the decimal point between two 32-bit
112 * halves. On a 32-bit machine, each half is represented as a single
113 * word and mathematical operations are done using multiple-precision
114 * arithmetic. On a 64-bit machine, ordinary computer arithmetic is
115 * used.
116 *
117 * A time variable is a signed 64-bit fixed-point number in ns and
118 * fraction. It represents the remaining time offset to be amortized
119 * over succeeding tick interrupts. The maximum time offset is about
120 * 0.5 s and the resolution is about 2.3e-10 ns.
121 *
122 * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
123 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
124 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
125 * |s s s| ns |
126 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
127 * | fraction |
128 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
129 *
130 * A frequency variable is a signed 64-bit fixed-point number in ns/s
131 * and fraction. It represents the ns and fraction to be added to the
132 * kernel time variable at each second. The maximum frequency offset is
133 * about +-500000 ns/s and the resolution is about 2.3e-10 ns/s.
134 *
135 * 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
136 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
137 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
138 * |s s s s s s s s s s s s s| ns/s |
139 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
140 * | fraction |
141 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
142 */
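As a concrete unit check, ntp_adjtime(2) carries frequency as PPM scaled by 2^16 (us/s << 16), while the kernel keeps ns/s scaled by 2^32, so converting between the two amounts to multiplying or dividing by 1000 * 2^16. The sketch below is userland arithmetic only, mirroring what the MOD_FREQUENCY handling later in this file does:

/* Userland check of the timex <-> kernel frequency scaling. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	long ppm16 = 50L << 16;		/* +50 PPM in timex units (us/s << 16) */
	int64_t time_freq;		/* kernel unit: ns/s << 32 */

	time_freq = (int64_t)ppm16 * 1000LL * 65536LL;	/* as in kern_ntp_adjtime() */
	printf("%ld ns/s\n", (long)(time_freq >> 32));	/* prints 50000 */

	/* Converting back recovers the original timex value. */
	printf("%ld\n", (long)(((time_freq / 1000LL) << 16) >> 32));	/* prints 3276800 */
	return (0);
}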
143/*
144 * The following variables establish the state of the PLL/FLL and the
145 * residual time and frequency offset of the local clock.
146 */
147#define SHIFT_PLL 4 /* PLL loop gain (shift) */
148#define SHIFT_FLL 2 /* FLL loop gain (shift) */
149
150static int time_state = TIME_OK; /* clock state */
151int time_status = STA_UNSYNC; /* clock status bits */
152static long time_tai; /* TAI offset (s) */
153static long time_monitor; /* last time offset scaled (ns) */
154static long time_constant; /* poll interval (shift) (s) */
155static long time_precision = 1; /* clock precision (ns) */
156static long time_maxerror = MAXPHASE / 1000; /* maximum error (us) */
157long time_esterror = MAXPHASE / 1000; /* estimated error (us) */
158static long time_reftime; /* uptime at last adjustment (s) */
159static l_fp time_offset; /* time offset (ns) */
160static l_fp time_freq; /* frequency offset (ns/s) */
161static l_fp time_adj; /* tick adjust (ns/s) */
162
163static int64_t time_adjtime; /* correction from adjtime(2) (usec) */
164
165static struct mtx ntp_lock;
166MTX_SYSINIT(ntp, &ntp_lock, "ntp", MTX_SPIN);
167
168#define NTP_LOCK() mtx_lock_spin(&ntp_lock)
169#define NTP_UNLOCK() mtx_unlock_spin(&ntp_lock)
170#define NTP_ASSERT_LOCKED() mtx_assert(&ntp_lock, MA_OWNED)
171
172#ifdef PPS_SYNC
173/*
174 * The following variables are used when a pulse-per-second (PPS) signal
175 * is available and connected via a modem control lead. They establish
176 * the engineering parameters of the clock discipline loop when
177 * controlled by the PPS signal.
178 */
179#define PPS_FAVG 2 /* min freq avg interval (s) (shift) */
180#define PPS_FAVGDEF 8 /* default freq avg int (s) (shift) */
181#define PPS_FAVGMAX 15 /* max freq avg interval (s) (shift) */
182#define PPS_PAVG 4 /* phase avg interval (s) (shift) */
183#define PPS_VALID 120 /* PPS signal watchdog max (s) */
184#define PPS_MAXWANDER 100000 /* max PPS wander (ns/s) */
185#define PPS_POPCORN 2 /* popcorn spike threshold (shift) */
186
187static struct timespec pps_tf[3]; /* phase median filter */
188static l_fp pps_freq; /* scaled frequency offset (ns/s) */
189static long pps_fcount; /* frequency accumulator */
190static long pps_jitter; /* nominal jitter (ns) */
191static long pps_stabil; /* nominal stability (scaled ns/s) */
192static long pps_lastsec; /* time at last calibration (s) */
193static int pps_valid; /* signal watchdog counter */
194static int pps_shift = PPS_FAVG; /* interval duration (s) (shift) */
195static int pps_shiftmax = PPS_FAVGDEF; /* max interval duration (s) (shift) */
196static int pps_intcnt; /* wander counter */
197
198/*
199 * PPS signal quality monitors
200 */
201static long pps_calcnt; /* calibration intervals */
202static long pps_jitcnt; /* jitter limit exceeded */
203static long pps_stbcnt; /* stability limit exceeded */
204static long pps_errcnt; /* calibration errors */
205#endif /* PPS_SYNC */
206/*
207 * End of phase/frequency-lock loop (PLL/FLL) definitions
208 */
209
210static void hardupdate(long offset);
211static void ntp_gettime1(struct ntptimeval *ntvp);
212static bool ntp_is_time_error(int tsl);
213
214static bool
215ntp_is_time_error(int tsl)
216{
217
218 /*
219 * Status word error decode. If any of these conditions occur,
220 * an error is returned, instead of the status word. Most
221 * applications will care only about the fact the system clock
222 * may not be trusted, not about the details.
223 *
224 * Hardware or software error
225 */
226 if ((tsl & (STA_UNSYNC | STA_CLOCKERR)) ||
227
228 /*
229 * PPS signal lost when either time or frequency synchronization
230 * requested
231 */
232 (tsl & (STA_PPSFREQ | STA_PPSTIME) &&
233 !(tsl & STA_PPSSIGNAL)) ||
234
235 /*
236 * PPS jitter exceeded when time synchronization requested
237 */
238 (tsl & STA_PPSTIME && tsl & STA_PPSJITTER) ||
239
240 /*
241 * PPS wander exceeded or calibration error when frequency
242 * synchronization requested
243 */
244 (tsl & STA_PPSFREQ &&
245 tsl & (STA_PPSWANDER | STA_PPSERROR)))
246 return (true);
247
248 return (false);
249}
250
251static void
252ntp_gettime1(struct ntptimeval *ntvp)
253{
254 struct timespec atv; /* nanosecond time */
255
256 NTP_ASSERT_LOCKED();
257
258 nanotime(&atv);
259 ntvp->time.tv_sec = atv.tv_sec;
260 ntvp->time.tv_nsec = atv.tv_nsec;
261 ntvp->maxerror = time_maxerror;
262 ntvp->esterror = time_esterror;
263 ntvp->tai = time_tai;
264 ntvp->time_state = time_state;
265
266 if (ntp_is_time_error(time_status))
267 ntvp->time_state = TIME_ERROR;
268}
269
270/*
271 * ntp_gettime() - NTP user application interface
272 *
273 * See the timex.h header file for synopsis and API description. Note that
274 * the TAI offset is returned in the ntvtimeval.tai structure member.
275 */
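For reference, userland reaches this interface through the ntp_gettime(2) wrapper. The sketch below is illustrative only and assumes the FreeBSD <sys/timex.h> definitions, under which the call returns the clock state:

/* Minimal userland caller of ntp_gettime(2). */
#include <sys/timex.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct ntptimeval ntv;
	int state;

	state = ntp_gettime(&ntv);
	if (state == TIME_ERROR)
		printf("clock is not synchronized\n");
	else
		printf("time %jd.%09ld, maxerror %ld us, TAI offset %ld s\n",
		    (intmax_t)ntv.time.tv_sec, ntv.time.tv_nsec,
		    ntv.maxerror, ntv.tai);
	return (0);
}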
276#ifndef _SYS_SYSPROTO_H_
277struct ntp_gettime_args {
278 struct ntptimeval *ntvp;
279};
280#endif
281/* ARGSUSED */
282int
283sys_ntp_gettime(struct thread *td, struct ntp_gettime_args *uap)
284{
285 struct ntptimeval ntv;
286
287 memset(&ntv, 0, sizeof(ntv));
288
289 NTP_LOCK();
290 ntp_gettime1(&ntv);
291 NTP_UNLOCK();
292
293 td->td_retval[0] = ntv.time_state;
294 return (copyout(&ntv, uap->ntvp, sizeof(ntv)));
295}
296
297static int
298ntp_sysctl(SYSCTL_HANDLER_ARGS)
299{
300 struct ntptimeval ntv; /* temporary structure */
301
302 memset(&ntv, 0, sizeof(ntv));
303
304 NTP_LOCK();
305 ntp_gettime1(&ntv);
306 NTP_UNLOCK();
307
308 return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
309}
310
311SYSCTL_NODE(_kern, OID_AUTO, ntp_pll, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
312 "");
313SYSCTL_PROC(_kern_ntp_pll, OID_AUTO, gettime, CTLTYPE_OPAQUE | CTLFLAG_RD |
314 CTLFLAG_MPSAFE, 0, sizeof(struct ntptimeval) , ntp_sysctl, "S,ntptimeval",
315 "");
316
317#ifdef PPS_SYNC
318SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shiftmax, CTLFLAG_RW,
319 &pps_shiftmax, 0, "Max interval duration (sec) (shift)");
320SYSCTL_INT(_kern_ntp_pll, OID_AUTO, pps_shift, CTLFLAG_RW,
321 &pps_shift, 0, "Interval duration (sec) (shift)");
322SYSCTL_LONG(_kern_ntp_pll, OID_AUTO, time_monitor, CTLFLAG_RD,
323 &time_monitor, 0, "Last time offset scaled (ns)");
324
325SYSCTL_S64(_kern_ntp_pll, OID_AUTO, pps_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
326 &pps_freq, 0,
327 "Scaled frequency offset (ns/sec)");
328SYSCTL_S64(_kern_ntp_pll, OID_AUTO, time_freq, CTLFLAG_RD | CTLFLAG_MPSAFE,
329 &time_freq, 0,
330 "Frequency offset (ns/sec)");
331#endif
332
333/*
334 * ntp_adjtime() - NTP daemon application interface
335 *
336 * See the timex.h header file for synopsis and API description. Note that
337 * the timex.constant structure member has a dual purpose to set the time
338 * constant and to set the TAI offset.
339 */
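A call with no mode bits set performs no modification (the priv_check() below is only done when modes is non-zero) and simply reads back the PLL variables. A minimal userland sketch of such a read-only query:

/* Userland sketch: query the kernel PLL via ntp_adjtime(2) without changing it. */
#include <sys/timex.h>
#include <stdio.h>

int
main(void)
{
	struct timex tx = { .modes = 0 };	/* read-only query, no privilege needed */
	int state;

	state = ntp_adjtime(&tx);
	printf("state %d, offset %ld, freq %ld (PPM << 16), status 0x%x\n",
	    state, tx.offset, tx.freq, (unsigned)tx.status);
	return (0);
}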
340int
341kern_ntp_adjtime(struct thread *td, struct timex *ntv, int *retvalp)
342{
343 long freq; /* frequency (ns/s) */
344 int modes; /* mode bits from structure */
345 int error, retval;
346
347 /*
348 * Update selected clock variables - only the superuser can
349 * change anything. Note that there is no error checking here on
350 * the assumption the superuser should know what it is doing.
351 * Note that either the time constant or TAI offset are loaded
352 * from the ntv.constant member, depending on the mode bits. If
353 * the STA_PLL bit in the status word is cleared, the state and
354 * status words are reset to the initial values at boot.
355 */
356 modes = ntv->modes;
357 error = 0;
358 if (modes)
359 error = priv_check(td, PRIV_NTP_ADJTIME);
360 if (error != 0)
361 return (error);
362 NTP_LOCK();
363 if (modes & MOD_MAXERROR)
364 time_maxerror = ntv->maxerror;
365 if (modes & MOD_ESTERROR)
366 time_esterror = ntv->esterror;
367 if (modes & MOD_STATUS) {
368 if (time_status & STA_PLL && !(ntv->status & STA_PLL)) {
369 time_state = TIME_OK;
370 time_status = STA_UNSYNC;
371#ifdef PPS_SYNC
372 pps_shift = PPS_FAVG;
373#endif /* PPS_SYNC */
374 }
375 time_status &= STA_RONLY;
376 time_status |= ntv->status & ~STA_RONLY;
377 }
378 if (modes & MOD_TIMECONST) {
379 if (ntv->constant < 0)
380 time_constant = 0;
381 else if (ntv->constant > MAXTC)
382 time_constant = MAXTC;
383 else
384 time_constant = ntv->constant;
385 }
386 if (modes & MOD_TAI) {
387 if (ntv->constant > 0) /* XXX zero & negative numbers ? */
388 time_tai = ntv->constant;
389 }
390#ifdef PPS_SYNC
391 if (modes & MOD_PPSMAX) {
392 if (ntv->shift < PPS_FAVG)
393 pps_shiftmax = PPS_FAVG;
394 else if (ntv->shift > PPS_FAVGMAX)
395 pps_shiftmax = PPS_FAVGMAX;
396 else
397 pps_shiftmax = ntv->shift;
398 }
399#endif /* PPS_SYNC */
400 if (modes & MOD_NANO)
401 time_status |= STA_NANO;
402 if (modes & MOD_MICRO)
403 time_status &= ~STA_NANO;
404 if (modes & MOD_CLKB)
405 time_status |= STA_CLK;
406 if (modes & MOD_CLKA)
407 time_status &= ~STA_CLK;
408 if (modes & MOD_FREQUENCY) {
409 freq = (ntv->freq * 1000LL) >> 16;
410 if (freq > MAXFREQ)
411 L_LINT(time_freq, MAXFREQ);
412 else if (freq < -MAXFREQ)
413 L_LINT(time_freq, -MAXFREQ);
414 else {
415 /*
416 * ntv->freq is [PPM * 2^16] = [us/s * 2^16]
417 * time_freq is [ns/s * 2^32]
418 */
419 time_freq = ntv->freq * 1000LL * 65536LL;
420 }
421#ifdef PPS_SYNC
422 pps_freq = time_freq;
423#endif /* PPS_SYNC */
424 }
425 if (modes & MOD_OFFSET) {
426 if (time_status & STA_NANO)
427 hardupdate(ntv->offset);
428 else
429 hardupdate(ntv->offset * 1000);
430 }
431
432 /*
433 * Retrieve all clock variables. Note that the TAI offset is
434 * returned only by ntp_gettime();
435 */
436 if (time_status & STA_NANO)
437 ntv->offset = L_GINT(time_offset);
438 else
439 ntv->offset = L_GINT(time_offset) / 1000; /* XXX rounding ? */
440 ntv->freq = L_GINT((time_freq / 1000LL) << 16);
441 ntv->maxerror = time_maxerror;
442 ntv->esterror = time_esterror;
443 ntv->status = time_status;
444 ntv->constant = time_constant;
445 if (time_status & STA_NANO)
446 ntv->precision = time_precision;
447 else
448 ntv->precision = time_precision / 1000;
449 ntv->tolerance = MAXFREQ * SCALE_PPM;
450#ifdef PPS_SYNC
451 ntv->shift = pps_shift;
452 ntv->ppsfreq = L_GINT((pps_freq / 1000LL) << 16);
453 if (time_status & STA_NANO)
454 ntv->jitter = pps_jitter;
455 else
456 ntv->jitter = pps_jitter / 1000;
457 ntv->stabil = pps_stabil;
458 ntv->calcnt = pps_calcnt;
459 ntv->errcnt = pps_errcnt;
460 ntv->jitcnt = pps_jitcnt;
461 ntv->stbcnt = pps_stbcnt;
462#endif /* PPS_SYNC */
463 retval = ntp_is_time_error(time_status) ? TIME_ERROR : time_state;
464 NTP_UNLOCK();
465
466 *retvalp = retval;
467 return (0);
468}
469
470#ifndef _SYS_SYSPROTO_H_
471struct ntp_adjtime_args {
472 struct timex *tp;
473};
474#endif
475
476int
477sys_ntp_adjtime(struct thread *td, struct ntp_adjtime_args *uap)
478{
479 struct timex ntv;
480 int error, retval;
481
482 error = copyin(uap->tp, &ntv, sizeof(ntv));
483 if (error == 0) {
484 error = kern_ntp_adjtime(td, &ntv, &retval);
485 if (error == 0) {
486 error = copyout(&ntv, uap->tp, sizeof(ntv));
487 if (error == 0)
488 td->td_retval[0] = retval;
489 }
490 }
491 return (error);
492}
493
494/*
495 * second_overflow() - called after ntp_tick_adjust()
496 *
497 * This routine is ordinarily called immediately following the above
498 * routine ntp_tick_adjust(). While these two routines are normally
499 * combined, they are separated here only for the purposes of
500 * simulation.
501 */
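One detail worth noting before the code: the adjtime(2) correction consumed near the end of this routine is slewed at 5000 us/s while more than a second remains, then at 500 us/s, and the final remainder of less than 500 us is applied in one step. The userland sketch below reduces that rate selection to a pure function (illustrative only, mirroring the if/else ladder further down):

/* Userland sketch of the per-second adjtime(2) slew-rate selection. */
#include <stdint.h>
#include <stdio.h>

/* Microseconds of the remaining adjtime(2) correction consumed this second. */
static int
slew_this_second(int64_t remaining_us)
{
	if (remaining_us > 1000000)
		return (5000);
	else if (remaining_us < -1000000)
		return (-5000);
	else if (remaining_us > 500)
		return (500);
	else if (remaining_us < -500)
		return (-500);
	else
		return ((int)remaining_us);
}

int
main(void)
{
	int64_t rem = 2500000;		/* 2.5 s left to slew out */
	int secs = 0;

	while (rem != 0) {
		rem -= slew_this_second(rem);
		secs++;
	}
	printf("fully slewed after %d seconds\n", secs);	/* prints 2300 */
	return (0);
}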
502void
503ntp_update_second(int64_t *adjustment, time_t *newsec)
504{
505 int tickrate;
506 l_fp ftemp; /* 32/64-bit temporary */
507
508 NTP_LOCK();
509
510 /*
511 * On rollover of the second both the nanosecond and microsecond
512 * clocks are updated and the state machine cranked as
513 * necessary. The phase adjustment to be used for the next
514 * second is calculated and the maximum error is increased by
515 * the tolerance.
516 */
517 time_maxerror += MAXFREQ / 1000;
518
519 /*
520 * Leap second processing. If in leap-insert state at
521 * the end of the day, the system clock is set back one
522 * second; if in leap-delete state, the system clock is
523 * set ahead one second. The nano_time() routine or
524 * external clock driver will insure that reported time
525 * is always monotonic.
526 */
527 switch (time_state) {
528 /*
529 * No warning.
530 */
531 case TIME_OK:
532 if (time_status & STA_INS)
533 time_state = TIME_INS;
534 else if (time_status & STA_DEL)
535 time_state = TIME_DEL;
536 break;
537
538 /*
539 * Insert second 23:59:60 following second
540 * 23:59:59.
541 */
542 case TIME_INS:
543 if (!(time_status & STA_INS))
544 time_state = TIME_OK;
545 else if ((*newsec) % 86400 == 0) {
546 (*newsec)--;
547 time_state = TIME_OOP;
548 time_tai++;
549 }
550 break;
551
552 /*
553 * Delete second 23:59:59.
554 */
555 case TIME_DEL:
556 if (!(time_status & STA_DEL))
557 time_state = TIME_OK;
558 else if (((*newsec) + 1) % 86400 == 0) {
559 (*newsec)++;
560 time_tai--;
561 time_state = TIME_WAIT;
562 }
563 break;
564
565 /*
566 * Insert second in progress.
567 */
568 case TIME_OOP:
569 time_state = TIME_WAIT;
570 break;
571
572 /*
573 * Wait for status bits to clear.
574 */
575 case TIME_WAIT:
576 if (!(time_status & (STA_INS | STA_DEL)))
577 time_state = TIME_OK;
578 }
579
580 /*
581 * Compute the total time adjustment for the next second
582 * in ns. The offset is reduced by a factor depending on
583 * whether the PPS signal is operating. Note that the
584 * value is in effect scaled by the clock frequency,
585 * since the adjustment is added at each tick interrupt.
586 */
587 ftemp = time_offset;
588#ifdef PPS_SYNC
589 /* XXX even if PPS signal dies we should finish adjustment ? */
590 if (time_status & STA_PPSTIME && time_status &
591 STA_PPSSIGNAL)
592 L_RSHIFT(ftemp, pps_shift);
593 else
594 L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
595#else
596 L_RSHIFT(ftemp, SHIFT_PLL + time_constant);
597#endif /* PPS_SYNC */
598 time_adj = ftemp;
599 L_SUB(time_offset, ftemp);
600 L_ADD(time_adj, time_freq);
601
602 /*
603 * Apply any correction from adjtime(2). If more than one second
604 * off we slew at a rate of 5ms/s (5000 PPM) else 500us/s (500 PPM)
605 * until the last second is slewed the final < 500 usecs.
606 */
607 if (time_adjtime != 0) {
608 if (time_adjtime > 1000000)
609 tickrate = 5000;
610 else if (time_adjtime < -1000000)
611 tickrate = -5000;
612 else if (time_adjtime > 500)
613 tickrate = 500;
614 else if (time_adjtime < -500)
615 tickrate = -500;
616 else
617 tickrate = time_adjtime;
618 time_adjtime -= tickrate;
619 L_LINT(ftemp, tickrate * 1000);
620 L_ADD(time_adj, ftemp);
621 }
622 *adjustment = time_adj;
623
624#ifdef PPS_SYNC
625 if (pps_valid > 0)
626 pps_valid--;
627 else
628 time_status &= ~STA_PPSSIGNAL;
629#endif /* PPS_SYNC */
630
631 NTP_UNLOCK();
632}
633
634/*
635 * hardupdate() - local clock update
636 *
637 * This routine is called by ntp_adjtime() to update the local clock
638 * phase and frequency. The implementation is of an adaptive-parameter,
639 * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
640 * time and frequency offset estimates for each call. If the kernel PPS
641 * discipline code is configured (PPS_SYNC), the PPS signal itself
642 * determines the new time offset, instead of the calling argument.
643 * Presumably, calls to ntp_adjtime() occur only when the caller
644 * believes the local clock is valid within some bound (+-128 ms with
645 * NTP). If the caller's time is far different than the PPS time, an
646 * argument will ensue, and it's not clear who will lose.
647 *
648 * For uncompensated quartz crystal oscillators and nominal update
649 * intervals less than 256 s, operation should be in phase-lock mode,
650 * where the loop is disciplined to phase. For update intervals greater
651 * than 1024 s, operation should be in frequency-lock mode, where the
652 * loop is disciplined to frequency. Between 256 s and 1024 s, the mode
653 * is selected by the STA_MODE status bit.
654 */
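In floating-point terms, the PLL branch below adds offset * mtemp / 2^(2 * (SHIFT_PLL + 2 + time_constant)) ns/s to the frequency, while the FLL branch adds offset / (mtemp * 2^SHIFT_FLL). The userland sketch below (example numbers assumed, not kernel code) shows the two magnitudes for a 100 us offset observed 64 s after the previous update with time_constant 0, roughly 1562 ns/s versus 391 ns/s:

/* Userland sketch of the PLL and FLL frequency-update magnitudes. */
#include <stdio.h>

#define SHIFT_PLL	4	/* PLL loop gain (shift), as above */
#define SHIFT_FLL	2	/* FLL loop gain (shift), as above */

int
main(void)
{
	double offset = 100000.0;	/* 100 us phase error, in ns */
	double mtemp = 64.0;		/* seconds since the previous update */
	int time_constant = 0;		/* poll interval shift (MOD_TIMECONST) */
	double pll, fll;

	pll = offset * mtemp / (double)(1 << (2 * (SHIFT_PLL + 2 + time_constant)));
	fll = offset / (mtemp * (1 << SHIFT_FLL));
	printf("PLL adds %.1f ns/s, FLL adds %.1f ns/s\n", pll, fll);
	return (0);
}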
655static void
656hardupdate(offset)
657 long offset; /* clock offset (ns) */
658{
659 long mtemp;
660 l_fp ftemp;
661
662 NTP_ASSERT_LOCKED();
663
664 /*
665 * Select how the phase is to be controlled and from which
666 * source. If the PPS signal is present and enabled to
667 * discipline the time, the PPS offset is used; otherwise, the
668 * argument offset is used.
669 */
670 if (!(time_status & STA_PLL))
671 return;
672 if (!(time_status & STA_PPSTIME && time_status &
673 STA_PPSSIGNAL)) {
674 if (offset > MAXPHASE)
675 time_monitor = MAXPHASE;
676 else if (offset < -MAXPHASE)
677 time_monitor = -MAXPHASE;
678 else
679 time_monitor = offset;
680 L_LINT(time_offset, time_monitor);
681 }
682
683 /*
684 * Select how the frequency is to be controlled and in which
685 * mode (PLL or FLL). If the PPS signal is present and enabled
686 * to discipline the frequency, the PPS frequency is used;
687 * otherwise, the argument offset is used to compute it.
688 */
689 if (time_status & STA_PPSFREQ && time_status & STA_PPSSIGNAL) {
690 time_reftime = time_uptime;
691 return;
692 }
693 if (time_status & STA_FREQHOLD || time_reftime == 0)
694 time_reftime = time_uptime;
695 mtemp = time_uptime - time_reftime;
696 L_LINT(ftemp, time_monitor);
697 L_RSHIFT(ftemp, (SHIFT_PLL + 2 + time_constant) << 1);
698 L_MPY(ftemp, mtemp);
699 L_ADD(time_freq, ftemp);
700 time_status &= ~STA_MODE;
701 if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp >
702 MAXSEC)) {
703 L_LINT(ftemp, (time_monitor << 4) / mtemp);
704 L_RSHIFT(ftemp, SHIFT_FLL + 4);
705 L_ADD(time_freq, ftemp);
706 time_status |= STA_MODE;
707 }
708 time_reftime = time_uptime;
709 if (L_GINT(time_freq) > MAXFREQ)
710 L_LINT(time_freq, MAXFREQ);
711 else if (L_GINT(time_freq) < -MAXFREQ)
712 L_LINT(time_freq, -MAXFREQ);
713}
714
715#ifdef PPS_SYNC
716/*
717 * hardpps() - discipline CPU clock oscillator to external PPS signal
718 *
719 * This routine is called at each PPS interrupt in order to discipline
720 * the CPU clock oscillator to the PPS signal. There are two independent
721 * first-order feedback loops, one for the phase, the other for the
722 * frequency. The phase loop measures and grooms the PPS phase offset
723 * and leaves it in a handy spot for the seconds overflow routine. The
724 * frequency loop averages successive PPS phase differences and
725 * calculates the PPS frequency offset, which is also processed by the
726 * seconds overflow routine. The code requires the caller to capture the
727 * time and architecture-dependent hardware counter values in
728 * nanoseconds at the on-time PPS signal transition.
729 *
730 * Note that, on some Unix systems this routine runs at an interrupt
731 * priority level higher than the timer interrupt routine hardclock().
732 * Therefore, the variables used are distinct from the hardclock()
733 * variables, except for the actual time and frequency variables, which
734 * are determined by this routine and updated atomically.
735 *
736 * tsp - time at PPS
737 * nsec - hardware counter at PPS
738 */
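The phase samples captured below pass through a three-stage median filter. The standalone helper in this userland sketch (an illustrative reduction of the selection logic used later in this routine) returns the median of the last three samples together with the spread of the outer two, which hardpps() uses as its jitter estimate:

/* Userland reduction of hardpps()'s three-stage median filter selection. */
#include <stdio.h>

/* Return the median of a, b, c and store the spread of the outer two. */
static long
median3(long a, long b, long c, long *spread)
{
	long med;

	if (a > b) {
		if (b > c) {		/* a > b > c */
			med = b; *spread = a - c;
		} else if (c > a) {	/* c > a > b */
			med = a; *spread = c - b;
		} else {		/* a >= c >= b */
			med = c; *spread = a - b;
		}
	} else {
		if (b < c) {		/* c > b >= a */
			med = b; *spread = c - a;
		} else if (c < a) {	/* b >= a > c */
			med = a; *spread = b - c;
		} else {		/* b >= c >= a */
			med = c; *spread = b - a;
		}
	}
	return (med);
}

int
main(void)
{
	long jitter;
	long med = median3(120, -40, 35, &jitter);

	printf("median %ld ns, jitter %ld ns\n", med, jitter);	/* 35 and 160 */
	return (0);
}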
739void
740hardpps(struct timespec *tsp, long nsec)
741{
742 long u_sec, u_nsec, v_nsec; /* temps */
743 l_fp ftemp;
744
745 NTP_LOCK();
746
747 /*
748 * The signal is first processed by a range gate and frequency
749 * discriminator. The range gate rejects noise spikes outside
750 * the range +-500 us. The frequency discriminator rejects input
751 * signals with apparent frequency outside the range 1 +-500
752 * PPM. If two hits occur in the same second, we ignore the
753 * later hit; if not and a hit occurs outside the range gate,
754 * keep the later hit for later comparison, but do not process
755 * it.
756 */
757 time_status |= STA_PPSSIGNAL | STA_PPSJITTER;
758 time_status &= ~(STA_PPSWANDER | STA_PPSERROR);
759 pps_valid = PPS_VALID;
760 u_sec = tsp->tv_sec;
761 u_nsec = tsp->tv_nsec;
762 if (u_nsec >= (NANOSECOND >> 1)) {
763 u_nsec -= NANOSECOND;
764 u_sec++;
765 }
766 v_nsec = u_nsec - pps_tf[0].tv_nsec;
767 if (u_sec == pps_tf[0].tv_sec && v_nsec < NANOSECOND - MAXFREQ)
768 goto out;
769 pps_tf[2] = pps_tf[1];
770 pps_tf[1] = pps_tf[0];
771 pps_tf[0].tv_sec = u_sec;
772 pps_tf[0].tv_nsec = u_nsec;
773
774 /*
775 * Compute the difference between the current and previous
776 * counter values. If the difference exceeds 0.5 s, assume it
777 * has wrapped around, so correct 1.0 s. If the result exceeds
778 * the tick interval, the sample point has crossed a tick
779 * boundary during the last second, so correct the tick. Very
780 * intricate.
781 */
782 u_nsec = nsec;
783 if (u_nsec > (NANOSECOND >> 1))
784 u_nsec -= NANOSECOND;
785 else if (u_nsec < -(NANOSECOND >> 1))
786 u_nsec += NANOSECOND;
787 pps_fcount += u_nsec;
788 if (v_nsec > MAXFREQ || v_nsec < -MAXFREQ)
789 goto out;
790 time_status &= ~STA_PPSJITTER;
791
792 /*
793 * A three-stage median filter is used to help denoise the PPS
794 * time. The median sample becomes the time offset estimate; the
795 * difference between the other two samples becomes the time
796 * dispersion (jitter) estimate.
797 */
798 if (pps_tf[0].tv_nsec > pps_tf[1].tv_nsec) {
799 if (pps_tf[1].tv_nsec > pps_tf[2].tv_nsec) {
800 v_nsec = pps_tf[1].tv_nsec; /* 0 1 2 */
801 u_nsec = pps_tf[0].tv_nsec - pps_tf[2].tv_nsec;
802 } else if (pps_tf[2].tv_nsec > pps_tf[0].tv_nsec) {
803 v_nsec = pps_tf[0].tv_nsec; /* 2 0 1 */
804 u_nsec = pps_tf[2].tv_nsec - pps_tf[1].tv_nsec;
805 } else {
806 v_nsec = pps_tf[2].tv_nsec; /* 0 2 1 */
807 u_nsec = pps_tf[0].tv_nsec - pps_tf[1].tv_nsec;
808 }
809 } else {
810 if (pps_tf[1].tv_nsec < pps_tf[2].tv_nsec) {
811 v_nsec = pps_tf[1].tv_nsec; /* 2 1 0 */
812 u_nsec = pps_tf[2].tv_nsec - pps_tf[0].tv_nsec;
813 } else if (pps_tf[2].tv_nsec < pps_tf[0].tv_nsec) {
814 v_nsec = pps_tf[0].tv_nsec; /* 1 0 2 */
815 u_nsec = pps_tf[1].tv_nsec - pps_tf[2].tv_nsec;
816 } else {
817 v_nsec = pps_tf[2].tv_nsec; /* 1 2 0 */
818 u_nsec = pps_tf[1].tv_nsec - pps_tf[0].tv_nsec;
819 }
820 }
821
822 /*
823 * Nominal jitter is due to PPS signal noise and interrupt
824 * latency. If it exceeds the popcorn threshold, the sample is
825 * discarded. otherwise, if so enabled, the time offset is
826 * updated. We can tolerate a modest loss of data here without
827 * much degrading time accuracy.
828 *
829 * The measurements being checked here were made with the system
830 * timecounter, so the popcorn threshold is not allowed to fall below
831 * the number of nanoseconds in two ticks of the timecounter. For a
832 * timecounter running faster than 1 GHz the lower bound is 2ns, just
833 * to avoid a nonsensical threshold of zero.
834 */
835 if (u_nsec > lmax(pps_jitter << PPS_POPCORN,
836 2 * (NANOSECOND / (long)qmin(NANOSECOND, tc_getfrequency())))) {
837 time_status |= STA_PPSJITTER;
838 pps_jitcnt++;
839 } else if (time_status & STA_PPSTIME) {
840 time_monitor = -v_nsec;
841 L_LINT(time_offset, time_monitor);
842 }
843 pps_jitter += (u_nsec - pps_jitter) >> PPS_FAVG;
844 u_sec = pps_tf[0].tv_sec - pps_lastsec;
845 if (u_sec < (1 << pps_shift))
846 goto out;
847
848 /*
849 * At the end of the calibration interval the difference between
850 * the first and last counter values becomes the scaled
851 * frequency. It will later be divided by the length of the
852 * interval to determine the frequency update. If the frequency
853 * exceeds a sanity threshold, or if the actual calibration
854 * interval is not equal to the expected length, the data are
855 * discarded. We can tolerate a modest loss of data here without
856 * much degrading frequency accuracy.
857 */
858 pps_calcnt++;
859 v_nsec = -pps_fcount;
860 pps_lastsec = pps_tf[0].tv_sec;
861 pps_fcount = 0;
862 u_nsec = MAXFREQ << pps_shift;
863 if (v_nsec > u_nsec || v_nsec < -u_nsec || u_sec != (1 << pps_shift)) {
864 time_status |= STA_PPSERROR;
865 pps_errcnt++;
866 goto out;
867 }
868
869 /*
870 * Here the raw frequency offset and wander (stability) is
871 * calculated. If the wander is less than the wander threshold
872 * for four consecutive averaging intervals, the interval is
873 * doubled; if it is greater than the threshold for four
874 * consecutive intervals, the interval is halved. The scaled
875 * frequency offset is converted to frequency offset. The
876 * stability metric is calculated as the average of recent
877 * frequency changes, but is used only for performance
878 * monitoring.
879 */
880 L_LINT(ftemp, v_nsec);
881 L_RSHIFT(ftemp, pps_shift);
882 L_SUB(ftemp, pps_freq);
883 u_nsec = L_GINT(ftemp);
884 if (u_nsec > PPS_MAXWANDER) {
885 L_LINT(ftemp, PPS_MAXWANDER);
886 pps_intcnt--;
887 time_status |= STA_PPSWANDER;
888 pps_stbcnt++;
889 } else if (u_nsec < -PPS_MAXWANDER) {
890 L_LINT(ftemp, -PPS_MAXWANDER);
891 pps_intcnt--;
892 time_status |= STA_PPSWANDER;
893 pps_stbcnt++;
894 } else {
895 pps_intcnt++;
896 }
897 if (pps_intcnt >= 4) {
898 pps_intcnt = 4;
899 if (pps_shift < pps_shiftmax) {
900 pps_shift++;
901 pps_intcnt = 0;
902 }
903 } else if (pps_intcnt <= -4 || pps_shift > pps_shiftmax) {
904 pps_intcnt = -4;
905 if (pps_shift > PPS_FAVG) {
906 pps_shift--;
907 pps_intcnt = 0;
908 }
909 }
910 if (u_nsec < 0)
911 u_nsec = -u_nsec;
912 pps_stabil += (u_nsec * SCALE_PPM - pps_stabil) >> PPS_FAVG;
913
914 /*
915 * The PPS frequency is recalculated and clamped to the maximum
916 * MAXFREQ. If enabled, the system clock frequency is updated as
917 * well.
918 */
919 L_ADD(pps_freq, ftemp);
920 u_nsec = L_GINT(pps_freq);
921 if (u_nsec > MAXFREQ)
922 L_LINT(pps_freq, MAXFREQ);
923 else if (u_nsec < -MAXFREQ)
924 L_LINT(pps_freq, -MAXFREQ);
925 if (time_status & STA_PPSFREQ)
926 time_freq = pps_freq;
927
928out:
929 NTP_UNLOCK();
930}
931#endif /* PPS_SYNC */
932
933#ifndef _SYS_SYSPROTO_H_
934struct adjtime_args {
935 struct timeval *delta;
936 struct timeval *olddelta;
937};
938#endif
939/* ARGSUSED */
940int
941sys_adjtime(struct thread *td, struct adjtime_args *uap)
942{
943 struct timeval delta, olddelta, *deltap;
944 int error;
945
946 if (uap->delta) {
947 error = copyin(uap->delta, &delta, sizeof(delta));
948 if (error)
949 return (error);
950 deltap = &delta;
951 } else
952 deltap = NULL;
953 error = kern_adjtime(td, deltap, &olddelta);
954 if (uap->olddelta && error == 0)
955 error = copyout(&olddelta, uap->olddelta, sizeof(olddelta));
956 return (error);
957}
958
959int
960kern_adjtime(struct thread *td, struct timeval *delta, struct timeval *olddelta)
961{
962 struct timeval atv;
963 int64_t ltr, ltw;
964 int error;
965
966 if (delta != NULL) {
967 error = priv_check(td, PRIV_ADJTIME);
968 if (error != 0)
969 return (error);
970 ltw = (int64_t)delta->tv_sec * 1000000 + delta->tv_usec;
971 }
972 NTP_LOCK();
973 ltr = time_adjtime;
974 if (delta != NULL)
975 time_adjtime = ltw;
976 NTP_UNLOCK();
977 if (olddelta != NULL) {
978 atv.tv_sec = ltr / 1000000;
979 atv.tv_usec = ltr % 1000000;
980 if (atv.tv_usec < 0) {
981 atv.tv_usec += 1000000;
982 atv.tv_sec--;
983 }
984 *olddelta = atv;
985 }
986 return (0);
987}
988
989static struct callout resettodr_callout;
990static int resettodr_period = 1800;
991
992static void
993periodic_resettodr(void *arg __unused)
994{
995
996 /*
997 * Read of time_status is lock-less, which is fine since
998 * ntp_is_time_error() operates on the consistent read value.
999 */
1000 if (!ntp_is_time_error(time_status))
1001 resettodr();
1002 if (resettodr_period > 0)
1003 callout_schedule(&resettodr_callout, resettodr_period * hz);
1004}
1005
1006static void
1007shutdown_resettodr(void *arg __unused, int howto __unused)
1008{
1009
1010 callout_drain(&resettodr_callout);
1011 /* Another unlocked read of time_status */
1012 if (!ntp_is_time_error(time_status))
1013 resettodr();
1014}
1015
1016static int
1017sysctl_resettodr_period(SYSCTL_HANDLER_ARGS)
1018{
1019 int error;
1020
1021 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
1022 if (error || !req->newptr)
1023 return (error);
1024 if (cold)
1025 goto done;
1026 if (resettodr_period == 0)
1027 callout_stop(&resettodr_callout);
1028 else
1029 callout_reset(&resettodr_callout, resettodr_period * hz,
1030 periodic_resettodr, NULL);
1031done:
1032 return (0);
1033}
1034
1035SYSCTL_PROC(_machdep, OID_AUTO, rtc_save_period, CTLTYPE_INT | CTLFLAG_RWTUN |
1036 CTLFLAG_MPSAFE, &resettodr_period, 1800, sysctl_resettodr_period, "I",
1037 "Save system time to RTC with this period (in seconds)");
1038
1039static void
1040start_periodic_resettodr(void *arg __unused)
1041{
1042
1043 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_resettodr, NULL,
1044 SHUTDOWN_PRI_FIRST);
1045 callout_init(&resettodr_callout, 1);
1046 if (resettodr_period == 0)
1047 return;
1048 callout_reset(&resettodr_callout, resettodr_period * hz,
1049 periodic_resettodr, NULL);
1050}
1051
1052SYSINIT(periodic_resettodr, SI_SUB_LAST, SI_ORDER_MIDDLE,
1053 start_periodic_resettodr, NULL);