/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 * $DragonFly: src/sys/kern/kern_clock.c,v 1.38 2005/04/24 02:01:08 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#ifdef DEVICE_POLLING
extern void init_device_poll(void);
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks (void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cp_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct cp_time cp_time;

SYSCTL_OPAQUE(_kern, OID_AUTO, cp_time, CTLFLAG_RD, &cp_time, sizeof(cp_time),
    "LU", "CPU time statistics");
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the clock.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */
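
/*
 * Illustrative sketch (not part of the original source): the compensated
 * 'real' uptime described above is simply the compensated real time minus
 * boottime.  Assumes only the declarations in this file plus the kernel's
 * nanotime() prototype from <sys/time.h>.
 */
static __inline void
example_real_uptime(struct timespec *uptime)
{
        nanotime(uptime);               /* drift compensated real time */
        uptime->tv_sec -= boottime.tv_sec;
        uptime->tv_nsec -= boottime.tv_nsec;
        if (uptime->tv_nsec < 0) {
                uptime->tv_nsec += 1000000000;
                --uptime->tv_sec;
        }
}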
/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * ntp_tick_permanent.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;
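
/*
 * Illustration (sketch only, not part of the original source): the
 * tail-chasing FIFO access pattern.  Readers snapshot basetime_index and
 * use the indexed slot without locks; cpu #0 fills the next slot and then
 * publishes it by flipping the index.  cpu_sfence() is assumed to be the
 * available store-fence primitive.
 */
static __inline void
example_basetime_publish(const struct timespec *ts)
{
        int ni = (basetime_index + 1) & BASETIME_ARYMASK;

        basetime[ni] = *ts;     /* fill the next slot (cpu #0 only) */
        cpu_sfence();           /* make the slot contents visible first */
        basetime_index = ni;    /* then publish the new index */
}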
static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
        struct timespec *bt;
        int error;

        bt = &basetime[basetime_index];
        error = SYSCTL_OUT(req, bt, sizeof(*bt));
        return (error);
}
SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");
static void hardclock(systimer_t info, struct intrframe *frame);
static void statclock(systimer_t info, struct intrframe *frame);
static void schedclock(systimer_t info, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
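
/*
 * Worked example (illustrative, not part of the original source):
 * ntp_tick_permanent holds a per-tick adjustment in nsec << 32.  A
 * frequency error of ppm parts per million is ppm * 1000 nsec per second,
 * which is ppm * 1000 / hz nsec per tick, so in this fixed point form:
 */
static __inline int64_t
example_ppm_to_tick_permanent(int ppm)
{
        return (((int64_t)ppm * 1000 << 32) / hz);
}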
/*
 * Finish initializing clock frequencies and start all clocks running.
 */
static void
initclocks(void *dummy)
{
#ifdef DEVICE_POLLING
        init_device_poll();
#endif
        /*psratio = profhz / stathz;*/
}
/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
        struct globaldata *gd = mycpu;

        if (gd->gd_cpuid == 0) {
                gd->gd_time_seconds = 1;
                gd->gd_cpuclock_base = cputimer_count();
        } else {
                gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
                gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
        }

        /*
         * Use a non-queued periodic systimer to prevent multiple ticks from
         * building up if the sysclock jumps forward (8254 gets reset).  The
         * sysclock will never jump backwards.  Our time sync is based on
         * the actual sysclock, not the ticks count.
         */
        systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
        systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
        /* XXX correct the frequency for scheduler / estcpu tests */
        systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
                                  NULL, ESTCPUFREQ);
}
/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
        struct timespec *nbt;
        int ni;

        /*
         * XXX SMP / non-atomic basetime updates
         */
        crit_enter();
        ni = (basetime_index + 1) & BASETIME_ARYMASK;
        nbt = &basetime[ni];
        nanouptime(nbt);
        nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
        nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
        if (nbt->tv_nsec < 0) {
                nbt->tv_nsec += 1000000000;
                --nbt->tv_sec;
        }

        /*
         * Note that basetime diverges from boottime as the clock drift is
         * compensated for, so we cannot do away with boottime.  When setting
         * the absolute time of day the drift is 0 (for an instant) and we
         * can simply assign boottime to basetime.
         *
         * Note that nanouptime() is based on gd_time_seconds which is drift
         * compensated up to a point (it is guaranteed to remain monotonically
         * increasing).  gd_time_seconds is thus our best uptime guess and
         * suitable for use in the boottime calculation.  It is already taken
         * into account in the basetime calculation above.
         */
        boottime.tv_sec = nbt->tv_sec;

        /*
         * We now have a new basetime, update the index.
         */
        cpu_sfence();
        basetime_index = ni;
        crit_exit();
}
/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, struct intrframe *frame)
{
        sysclock_t cputicks;
        struct proc *p;
        struct pstats *pstats;
        struct globaldata *gd = mycpu;

        /*
         * Realtime updates are per-cpu.  Note that timer corrections as
         * returned by microtime() and friends make an additional adjustment
         * using a system-wide 'basetime', but the running time is always
         * taken from the per-cpu globaldata area.  Since the same clock
         * is distributed (XXX SMP) to all cpus, the per-cpu timebases
         * remain synchronized.
         *
         * Note that we never allow info->time (aka gd->gd_hardclock.time)
         * to reverse index gd_cpuclock_base, but that it is possible for
         * it to temporarily get behind in the seconds if something in the
         * system locks interrupts for a long period of time.  Since periodic
         * timers count events, though, everything should resynch again
         * immediately.
         */
        cputicks = info->time - gd->gd_cpuclock_base;
        if (cputicks >= cputimer_freq) {
                ++gd->gd_time_seconds;
                gd->gd_cpuclock_base += cputimer_freq;
        }
        /*
         * The system-wide ticks counter and NTP related timedelta/tickdelta
         * adjustments only occur on cpu #0.  NTP adjustments are accomplished
         * by updating basetime.
         */
        if (gd->gd_cpuid == 0) {
                struct timespec *nbt;
                struct timespec nts;
                int leap;
                int ni;

                ++ticks;

#ifdef DEVICE_POLLING
                hardclock_device_poll();	/* mpsafe, short and quick */
#endif /* DEVICE_POLLING */

#if 0
                if (tco->tc_poll_pps)
                        tco->tc_poll_pps(tco);
#endif
                /*
                 * Calculate the new basetime index.  We are in a critical
                 * section on cpu #0 and can safely play with basetime_index.
                 * Start with the current basetime and then make adjustments.
                 */
                ni = (basetime_index + 1) & BASETIME_ARYMASK;
                nbt = &basetime[ni];
                *nbt = basetime[basetime_index];
                /*
                 * Apply adjtime corrections.  (adjtime() API)
                 *
                 * adjtime() only runs on cpu #0 so our critical section is
                 * sufficient to access these variables.
                 */
                if (ntp_delta != 0) {
                        nbt->tv_nsec += ntp_tick_delta;
                        ntp_delta -= ntp_tick_delta;
                        if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
                            (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
                                ntp_tick_delta = ntp_delta;
                        }
                }
                /*
                 * Apply permanent frequency corrections.  (sysctl API)
                 */
                if (ntp_tick_permanent != 0) {
                        ntp_tick_acc += ntp_tick_permanent;
                        if (ntp_tick_acc >= (1LL << 32)) {
                                nbt->tv_nsec += ntp_tick_acc >> 32;
                                ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
                        } else if (ntp_tick_acc <= -(1LL << 32)) {
                                /* Negate ntp_tick_acc to avoid shifting the sign bit. */
                                nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
                                ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
                        }
                }

                if (nbt->tv_nsec >= 1000000000) {
                        nbt->tv_sec++;
                        nbt->tv_nsec -= 1000000000;
                } else if (nbt->tv_nsec < 0) {
                        nbt->tv_sec--;
                        nbt->tv_nsec += 1000000000;
                }
                /*
                 * Another per-tick compensation.  (for ntp_adjtime() API)
                 */
                nsec_acc += nsec_adj;
                if (nsec_acc >= 0x100000000LL) {
                        nbt->tv_nsec += nsec_acc >> 32;
                        nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
                } else if (nsec_acc <= -0x100000000LL) {
                        nbt->tv_nsec -= -nsec_acc >> 32;
                        nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
                }
                if (nbt->tv_nsec >= 1000000000) {
                        nbt->tv_nsec -= 1000000000;
                        ++nbt->tv_sec;
                } else if (nbt->tv_nsec < 0) {
                        nbt->tv_nsec += 1000000000;
                        --nbt->tv_sec;
                }
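
                /*
                 * Worked example (illustration): an ntp_adjtime() slew of
                 * +0.5 nsec per tick is nsec_adj = 1LL << 31.  nsec_acc then
                 * crosses 1LL << 32 on every second tick, adding 1 nsec to
                 * the basetime while the low 32 fraction bits are kept as
                 * the accumulator remainder.
                 */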
                /************************************************************
                 *			LEAP SECOND CORRECTION		    *
                 ************************************************************
                 *
                 * Taking into account all the corrections made above, figure
                 * out the new real time.  If the seconds field has changed
                 * then apply any pending leap-second corrections.
                 */
                getnanotime_nbt(nbt, &nts);
                /*
                 * Apply leap second (sysctl API)
                 */
                if (ntp_leap_second) {
                        if (ntp_leap_second == nts.tv_sec) {
                                if (ntp_leap_insert)
                                        nbt->tv_sec--;
                                else
                                        nbt->tv_sec++;
                                ntp_leap_second = 0;
                        }
                }

                /*
                 * Apply leap second (ntp_adjtime() API)
                 */
                if (time_second != nts.tv_sec) {
                        leap = ntp_update_second(time_second, &nsec_adj);
                        nbt->tv_sec += leap;
                        time_second = nts.tv_sec + leap;
                        nsec_adj /= hz;
                }
                /*
                 * Finally, our new basetime is ready to go live!
                 */
                cpu_sfence();
                basetime_index = ni;
        }

        /*
         * softticks are handled for all cpus
         */
        hardclock_softtick(gd);
        /*
         * ITimer handling is per-tick, per-cpu.  I don't think psignal()
         * is mpsafe on curproc, so XXX get the mplock.
         */
        if ((p = curproc) != NULL && try_mplock()) {
                pstats = p->p_stats;
                if (frame && CLKF_USERMODE(frame) &&
                    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
                    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
                        psignal(p, SIGVTALRM);
                if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
                    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
                        psignal(p, SIGPROF);
                rel_mplock();
        }
}
/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, struct intrframe *frame)
{
#ifdef GPROF
        struct gmonparam *g;
        int i;
#endif
        thread_t td;
        struct proc *p;
        int bump;
        struct timeval tv;
        struct timeval *stv;

        /*
         * How big was our timeslice relative to the last time?
         */
        microuptime(&tv);	/* mpsafe */
        stv = &mycpu->gd_stattv;
        if (stv->tv_sec == 0) {
                bump = 1;
        } else {
                bump = tv.tv_usec - stv->tv_usec +
                       (tv.tv_sec - stv->tv_sec) * 1000000;
                if (bump < 0)
                        bump = 1;
                if (bump > 1000000)
                        bump = 1000000;
        }
        *stv = tv;

        td = curthread;
        p = td->td_proc;
        if (frame && CLKF_USERMODE(frame)) {
                /*
                 * Came from userland, handle user time and deal with
                 * the process, if any.
                 */
                if (p && (p->p_flag & P_PROFIL))
                        addupc_intr(p, CLKF_PC(frame), 1);
                td->td_uticks += bump;

                /*
                 * Charge the time as appropriate
                 */
                if (p && p->p_nice > NZERO)
                        cp_time.cp_nice += bump;
                else
                        cp_time.cp_user += bump;
        } else {
#ifdef GPROF
                /*
                 * Kernel statistics are just like addupc_intr, only easier.
                 */
                g = &_gmonparam;
                if (g->state == GMON_PROF_ON && frame) {
                        i = CLKF_PC(frame) - g->lowpc;
                        if (i < g->textsize) {
                                i /= HISTFRACTION * sizeof(*g->kcount);
                                g->kcount[i]++;
                        }
                }
#endif
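
                /*
                 * Sizing note (illustration): with the common HISTFRACTION
                 * of 2 and 2 byte kcount entries, each histogram bucket
                 * covers HISTFRACTION * sizeof(*g->kcount) = 4 bytes of
                 * kernel text.
                 */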
                /*
                 * Came from kernel mode, so we were:
                 * - handling an interrupt,
                 * - doing syscall or trap work on behalf of the current
                 *   user process, or
                 * - spinning in the idle loop.
                 * Whichever it is, charge the time as appropriate.
                 * Note that we charge interrupts to the current process,
                 * regardless of whether they are ``for'' that process,
                 * so that we know how much of its real time was spent
                 * in ``non-process'' (i.e., interrupt) work.
                 *
                 * XXX assume system if frame is NULL.  A NULL frame
                 * can occur if ipi processing is done from an splx().
                 */
                if (frame && CLKF_INTR(frame))
                        td->td_iticks += bump;
                else
                        td->td_sticks += bump;

                if (frame && CLKF_INTR(frame)) {
                        cp_time.cp_intr += bump;
                } else {
                        if (td == &mycpu->gd_idlethread)
                                cp_time.cp_idle += bump;
                        else
                                cp_time.cp_sys += bump;
                }
        }
}
/*
 * The scheduler clock typically runs at a 20Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 */
static void
schedclock(systimer_t info, struct intrframe *frame)
{
        struct proc *p;
        struct pstats *pstats;
        struct rusage *ru;
        struct vmspace *vm;
        long rss;
        schedulerclock(NULL);	/* mpsafe */
        if ((p = curproc) != NULL) {
                /* Update resource usage integrals and maximums. */
                if ((pstats = p->p_stats) != NULL &&
                    (ru = &pstats->p_ru) != NULL &&
                    (vm = p->p_vmspace) != NULL) {
                        ru->ru_ixrss += pgtok(vm->vm_tsize);
                        ru->ru_idrss += pgtok(vm->vm_dsize);
                        ru->ru_isrss += pgtok(vm->vm_ssize);
                        rss = pgtok(vmspace_resident_count(vm));
                        if (ru->ru_maxrss < rss)
                                ru->ru_maxrss = rss;
                }
        }
}
/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will round the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
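
/*
 * Worked example (illustration): with hz = 100 (tick = 10000 usec), a
 * timeval of 25000 usec yields
 *
 *	tvtohz_high: (25000 + 9999) / 10000 + 1 = 4 ticks (meets or exceeds)
 *	tvtohz_low:   25000 / 10000             = 2 ticks (errs low)
 */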
int
tvtohz_high(struct timeval *tv)
{
        int ticks;
        long sec, usec;

        sec = tv->tv_sec;
        usec = tv->tv_usec;
        if (usec < 0) {
                sec--;
                usec += 1000000;
        }
        if (sec < 0) {
#ifdef DIAGNOSTIC
                if (usec > 0) {
                        sec++;
                        usec -= 1000000;
                }
                printf("tvtohz_high: negative time difference %ld sec %ld usec\n",
                       sec, usec);
#endif
                ticks = 1;
        } else if (sec <= INT_MAX / hz) {
                ticks = (int)(sec * hz +
                              ((u_long)usec + (tick - 1)) / tick) + 1;
        } else {
                ticks = INT_MAX;
        }
        return (ticks);
}
/*
 * Compute number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
        int ticks;
        long sec;

        sec = tv->tv_sec;
        if (sec <= INT_MAX / hz)
                ticks = (int)(sec * hz + (u_long)tv->tv_usec / tick);
        else
                ticks = INT_MAX;
        return (ticks);
}
/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
        if ((p->p_flag & P_PROFIL) == 0) {
                p->p_flag |= P_PROFIL;
                if (++profprocs == 1 && stathz != 0) {
                        crit_enter();
                        setstatclockrate(profhz);
                        crit_exit();
                }
        }
}
/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{
        if (p->p_flag & P_PROFIL) {
                p->p_flag &= ~P_PROFIL;
                if (--profprocs == 0 && stathz != 0) {
                        crit_enter();
                        setstatclockrate(stathz);
                        crit_exit();
                }
        }
}
/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
        struct kinfo_clockinfo clkinfo;

        /*
         * Construct clockinfo structure.
         */
        clkinfo.ci_hz = hz;
        clkinfo.ci_tick = tick;
        clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
        clkinfo.ci_profhz = profhz;
        clkinfo.ci_stathz = stathz ? stathz : hz;
        return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
    0, 0, sysctl_kern_clockrate, "S,clockinfo", "");
/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * cputimer_freq.  If this occurs the cputimer_freq64_nsec multiplication
 * can easily overflow, so we deal with the case.  For uniformity we deal
 * with the case in the usec case too.
 */
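
/*
 * Note on the fixed point math below (the scale factor definitions are an
 * assumption, not part of this file): cputimer_freq64_nsec is taken to be
 * (1000000000LL << 32) / cputimer_freq, so (cputimer_freq64_nsec * delta)
 * >> 32 yields nanoseconds exactly when delta < cputimer_freq.  The
 * (delta >= cputimer_freq) check in each function keeps the 64 bit
 * multiply from overflowing; cputimer_freq64_usec works the same way at
 * microsecond scale.
 */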
void
getmicrouptime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tvp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
        if (tvp->tv_usec >= 1000000) {
                tvp->tv_usec -= 1000000;
                ++tvp->tv_sec;
        }
}
void
getnanouptime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tsp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
}
void
microuptime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = cputimer_count() - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tvp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
}
void
nanouptime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = cputimer_count() - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tsp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
}
void
getmicrotime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;
        struct timespec *bt;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tvp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

        bt = &basetime[basetime_index];
        tvp->tv_sec += bt->tv_sec;
        tvp->tv_usec += bt->tv_nsec / 1000;
        while (tvp->tv_usec >= 1000000) {
                tvp->tv_usec -= 1000000;
                ++tvp->tv_sec;
        }
}
void
getnanotime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;
        struct timespec *bt;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tsp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

        bt = &basetime[basetime_index];
        tsp->tv_sec += bt->tv_sec;
        tsp->tv_nsec += bt->tv_nsec;
        while (tsp->tv_nsec >= 1000000000) {
                tsp->tv_nsec -= 1000000000;
                ++tsp->tv_sec;
        }
}
static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tsp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

        tsp->tv_sec += nbt->tv_sec;
        tsp->tv_nsec += nbt->tv_nsec;
        while (tsp->tv_nsec >= 1000000000) {
                tsp->tv_nsec -= 1000000000;
                ++tsp->tv_sec;
        }
}
void
microtime(struct timeval *tvp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;
        struct timespec *bt;

        do {
                tvp->tv_sec = gd->gd_time_seconds;
                delta = cputimer_count() - gd->gd_cpuclock_base;
        } while (tvp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tvp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

        bt = &basetime[basetime_index];
        tvp->tv_sec += bt->tv_sec;
        tvp->tv_usec += bt->tv_nsec / 1000;
        while (tvp->tv_usec >= 1000000) {
                tvp->tv_usec -= 1000000;
                ++tvp->tv_sec;
        }
}
void
nanotime(struct timespec *tsp)
{
        struct globaldata *gd = mycpu;
        sysclock_t delta;
        struct timespec *bt;

        do {
                tsp->tv_sec = gd->gd_time_seconds;
                delta = cputimer_count() - gd->gd_cpuclock_base;
        } while (tsp->tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                tsp->tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

        bt = &basetime[basetime_index];
        tsp->tv_sec += bt->tv_sec;
        tsp->tv_nsec += bt->tv_nsec;
        while (tsp->tv_nsec >= 1000000000) {
                tsp->tv_nsec -= 1000000000;
                ++tsp->tv_sec;
        }
}
/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
        struct globaldata *gd = mycpu;
        struct timespec *bt;

        bt = &basetime[basetime_index];
        return (gd->gd_time_seconds + bt->tv_sec);
}
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
        pps_params_t *app;
        struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
        struct pps_kcbind_args *kapi;
#endif

        switch (cmd) {
        case PPS_IOC_CREATE:
                return (0);
        case PPS_IOC_DESTROY:
                return (0);
        case PPS_IOC_SETPARAMS:
                app = (pps_params_t *)data;
                if (app->mode & ~pps->ppscap)
                        return (EINVAL);
                pps->ppsparam = *app;
                return (0);
        case PPS_IOC_GETPARAMS:
                app = (pps_params_t *)data;
                *app = pps->ppsparam;
                app->api_version = PPS_API_VERS_1;
                return (0);
        case PPS_IOC_GETCAP:
                *(int*)data = pps->ppscap;
                return (0);
        case PPS_IOC_FETCH:
                fapi = (struct pps_fetch_args *)data;
                if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
                        return (EOPNOTSUPP);
                pps->ppsinfo.current_mode = pps->ppsparam.mode;
                fapi->pps_info_buf = pps->ppsinfo;
                return (0);
        case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
                kapi = (struct pps_kcbind_args *)data;
                /* XXX Only root should be able to do this */
                if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
                        return (EINVAL);
                if (kapi->kernel_consumer != PPS_KC_HARDPPS)
                        return (EINVAL);
                if (kapi->edge & ~pps->ppscap)
                        return (EINVAL);
                pps->kcmode = kapi->edge;
                return (0);
#else
                return (EOPNOTSUPP);
#endif
        default:
                return (ENOTTY);
        }
}
void
pps_init(struct pps_state *pps)
{
        pps->ppscap |= PPS_TSFMT_TSPEC;
        if (pps->ppscap & PPS_CAPTUREASSERT)
                pps->ppscap |= PPS_OFFSETASSERT;
        if (pps->ppscap & PPS_CAPTURECLEAR)
                pps->ppscap |= PPS_OFFSETCLEAR;
}
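
/*
 * Usage sketch (hypothetical driver, illustration only): a driver sets the
 * capture capabilities it supports in ppscap before calling pps_init(),
 * then feeds captured sysclock counts to pps_event() from its interrupt
 * path.  The example_* names are not part of the original source.
 */
static struct pps_state example_pps;

static void
example_pps_attach(void)
{
        example_pps.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
        pps_init(&example_pps);
}

static void
example_pps_intr(sysclock_t count)
{
        pps_event(&example_pps, count, PPS_CAPTUREASSERT);
}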
void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
        struct globaldata *gd;
        struct timespec *tsp;
        struct timespec *osp;
        struct timespec *bt;
        struct timespec ts;
        sysclock_t *pcount;
        sysclock_t tcount;
        sysclock_t delta;
        pps_seq_t *pseq;
        int foff;
        int fhard;

        /* Things would be easier with arrays... */
        if (event == PPS_CAPTUREASSERT) {
                tsp = &pps->ppsinfo.assert_timestamp;
                osp = &pps->ppsparam.assert_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
                fhard = pps->kcmode & PPS_CAPTUREASSERT;
                pcount = &pps->ppscount[0];
                pseq = &pps->ppsinfo.assert_sequence;
        } else {
                tsp = &pps->ppsinfo.clear_timestamp;
                osp = &pps->ppsparam.clear_offset;
                foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
                fhard = pps->kcmode & PPS_CAPTURECLEAR;
                pcount = &pps->ppscount[1];
                pseq = &pps->ppsinfo.clear_sequence;
        }
        /* Nothing really happened */
        if (*pcount == count)
                return;

        *pcount = count;

        /*
         * Convert the count to a timespec.
         */
        gd = mycpu;
        do {
                ts.tv_sec = gd->gd_time_seconds;
                delta = count - gd->gd_cpuclock_base;
        } while (ts.tv_sec != gd->gd_time_seconds);

        if (delta >= cputimer_freq) {
                ts.tv_sec += delta / cputimer_freq;
                delta %= cputimer_freq;
        }
        ts.tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
        bt = &basetime[basetime_index];
        ts.tv_sec += bt->tv_sec;
        ts.tv_nsec += bt->tv_nsec;
        while (ts.tv_nsec >= 1000000000) {
                ts.tv_nsec -= 1000000000;
                ++ts.tv_sec;
        }
        (*pseq)++;
        *tsp = ts;

        if (foff) {
                timespecadd(tsp, osp);
                if (tsp->tv_nsec < 0) {
                        tsp->tv_nsec += 1000000000;
                        tsp->tv_sec -= 1;
                }
        }
#ifdef PPS_SYNC
        if (fhard) {
                /* magic, at its best... */
                tcount = count - pps->ppscount[2];
                pps->ppscount[2] = count;
                if (tcount >= cputimer_freq) {
                        delta = 1000000000 * (tcount / cputimer_freq) +
                                ((cputimer_freq64_nsec *
                                  (tcount % cputimer_freq)) >> 32);
                } else {
                        delta = (cputimer_freq64_nsec * tcount) >> 32;
                }
                hardpps(tsp, delta);
        }
#endif
}
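
/*
 * Worked example (illustration): with cputimer_freq = 1193182 (the 8254)
 * and a pulse interval of exactly one second, tcount / cputimer_freq is 1
 * and contributes the whole 1000000000 nsec, while the scaled remainder
 * term contributes only the sub-second fraction.
 */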