/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 * $DragonFly: src/sys/kern/kern_clock.c,v 1.37 2005/04/23 20:34:32 dillon Exp $
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef DEVICE_POLLING
extern void init_device_poll(void);
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks (void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cp_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct cp_time cp_time;

SYSCTL_OPAQUE(_kern, OID_AUTO, cp_time, CTLFLAG_RD, &cp_time, sizeof(cp_time),
    "LU", "CPU time statistics");

/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the real time.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */

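/*
 * Illustrative sketch (not compiled in) of the relationship described
 * above: drift-compensated real uptime is the compensated real time of
 * day minus the boot time.  example_uptime() is a hypothetical helper
 * used only for illustration, not part of this file's API.
 */
#if 0
static void
example_uptime(struct timespec *uptime)
{
	nanotime(uptime);			/* compensated real time */
	uptime->tv_sec -= boottime.tv_sec;	/* minus boot time */
}
#endif
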
/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * gd_cpuclock_base.
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpu's and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;

static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;

	bt = &basetime[basetime_index];
	if (req->oldptr != NULL)
		error = SYSCTL_OUT(req, bt, sizeof(*bt));
	else
		error = 0;
	return (error);
}

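/*
 * Illustrative sketch (not compiled in) of the tail-chasing FIFO
 * pattern described above.  Only cpu #0 runs the writer side; readers
 * such as sysctl_get_basetime() simply snapshot basetime[basetime_index]
 * without locks.  The memory barrier makes the new entry globally
 * visible before the index is published.  example_basetime_update() is
 * a hypothetical helper, not part of this file's API.
 */
#if 0
static void
example_basetime_update(struct timespec *ts)
{
	int ni;

	ni = (basetime_index + 1) & BASETIME_ARYMASK;	/* next slot */
	basetime[ni] = *ts;		/* fill it while it is unused */
	cpu_mb1();			/* barrier: data before index */
	basetime_index = ni;		/* publish atomically */
}
#endif
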
SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timeval, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timeval", "System basetime");

static void hardclock(systimer_t info, struct intrframe *frame);
static void statclock(systimer_t info, struct intrframe *frame);
static void schedclock(systimer_t info, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */

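/*
 * Worked example of the 32.32 fixed point format used above (the
 * numbers are illustrative only): with hz = 100 each tick lasts
 * 10,000,000 ns, so a permanent +1 ppm frequency correction is
 * 10 ns per tick:
 *
 *	ntp_tick_permanent = (int64_t)10 << 32;
 *
 * hardclock() adds this to ntp_tick_acc every tick; whenever the
 * accumulator reaches 1 << 32 (one whole nanosecond), the integer
 * nanoseconds are folded into basetime and only the fractional part
 * is carried forward.
 */
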
/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	cpu_initclocks();
#ifdef DEVICE_POLLING
	init_device_poll();
#endif
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = cputimer_count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				  NULL, ESTCPUFREQ);
	crit_exit();
}

/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base;
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, update the index.
	 */
	cpu_mb1();
	basetime_index = ni;

	crit_exit();
}

/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct pstats *pstats;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributed (XXX SMP) to all cpus, the per-cpu timebases
	 * stay in synch.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= cputimer_freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += cputimer_freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#ifdef DEVICE_POLLING
		hardclock_device_poll();	/* mpsafe, short and quick */
#endif /* DEVICE_POLLING */

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/* Negate ntp_tick_acc to avoid shifting the sign bit. */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			nbt->tv_sec++;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			nbt->tv_sec--;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		/*
		 * Apply leap second (sysctl API)
		 */
		if (ntp_leap_second) {
			if (ntp_leap_second == nts.tv_sec) {
				if (ntp_leap_insert)
					nbt->tv_sec++;
				else
					nbt->tv_sec--;
				ntp_leap_second--;
			}
		}

		/*
		 * Apply leap second (ntp_adjtime() API)
		 */
		if (time_second != nts.tv_sec) {
			leap = ntp_update_second(time_second, &nsec_adj);
			nbt->tv_sec += leap;
			time_second = nbt->tv_sec;
			nsec_adj /= hz;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_mb1();
		basetime_index = ni;
	}

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.  I don't think psignal()
	 * is mpsafe on curproc, so XXX get the mplock.
	 */
	if ((p = curproc) != NULL && try_mplock()) {
		pstats = p->p_stats;
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
		rel_mplock();
	}
	setdelayed();
}

/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process.
		 */
		if (p && (p->p_flag & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cp_time.cp_nice += bump;
		else
			cp_time.cp_user += bump;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from an splx().
		 */
		if (frame && CLKF_INTR(frame))
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (frame && CLKF_INTR(frame)) {
			cp_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread)
				cp_time.cp_idle += bump;
			else
				cp_time.cp_sys += bump;
		}
	}
}

/*
 * The scheduler clock typically runs at a 20Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 */
static void
schedclock(systimer_t info, struct intrframe *frame)
{
	struct proc *p;
	struct pstats *pstats;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	schedulerclock(NULL);	/* mpsafe */
	if ((p = curproc) != NULL) {
		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			rss = pgtok(vmspace_resident_count(vm));
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz_high: negative time difference "
		       "%ld sec %ld usec\n", sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (tick - 1)) / tick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}

/*
 * Compute the number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / tick);
	else
		ticks = INT_MAX;
	return (ticks);
}

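/*
 * Worked example contrasting the two conversions (numbers illustrative
 * only): with hz = 100 (tick = 10000 usec), a 15ms timeval converts as
 *
 *	tvtohz_high():	(15000 + 9999) / 10000 + 1 = 3 ticks (never early)
 *	tvtohz_low():	15000 / 10000              = 1 tick  (never late)
 *
 * so the two results bracket the requested interval from above and below.
 */
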
/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
#if 0	/* XXX */
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
#endif
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
#if 0	/* XXX */
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = 1;
			setstatclockrate(stathz);
			splx(s);
		}
#endif
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = tick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * cputimer_freq.  If this occurs the cputimer_freq64_nsec multiplication
 * can easily overflow, so we deal with that case.  For uniformity we
 * deal with it in the usec case too.
 */
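
/*
 * Illustrative sketch (not compiled in): measuring an interval with the
 * precise boot-relative clock described above.  The "up" variants are
 * the right choice here because they are monotonic and unaffected by
 * time-of-day adjustments.  example_measure() is a hypothetical helper,
 * not part of this file's API.
 */
#if 0
static void
example_measure(struct timespec *elapsed)
{
	struct timespec ts1;

	nanouptime(&ts1);		/* precise uptime before the work */
	/* ... the work being timed ... */
	nanouptime(elapsed);		/* precise uptime after the work */
	elapsed->tv_sec -= ts1.tv_sec;	/* elapsed = after - before */
	elapsed->tv_nsec -= ts1.tv_nsec;
	if (elapsed->tv_nsec < 0) {
		elapsed->tv_nsec += 1000000000;
		--elapsed->tv_sec;
	}
}
#endif
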
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
}

void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
}

/*
 * realtime routines
 */

void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tvp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		tsp->tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}

void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= cputimer_freq) {
		ts.tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	ts.tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= cputimer_freq) {
			delta = (1000000000 * (tcount / cputimer_freq) +
				 cputimer_freq64_nsec *
				 (tcount % cputimer_freq)) >> 32;
		} else {
			delta = (cputimer_freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}