/*
 * Copyright (c) 2004, Matthew Dillon <dillon@backplane.com>
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 * $DragonFly: src/sys/kern/kern_clock.c,v 1.16 2004/02/11 21:47:51 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#ifdef DEVICE_POLLING
extern void init_device_poll(void);
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks (void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cp_time[] counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
long cp_time[CPUSTATES];

SYSCTL_OPAQUE(_kern, OID_AUTO, cp_time, CTLFLAG_RD, &cp_time, sizeof(cp_time),
    "LU", "CPU time statistics");
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this with
 * microuptime().  microtime() is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.
 *
 * basetime is used to calculate the compensated real time of day.  Chunky
 * changes to the time, aka settimeofday(), are made by modifying basetime.
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * hardclock().
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
struct timespec basetime;	/* base time adjusts uptime -> realtime */
time_t time_second;		/* read-only 'passive' uptime in seconds */

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timeval, "System boottime");
SYSCTL_STRUCT(_kern, OID_AUTO, basetime, CTLFLAG_RD,
    &basetime, timeval, "System basetime");
static void hardclock(systimer_t info, struct intrframe *frame);
static void statclock(systimer_t info, struct intrframe *frame);
static void schedclock(systimer_t info, struct intrframe *frame);

int	ticks;			/* system master ticks at hz */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
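
/*
 * Illustrative sketch (not part of the original file): nsec_adj is a
 * 32.32 fixed point value, i.e. nanoseconds-per-tick shifted left 32
 * bits.  Whole nanoseconds are only folded into basetime once the
 * accumulator crosses 1 << 32, exactly as hardclock() does below.
 * The +50 nsec/tick figure is made up for illustration.
 */
#if 0
	int64_t adj = (int64_t)50 << 32;	/* +50 nsec per tick */
	int64_t acc = 0;

	acc += adj;				/* one hardclock tick */
	if (acc >= 0x100000000LL) {
		/* basetime.tv_nsec += acc >> 32;	-> +50 nsec */
		acc &= 0xFFFFFFFFLL;
	}
#endif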
/*
 * Finish initializing clock frequencies and start all clocks running.
 */
static void
initclocks(void *dummy)
{
#ifdef DEVICE_POLLING
	init_device_poll();
#endif
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
}
/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = cputimer_count();
	} else {
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}
	systimer_init_periodic(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic(&gd->gd_schedclock, schedclock, NULL, 10);
	crit_exit();
}
/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec ts2;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	nanouptime(&ts2);
	basetime.tv_sec = ts->tv_sec - ts2.tv_sec;
	basetime.tv_nsec = ts->tv_nsec - ts2.tv_nsec;
	if (basetime.tv_nsec < 0) {
		basetime.tv_nsec += 1000000000;
		--basetime.tv_sec;
	}
	if (boottime.tv_sec == 0)
		boottime = basetime;
	crit_exit();
}
/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct pstats *pstats;
	struct globaldata *gd = mycpu;
	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributing (XXX SMP) to all cpus, the per-cpu timebases
	 * remain consistent with each other.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks > cputimer_freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += cputimer_freq;
	}
	/*
	 * The system-wide ticks and softticks are only updated by cpu #0.
	 * Callwheel actions are also (at the moment) only handled by cpu #0.
	 * Finally, we also do NTP related timedelta/tickdelta adjustments
	 * by adjusting basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec nts;
		int leap;

		++ticks;

#ifdef DEVICE_POLLING
		hardclock_device_poll();	/* mpsafe, short and quick */
#endif /* DEVICE_POLLING */
		if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
			setsoftclock();
		} else if (softticks + 1 == ticks) {
			++softticks;
		}

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif
		/*
		 * Apply adjtime corrections.  At the moment only do this if
		 * we can get the MP lock to interlock with adjtime's modification
		 * of these variables.  Note that basetime adjustments are not
		 * MP safe either XXX.
		 */
		if (timedelta != 0 && try_mplock()) {
			basetime.tv_nsec += tickdelta * 1000;
			if (basetime.tv_nsec >= 1000000000) {
				basetime.tv_nsec -= 1000000000;
				++basetime.tv_sec;
			} else if (basetime.tv_nsec < 0) {
				basetime.tv_nsec += 1000000000;
				--basetime.tv_sec;
			}
			timedelta -= tickdelta;
			rel_mplock();
		}
		/*
		 * Apply per-tick compensation.  nsec_adj adjusts for both
		 * offset and frequency, and could be negative.
		 */
		if (nsec_adj != 0 && try_mplock()) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				basetime.tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				basetime.tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (basetime.tv_nsec >= 1000000000) {
				basetime.tv_nsec -= 1000000000;
				++basetime.tv_sec;
			} else if (basetime.tv_nsec < 0) {
				basetime.tv_nsec += 1000000000;
				--basetime.tv_sec;
			}
			rel_mplock();
		}
		/*
		 * If the realtime-adjusted seconds hand rolls over then tell
		 * ntp_update_second() what we did in the last second so it can
		 * calculate what to do in the next second.  It may also add
		 * or subtract a leap second.
		 */
		getnanotime(&nts);
		if (time_second != nts.tv_sec) {
			leap = ntp_update_second(time_second, &nsec_adj);
			basetime.tv_sec += leap;
			time_second = nts.tv_sec + leap;
			nsec_adj /= hz;
		}
	}
	/*
	 * ITimer handling is per-tick, per-cpu.  I don't think psignal()
	 * is mpsafe on curproc, so XXX get the mplock.
	 */
	if ((p = curproc) != NULL && try_mplock()) {
		pstats = p->p_stats;
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
		rel_mplock();
	}
}
/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typ 100Hz).  It is per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
		    (tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_proc;
	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * stats.
		 */
		if (p && (p->p_flag & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cp_time[CP_NICE] += bump;
		else
			cp_time[CP_USER] += bump;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from an splx().
		 */
		if (frame && CLKF_INTR(frame))
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (frame && CLKF_INTR(frame)) {
			cp_time[CP_INTR] += bump;
		} else {
			if (td == &mycpu->gd_idlethread)
				cp_time[CP_IDLE] += bump;
			else
				cp_time[CP_SYS] += bump;
		}
	}
}
/*
 * The scheduler clock typically runs at a 10Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 */
static void
schedclock(systimer_t info, struct intrframe *frame)
{
	struct proc *p;
	struct pstats *pstats;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	schedulerclock(NULL);	/* mpsafe */
	if ((p = curproc) != NULL) {
		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			rss = pgtok(vmspace_resident_count(vm));
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}
/*
 * Compute number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz_high(): negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			      ((u_long)usec + (tick - 1)) / tick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
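
/*
 * Illustrative sketch (not part of the original file): with hz = 100
 * (tick = 10000 usec), a 25ms timeval converts as
 * 0 * hz + (25000 + 9999) / 10000 = 3, plus the extra tick, giving 4.
 * The request is rounded up and padded by one tick so that a clock
 * interrupt arriving almost immediately cannot fire the timeout early.
 */
#if 0
	struct timeval tv = { 0, 25000 };	/* 25ms */

	/* tvtohz_high(&tv) == 4 when hz == 100 */
#endif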
/*
 * Compute number of ticks for the specified amount of time, erroring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / tick);
	else
		ticks = INT_MAX;
	return (ticks);
}
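
/*
 * Illustrative sketch (not part of the original file): the same 25ms
 * timeval converts differently in the two routines.  tvtohz_low()
 * truncates, 25000 / 10000 = 2 ticks (the sleep may end early), while
 * tvtohz_high() rounds up and pads, yielding 4 ticks (never early).
 */
#if 0
	struct timeval tv = { 0, 25000 };	/* 25ms, hz == 100 assumed */

	/* tvtohz_low(&tv) == 2, tvtohz_high(&tv) == 4 */
#endif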
/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			setstatclockrate(profhz);
		}
	}
}
/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			setstatclockrate(stathz);
		}
	}
}
/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;
	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");
/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.  A short usage sketch follows nanotime()
 * below.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 */
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
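
/*
 * Illustrative sketch (not part of the original file): cputimer_freq64_usec
 * is assumed to be the precomputed 32.32 fixed point factor
 * (1000000 << 32) / cputimer_freq, so multiplying a raw count delta by it
 * and shifting right 32 yields microseconds without a 64 bit divide on
 * every call.  The example figures are made up.
 */
#if 0
	/* cputimer_freq == 1193182 (i8254), delta == 596591 (~0.5 sec)  */
	/* freq64_usec = (1000000ULL << 32) / 1193182                    */
	/* usec = (freq64_usec * 596591) >> 32  ~= 500000                */
#endif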
void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
	if (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
	if (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

	tvp->tv_sec += basetime.tv_sec;
	tvp->tv_usec += basetime.tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

	tsp->tv_sec += basetime.tv_sec;
	tsp->tv_nsec += basetime.tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);
	tvp->tv_usec = (cputimer_freq64_usec * delta) >> 32;

	tvp->tv_sec += basetime.tv_sec;
	tvp->tv_usec += basetime.tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = cputimer_count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);
	tsp->tv_nsec = (cputimer_freq64_nsec * delta) >> 32;

	tsp->tv_sec += basetime.tv_sec;
	tsp->tv_nsec += basetime.tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
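
/*
 * Illustrative usage sketch (not part of the original file): measuring an
 * interval with the cheap uptime routines and reading the wall clock with
 * the precise routine.  Variable names are hypothetical.
 */
#if 0
	struct timeval t1, t2;
	struct timespec now;
	int elapsed_usec;

	getmicrouptime(&t1);		/* cheap, may lag by up to 1/hz */
	/* ... do some work ... */
	getmicrouptime(&t2);
	elapsed_usec = (t2.tv_sec - t1.tv_sec) * 1000000 +
	    (t2.tv_usec - t1.tv_usec);

	nanotime(&now);			/* precise real time of day */
#endif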
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}
void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	/*
	 * Convert the count to a timespec.
	 */
	gd = mycpu;
	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);
	if (delta > cputimer_freq) {
		ts.tv_sec += delta / cputimer_freq;
		delta %= cputimer_freq;
	}
	ts.tv_nsec = (cputimer_freq64_nsec * delta) >> 32;
	ts.tv_sec += basetime.tv_sec;
	ts.tv_nsec += basetime.tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		delta = (cputimer_freq64_nsec * tcount) >> 32;