/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_clock.c,v 1.105.2.10 2002/10/17 13:19:40 maxim Exp $
 */
#include "opt_polling.h"
#include "opt_ifpoll.h"
#include "opt_pctrack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/kinfo.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>

#include <machine/cpu.h>
#include <machine/limits.h>
#include <machine/smp.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif
#ifdef DEVICE_POLLING
extern void init_device_poll_pcpu(int);
#endif

#ifdef IFPOLL_ENABLE
extern void ifpoll_init_pcpu(int);
#endif

#ifdef DEBUG_PCTRACK
static void do_pctrack(struct intrframe *frame, int which);
#endif

static void initclocks (void *dummy);
SYSINIT(clocks, SI_BOOT2_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
/*
 * Some of these don't belong here, but it's easiest to concentrate them.
 * Note that cpu_time counts in microseconds, but most userland programs
 * just compare relative times against the total by delta.
 */
struct kinfo_cputime cputime_percpu[MAXCPU];
#ifdef DEBUG_PCTRACK
struct kinfo_pcheader cputime_pcheader = { PCTRACK_SIZE, PCTRACK_ARYSIZE };
struct kinfo_pctrack cputime_pctrack[MAXCPU][PCTRACK_SIZE];
#endif
static int
sysctl_cputime(SYSCTL_HANDLER_ARGS)
{
	int cpu;
	int error = 0;
	size_t size = sizeof(struct kinfo_cputime);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if ((error = SYSCTL_OUT(req, &cputime_percpu[cpu], size)))
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cputime, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_cputime, "S,kinfo_cputime", "CPU time statistics");
static int
sysctl_cp_time(SYSCTL_HANDLER_ARGS)
{
	long cpu_states[5] = {0};
	int cpu;
	int error = 0;
	size_t size = sizeof(cpu_states);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		cpu_states[CP_USER] += cputime_percpu[cpu].cp_user;
		cpu_states[CP_NICE] += cputime_percpu[cpu].cp_nice;
		cpu_states[CP_SYS] += cputime_percpu[cpu].cp_sys;
		cpu_states[CP_INTR] += cputime_percpu[cpu].cp_intr;
		cpu_states[CP_IDLE] += cputime_percpu[cpu].cp_idle;
	}
	error = SYSCTL_OUT(req, cpu_states, size);
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, cp_time, (CTLTYPE_LONG|CTLFLAG_RD), 0, 0,
	sysctl_cp_time, "LU", "CPU time statistics");
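
/*
 * Illustrative userland sketch (not part of this file): because the
 * cp_time counters only ever increase, a monitoring program derives
 * CPU percentages by sampling twice and comparing deltas, matching
 * the note above that userland "just compares relative times against
 * the total by delta".  Error handling is elided for brevity.
 *
 *	long a[5], b[5], total = 0;
 *	size_t len = sizeof(a);
 *	sysctlbyname("kern.cp_time", a, &len, NULL, 0);
 *	sleep(1);
 *	len = sizeof(b);
 *	sysctlbyname("kern.cp_time", b, &len, NULL, 0);
 *	for (int i = 0; i < 5; ++i)
 *		total += b[i] - a[i];
 *	printf("user %ld%%  idle %ld%%\n",
 *	    100 * (b[CP_USER] - a[CP_USER]) / total,
 *	    100 * (b[CP_IDLE] - a[CP_IDLE]) / total);
 */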
/*
 * boottime is used to calculate the 'real' uptime.  Do not confuse this
 * with microuptime(), which is not drift compensated.  The real uptime
 * with compensation is nanotime() - boottime.  boottime is recalculated
 * whenever the real time is set based on the compensated elapsed time
 * in seconds (gd->gd_time_seconds).
 *
 * The gd_time_seconds and gd_cpuclock_base fields remain fairly monotonic.
 * Slight adjustments to gd_cpuclock_base are made to phase-lock it to
 * the sys_cputimer.
 */
struct timespec boottime;	/* boot time (realtime) for reference only */
time_t time_second;		/* read-only 'passive' uptime in seconds */
/*
 * basetime is used to calculate the compensated real time of day.  The
 * basetime can be modified on a per-tick basis by the adjtime(),
 * ntp_adjtime(), and sysctl-based time correction APIs.
 *
 * Note that frequency corrections can also be made by adjusting
 * basetime on a per-tick basis (see hardclock()).
 *
 * basetime is a tail-chasing FIFO, updated only by cpu #0.  The FIFO is
 * used on both SMP and UP systems to avoid MP races between cpus and
 * interrupt races on UP systems.
 */
#define BASETIME_ARYSIZE	16
#define BASETIME_ARYMASK	(BASETIME_ARYSIZE - 1)
static struct timespec basetime[BASETIME_ARYSIZE];
static volatile int basetime_index;
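
/*
 * A minimal sketch (illustration only, mirroring the code in this file)
 * of the lock-free protocol the tail-chasing FIFO enables.  The writer
 * (cpu #0) fills the *next* slot before publishing the new index, and
 * readers snapshot the index and fence before dereferencing, so a torn
 * timespec is never observed:
 *
 *	// writer, cpu #0 only
 *	int ni = (basetime_index + 1) & BASETIME_ARYMASK;
 *	basetime[ni] = new_bt;		// fill the unpublished slot
 *	cpu_sfence();			// slot contents visible first
 *	basetime_index = ni;		// then publish the index
 *
 *	// reader, any cpu
 *	int i = basetime_index;
 *	cpu_lfence();			// don't read the slot early
 *	struct timespec bt = basetime[i];
 */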
static int
sysctl_get_basetime(SYSCTL_HANDLER_ARGS)
{
	struct timespec *bt;
	int error;
	int index;

	/*
	 * Because basetime data and index may be updated by another cpu,
	 * a load fence is required to ensure that the data we read has
	 * not been speculatively read relative to a possibly updated index.
	 */
	index = basetime_index;
	cpu_lfence();
	bt = &basetime[index];
	error = SYSCTL_OUT(req, bt, sizeof(*bt));
	return (error);
}

SYSCTL_STRUCT(_kern, KERN_BOOTTIME, boottime, CTLFLAG_RD,
    &boottime, timespec, "System boottime");
SYSCTL_PROC(_kern, OID_AUTO, basetime, CTLTYPE_STRUCT|CTLFLAG_RD, 0, 0,
    sysctl_get_basetime, "S,timespec", "System basetime");
static void hardclock(systimer_t info, int, struct intrframe *frame);
static void statclock(systimer_t info, int, struct intrframe *frame);
static void schedclock(systimer_t info, int, struct intrframe *frame);
static void getnanotime_nbt(struct timespec *nbt, struct timespec *tsp);

int	ticks;			/* system master ticks at hz */
int	clocks_running;		/* tsleep/timeout clocks operational */
int64_t	nsec_adj;		/* ntpd per-tick adjustment in nsec << 32 */
int64_t	nsec_acc;		/* accumulator */
int	sched_ticks;		/* global schedule clock ticks */

/* NTPD time correction fields */
int64_t	ntp_tick_permanent;	/* per-tick adjustment in nsec << 32 */
int64_t	ntp_tick_acc;		/* accumulator for per-tick adjustment */
int64_t	ntp_delta;		/* one-time correction in nsec */
int64_t	ntp_big_delta = 1000000000;
int32_t	ntp_tick_delta;		/* current adjustment rate */
int32_t	ntp_default_tick_delta;	/* adjustment rate for ntp_delta */
time_t	ntp_leap_second;	/* time of next leap second */
int	ntp_leap_insert;	/* whether to insert or remove a second */
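
/*
 * Worked example (illustrative): the << 32 fields above are 32.32
 * fixed-point nanosecond counts.  To slew the clock by +100 ppm with
 * hz = 100 (10,000,000 ns per tick), the per-tick adjustment is
 * 1000 ns, stored as
 *
 *	ntp_tick_permanent = 1000LL << 32;
 *
 * Each hardclock adds this into ntp_tick_acc; whole nanoseconds are
 * peeled off with (ntp_tick_acc >> 32) and folded into basetime while
 * the sub-nanosecond remainder stays in the accumulator, so no
 * fractional correction is ever lost.
 */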
/*
 * Finish initializing clock frequencies and start all clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	/*psratio = profhz / stathz;*/
	initclocks_pcpu();
	clocks_running = 1;
}

/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef DEVICE_POLLING
	init_device_poll_pcpu(gd->gd_cpuid);
#endif

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				  NULL, ESTCPUFREQ);
	crit_exit();
}
/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
		nbt->tv_nsec += 1000000000;
		--nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
	 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;
	crit_exit();
}
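
/*
 * Worked example (illustrative): suppose the box has been up 1000
 * seconds (nanouptime() ~= 1000.0) and ts sets the wall clock to
 * 2,000,000,000.  The new slot is loaded with
 *
 *	basetime = 2000000000 - 1000 = 1999999000 seconds
 *
 * so any reader computing uptime + basetime immediately sees the new
 * time of day, while gd_time_seconds itself never steps and uptime
 * consumers are unaffected.
 */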
/*
 * Each cpu has its own hardclock, but we only increment ticks and softticks
 * on cpu #0.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 */
static void
hardclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	sysclock_t cputicks;
	struct proc *p;
	struct globaldata *gd = mycpu;

	/*
	 * Realtime updates are per-cpu.  Note that timer corrections as
	 * returned by microtime() and friends make an additional adjustment
	 * using a system-wide 'basetime', but the running time is always
	 * taken from the per-cpu globaldata area.  Since the same clock
	 * is distributing (XXX SMP) to all cpus, the per-cpu timebases
	 * stay consistent with each other.
	 *
	 * Note that we never allow info->time (aka gd->gd_hardclock.time)
	 * to reverse index gd_cpuclock_base, but that it is possible for
	 * it to temporarily get behind in the seconds if something in the
	 * system locks interrupts for a long period of time.  Since periodic
	 * timers count events, though, everything should resynch again
	 * immediately.
	 */
	cputicks = info->time - gd->gd_cpuclock_base;
	if (cputicks >= sys_cputimer->freq) {
		++gd->gd_time_seconds;
		gd->gd_cpuclock_base += sys_cputimer->freq;
	}

	/*
	 * The system-wide ticks counter and NTP related timedelta/tickdelta
	 * adjustments only occur on cpu #0.  NTP adjustments are accomplished
	 * by updating basetime.
	 */
	if (gd->gd_cpuid == 0) {
		struct timespec *nbt;
		struct timespec nts;
		int leap;
		int ni;

		++ticks;

#if 0
		if (tco->tc_poll_pps)
			tco->tc_poll_pps(tco);
#endif

		/*
		 * Calculate the new basetime index.  We are in a critical
		 * section on cpu #0 and can safely play with basetime_index.
		 * Start with the current basetime and then make adjustments.
		 */
		ni = (basetime_index + 1) & BASETIME_ARYMASK;
		nbt = &basetime[ni];
		*nbt = basetime[basetime_index];

		/*
		 * Apply adjtime corrections.  (adjtime() API)
		 *
		 * adjtime() only runs on cpu #0 so our critical section is
		 * sufficient to access these variables.
		 */
		if (ntp_delta != 0) {
			nbt->tv_nsec += ntp_tick_delta;
			ntp_delta -= ntp_tick_delta;
			if ((ntp_delta > 0 && ntp_delta < ntp_tick_delta) ||
			    (ntp_delta < 0 && ntp_delta > ntp_tick_delta)) {
				ntp_tick_delta = ntp_delta;
			}
		}

		/*
		 * Apply permanent frequency corrections.  (sysctl API)
		 */
		if (ntp_tick_permanent != 0) {
			ntp_tick_acc += ntp_tick_permanent;
			if (ntp_tick_acc >= (1LL << 32)) {
				nbt->tv_nsec += ntp_tick_acc >> 32;
				ntp_tick_acc -= (ntp_tick_acc >> 32) << 32;
			} else if (ntp_tick_acc <= -(1LL << 32)) {
				/* Negate ntp_tick_acc to avoid shifting the
				 * sign bit. */
				nbt->tv_nsec -= (-ntp_tick_acc) >> 32;
				ntp_tick_acc += ((-ntp_tick_acc) >> 32) << 32;
			}
		}

		if (nbt->tv_nsec >= 1000000000) {
			++nbt->tv_sec;
			nbt->tv_nsec -= 1000000000;
		} else if (nbt->tv_nsec < 0) {
			--nbt->tv_sec;
			nbt->tv_nsec += 1000000000;
		}

		/*
		 * Another per-tick compensation.  (for ntp_adjtime() API)
		 */
		if (nsec_adj != 0) {
			nsec_acc += nsec_adj;
			if (nsec_acc >= 0x100000000LL) {
				nbt->tv_nsec += nsec_acc >> 32;
				nsec_acc = (nsec_acc & 0xFFFFFFFFLL);
			} else if (nsec_acc <= -0x100000000LL) {
				nbt->tv_nsec -= -nsec_acc >> 32;
				nsec_acc = -(-nsec_acc & 0xFFFFFFFFLL);
			}
			if (nbt->tv_nsec >= 1000000000) {
				nbt->tv_nsec -= 1000000000;
				++nbt->tv_sec;
			} else if (nbt->tv_nsec < 0) {
				nbt->tv_nsec += 1000000000;
				--nbt->tv_sec;
			}
		}

		/************************************************************
		 *			LEAP SECOND CORRECTION		    *
		 ************************************************************
		 *
		 * Taking into account all the corrections made above, figure
		 * out the new real time.  If the seconds field has changed
		 * then apply any pending leap-second corrections.
		 */
		getnanotime_nbt(nbt, &nts);

		if (time_second != nts.tv_sec) {
			/*
			 * Apply leap second (sysctl API).  Adjust nts for
			 * changes so we do not have to call getnanotime_nbt
			 * again.
			 */
			if (ntp_leap_second) {
				if (ntp_leap_second == nts.tv_sec) {
					if (ntp_leap_insert) {
						--nbt->tv_sec;
						--nts.tv_sec;
					} else {
						++nbt->tv_sec;
						++nts.tv_sec;
					}
					ntp_leap_second--;
				}
			}

			/*
			 * Apply leap second (ntp_adjtime() API), calculate
			 * a new nsec_adj field.  ntp_update_second() returns
			 * nsec_adj as a per-second value but we need it as a
			 * per-tick value.
			 */
			leap = ntp_update_second(time_second, &nsec_adj);
			nsec_adj /= hz;
			nbt->tv_sec += leap;
			nts.tv_sec += leap;

			/*
			 * Update the time_second 'approximate time' global.
			 */
			time_second = nts.tv_sec;
		}

		/*
		 * Finally, our new basetime is ready to go live!
		 */
		cpu_sfence();
		basetime_index = ni;
	}

	/*
	 * lwkt thread scheduler fair queueing
	 */
	lwkt_schedulerclock(curthread);

	/*
	 * softticks are handled for all cpus
	 */
	hardclock_softtick(gd);

	/*
	 * ITimer handling is per-tick, per-cpu.
	 *
	 * We must acquire the per-process token in order for ksignal()
	 * to be non-blocking.  For the moment this requires an AST fault,
	 * the ksignal() cannot be safely issued from this hard interrupt.
	 *
	 * XXX Even the trytoken here isn't right, and itimer operation in
	 * a multi threaded environment is going to be weird at the
	 * best of times.
	 */
	if ((p = curproc) != NULL && lwkt_trytoken(&p->p_token)) {
		if (frame && CLKF_USERMODE(frame) &&
		    timevalisset(&p->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_VIRTUAL], ustick) == 0) {
			p->p_flags |= P_SIGVTALRM;
		}
		if (timevalisset(&p->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&p->p_timer[ITIMER_PROF], ustick) == 0) {
			p->p_flags |= P_SIGPROF;
		}
		lwkt_reltoken(&p->p_token);
	}
}
/*
 * The statistics clock typically runs at a 125Hz rate, and is intended
 * to be frequency offset from the hardclock (typically 100Hz).  It is
 * per-cpu.
 *
 * NOTE! systimer! the MP lock might not be held here.  We can only safely
 * manipulate objects owned by the current cpu.
 *
 * The stats clock is responsible for grabbing a profiling sample.
 * Most of the statistics are only used by user-level statistics programs.
 * The main exceptions are p->p_uticks, p->p_sticks, p->p_iticks, and
 * p->p_estcpu.
 *
 * Like the other clocks, the stat clock is called from what is effectively
 * a fast interrupt, so the context should be the thread/process that got
 * interrupted.
 */
static void
statclock(systimer_t info, int in_ipi, struct intrframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	int i;
#endif
	thread_t td;
	struct proc *p;
	int bump;
	struct timeval tv;
	struct timeval *stv;

	/*
	 * How big was our timeslice relative to the last time?
	 */
	microuptime(&tv);	/* mpsafe */
	stv = &mycpu->gd_stattv;
	if (stv->tv_sec == 0) {
		bump = 1;
	} else {
		bump = tv.tv_usec - stv->tv_usec +
			(tv.tv_sec - stv->tv_sec) * 1000000;
		if (bump < 0)
			bump = 0;
		if (bump > 1000000)
			bump = 1000000;
	}
	*stv = tv;

	td = curthread;
	p = td->td_lwp ? td->td_lwp->lwp_proc : NULL;

	if (frame && CLKF_USERMODE(frame)) {
		/*
		 * Came from userland, handle user time and deal with
		 * possible process statistics.
		 */
		if (p && (p->p_flags & P_PROFIL))
			addupc_intr(p, CLKF_PC(frame), 1);
		td->td_uticks += bump;

		/*
		 * Charge the time as appropriate
		 */
		if (p && p->p_nice > NZERO)
			cpu_time.cp_nice += bump;
		else
			cpu_time.cp_user += bump;
	} else {
		int intr_nest = mycpu->gd_intr_nesting_level;

		if (in_ipi) {
			/*
			 * IPI processing code will bump gd_intr_nesting_level
			 * up by one, which breaks following CLKF_INTR testing,
			 * so we subtract it by one here.
			 */
			--intr_nest;
		}
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && frame) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif

#define IS_INTR_RUNNING	((frame && CLKF_INTR(intr_nest)) || CLKF_INTR_TD(td))

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 *
		 * XXX assume system if frame is NULL.  A NULL frame
		 * can occur if ipi processing is done from a crit_exit().
		 */
		if (IS_INTR_RUNNING)
			td->td_iticks += bump;
		else
			td->td_sticks += bump;

		if (IS_INTR_RUNNING) {
#ifdef DEBUG_PCTRACK
			if (frame)
				do_pctrack(frame, PCTRACK_INT);
#endif
			cpu_time.cp_intr += bump;
		} else {
			if (td == &mycpu->gd_idlethread) {
				cpu_time.cp_idle += bump;
			} else {
#ifdef DEBUG_PCTRACK
				if (frame)
					do_pctrack(frame, PCTRACK_SYS);
#endif
				cpu_time.cp_sys += bump;
			}
		}
#undef IS_INTR_RUNNING
	}
}
#ifdef DEBUG_PCTRACK
/*
 * Sample the PC when in the kernel or in an interrupt.  User code can
 * retrieve the information and generate a histogram or other output.
 */
static void
do_pctrack(struct intrframe *frame, int which)
{
	struct kinfo_pctrack *pctrack;

	pctrack = &cputime_pctrack[mycpu->gd_cpuid][which];
	pctrack->pc_array[pctrack->pc_index & PCTRACK_ARYMASK] =
		(void *)CLKF_PC(frame);
	++pctrack->pc_index;
}

static int
sysctl_pctrack(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_pcheader head;
	int error;
	int cpu;
	int ntrack;

	head.pc_ntrack = PCTRACK_SIZE;
	head.pc_arysize = PCTRACK_ARYSIZE;

	if ((error = SYSCTL_OUT(req, &head, sizeof(head))) != 0)
		return (error);

	for (cpu = 0; cpu < ncpus; ++cpu) {
		for (ntrack = 0; ntrack < PCTRACK_SIZE; ++ntrack) {
			error = SYSCTL_OUT(req, &cputime_pctrack[cpu][ntrack],
					   sizeof(struct kinfo_pctrack));
			if (error)
				break;
		}
		if (error)
			break;
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, pctrack, (CTLTYPE_OPAQUE|CTLFLAG_RD), 0, 0,
	sysctl_pctrack, "S,kinfo_pcheader", "CPU PC tracking");

#endif
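
/*
 * Illustrative userland sketch (not part of this file): judging from the
 * handler above, the opaque kern.pctrack blob is a kinfo_pcheader
 * followed by ncpus * PCTRACK_SIZE kinfo_pctrack records; a
 * post-processor would resolve the sampled PCs against the kernel
 * symbol table to build a histogram.
 *
 *	size_t len = 0;
 *	sysctlbyname("kern.pctrack", NULL, &len, NULL, 0);  // size probe
 *	void *buf = malloc(len);
 *	sysctlbyname("kern.pctrack", buf, &len, NULL, 0);
 *	struct kinfo_pcheader head;
 *	memcpy(&head, buf, sizeof(head));
 *	// head.pc_ntrack records per cpu follow the header
 */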
/*
 * The scheduler clock typically runs at a 50Hz rate.  NOTE! systimer,
 * the MP lock might not be held.  We can safely manipulate parts of curproc
 * but that's about it.
 *
 * Each cpu has its own scheduler clock.
 */
static void
schedclock(systimer_t info, int in_ipi __unused, struct intrframe *frame)
{
	struct lwp *lp;
	struct rusage *ru;
	struct vmspace *vm;
	long rss;

	if ((lp = lwkt_preempted_proc()) != NULL) {
		/*
		 * Account for cpu time used and hit the scheduler.  Note
		 * that this call MUST BE MP SAFE, and the BGL IS NOT HELD
		 * here.
		 */
		++lp->lwp_cpticks;
		usched_schedulerclock(lp, info->periodic, info->time);
	} else {
		usched_schedulerclock(NULL, info->periodic, info->time);
	}
	if ((lp = curthread->td_lwp) != NULL) {
		/*
		 * Update resource usage integrals and maximums.
		 */
		if ((ru = &lp->lwp_proc->p_ru) &&
		    (vm = lp->lwp_proc->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			if (lwkt_trytoken(&vm->vm_map.token)) {
				rss = pgtok(vmspace_resident_count(vm));
				if (ru->ru_maxrss < rss)
					ru->ru_maxrss = rss;
				lwkt_reltoken(&vm->vm_map.token);
			}
		}
	}
	/* Increment the global sched_ticks */
	if (mycpu->gd_cpuid == 0)
		++sched_ticks;
}
/*
 * Compute the number of ticks for the specified amount of time.  The
 * return value is intended to be used in a clock interrupt timed
 * operation and guaranteed to meet or exceed the requested time.
 * If the representation overflows, return INT_MAX.  The minimum return
 * value is 1 tick and the function will average the calculation up.
 * If any value greater than 0 microseconds is supplied, a value
 * of at least 2 will be returned to ensure that a near-term clock
 * interrupt does not cause the timeout to occur (degenerately) early.
 *
 * Note that limit checks must take into account microseconds, which is
 * done simply by using the smaller signed long maximum instead of
 * the unsigned long maximum.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_high(struct timeval *tv)
{
	int ticks;
	long sec, usec;

	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		kprintf("tvtohz_high: negative time difference "
			"%ld sec %ld usec\n",
			sec, usec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			((u_long)usec + (ustick - 1)) / ustick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
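
/*
 * Worked example (illustrative): with hz = 100 (ustick = 10000 us per
 * tick), tvtohz_high() on {0 sec, 25000 usec} computes
 *
 *	0 * 100 + (25000 + 9999) / 10000 + 1 = 3 + 1 = 4 ticks,
 *
 * i.e. the usec remainder is rounded up and one extra tick is added so
 * the timeout can never fire before the requested 25ms has elapsed.
 */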
int
tstohz_high(struct timespec *ts)
{
	int ticks;
	long sec, nsec;

	sec = ts->tv_sec;
	nsec = ts->tv_nsec;
	if (nsec < 0) {
		sec--;
		nsec += 1000000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (nsec > 0) {
			sec++;
			nsec -= 1000000000;
		}
		kprintf("tstohz_high: negative time difference "
			"%ld sec %ld nsec\n",
			sec, nsec);
#endif
		ticks = 1;
	} else if (sec <= INT_MAX / hz) {
		ticks = (int)(sec * hz +
			((u_long)nsec + (nstick - 1)) / nstick) + 1;
	} else {
		ticks = INT_MAX;
	}
	return (ticks);
}
/*
 * Compute the number of ticks for the specified amount of time, erring on
 * the side of it being too low to ensure that sleeping the returned number
 * of ticks will not result in a late return.
 *
 * The supplied timeval may not be negative and should be normalized.  A
 * return value of 0 is possible if the timeval converts to less than
 * 1 tick.
 *
 * If ints have 32 bits, then the maximum value for any timeout in
 * 10ms ticks is 248 days.
 */
int
tvtohz_low(struct timeval *tv)
{
	int ticks;
	long sec;

	sec = tv->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)tv->tv_usec / ustick);
	else
		ticks = INT_MAX;
	return (ticks);
}

int
tstohz_low(struct timespec *ts)
{
	int ticks;
	long sec;

	sec = ts->tv_sec;
	if (sec <= INT_MAX / hz)
		ticks = (int)(sec * hz + (u_long)ts->tv_nsec / nstick);
	else
		ticks = INT_MAX;
	return (ticks);
}
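
/*
 * Worked example (illustrative): with hz = 100, a 25ms timeval yields
 * tvtohz_low() == 2 (the partial tick is rounded down) while
 * tvtohz_high() above yields 4.  Callers pick the variant whose
 * rounding error is safe for them: _low guarantees the sleep does not
 * return late, _high guarantees it does not return early.
 */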
/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{
	if ((p->p_flags & P_PROFIL) == 0) {
		p->p_flags |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			crit_enter();
			setstatclockrate(profhz);
			crit_exit();
		}
	}
}

/*
 * Stop profiling on a process.
 *
 * caller must hold p->p_token
 */
void
stopprofclock(struct proc *p)
{
	if (p->p_flags & P_PROFIL) {
		p->p_flags &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			crit_enter();
			setstatclockrate(stathz);
			crit_exit();
		}
	}
}
/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.ci_hz = hz;
	clkinfo.ci_tick = ustick;
	clkinfo.ci_tickadj = ntp_default_tick_delta / 1000;
	clkinfo.ci_profhz = profhz;
	clkinfo.ci_stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo","");
/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot, and these are well suited for time
 * interval measurements.
 *
 * Each cpu independently maintains the current time of day, so all
 * we need to do to protect ourselves from changes is to do a loop
 * check on the seconds field changing out from under us.
 *
 * The system timer maintains a 32 bit count and due to various issues
 * it is possible for the calculated delta to occasionally exceed
 * sys_cputimer->freq.  If this occurs the sys_cputimer->freq64_nsec
 * multiplication can easily overflow, so we deal with the case.  For
 * uniformity we deal with the case in the usec case too.
 *
 * All the [get][micro,nano][time,uptime]() routines are MPSAFE.
 */
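
/*
 * Illustrative usage sketch (not part of this file): for cheap interval
 * timing inside the kernel the "get" + "up" combination is usually the
 * right one; the raw variants cost a sys_cputimer->count() read, and
 * the non-"up" variants can step when the wall clock is set.
 *
 *	struct timespec t1, t2;
 *	getnanouptime(&t1);		// coarse: latest hardclock sample
 *	do_work();
 *	getnanouptime(&t2);
 *	timespecsub(&t2, &t1);		// t2 = elapsed, accurate to ~1/hz
 *
 * Use nanouptime() instead when sub-tick resolution matters more than
 * the cost of reading the timer hardware.
 */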
void
getmicrouptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
	if (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}
void
getnanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
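
/*
 * Worked example (illustrative): freq64_nsec behaves as the 32.32
 * fixed-point value (1000000000 << 32) / freq, so the multiply-and-shift
 * above is just delta * (ns per timer count) with 32 fractional bits.
 * For a 3.579545 MHz 8254-style timer and delta = 35795 counts:
 *
 *	freq64_nsec ~= 279.365 * 2^32
 *	(freq64_nsec * 35795) >> 32 ~= 9999870 ns  (~10 ms)
 *
 * Keeping delta < freq (enforced just above) bounds the product below
 * 10^9 * 2^32 and avoids 64 bit overflow.
 */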
void
microuptime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;
}

void
nanouptime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
}
/*
 * realtime routines
 */
void
getmicrotime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}

static void
getnanotime_nbt(struct timespec *nbt, struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = gd->gd_hardclock.time - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	tsp->tv_sec += nbt->tv_sec;
	tsp->tv_nsec += nbt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
void
microtime(struct timeval *tvp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tvp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tvp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tvp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tvp->tv_usec = (sys_cputimer->freq64_usec * delta) >> 32;

	bt = &basetime[basetime_index];
	tvp->tv_sec += bt->tv_sec;
	tvp->tv_usec += bt->tv_nsec / 1000;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_usec -= 1000000;
		++tvp->tv_sec;
	}
}

void
nanotime(struct timespec *tsp)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;
	sysclock_t delta;

	do {
		tsp->tv_sec = gd->gd_time_seconds;
		delta = sys_cputimer->count() - gd->gd_cpuclock_base;
	} while (tsp->tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		tsp->tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	tsp->tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;

	bt = &basetime[basetime_index];
	tsp->tv_sec += bt->tv_sec;
	tsp->tv_nsec += bt->tv_nsec;
	while (tsp->tv_nsec >= 1000000000) {
		tsp->tv_nsec -= 1000000000;
		++tsp->tv_sec;
	}
}
/*
 * note: this is not exactly synchronized with real time.  To do that we
 * would have to do what microtime does and check for a nanoseconds
 * overflow.
 */
time_t
get_approximate_time_t(void)
{
	struct globaldata *gd = mycpu;
	struct timespec *bt;

	bt = &basetime[basetime_index];
	return(gd->gd_time_seconds + bt->tv_sec);
}
int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	struct pps_fetch_args *fapi;
#ifdef PPS_SYNC
	struct pps_kcbind_args *kapi;
#endif

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		app->api_version = PPS_API_VERS_1;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		fapi = (struct pps_fetch_args *)data;
		if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec)
			return (EOPNOTSUPP);
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		fapi->pps_info_buf = pps->ppsinfo;
		return (0);
	case PPS_IOC_KCBIND:
#ifdef PPS_SYNC
		kapi = (struct pps_kcbind_args *)data;
		/* XXX Only root should be able to do this */
		if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
			return (EINVAL);
		if (kapi->kernel_consumer != PPS_KC_HARDPPS)
			return (EINVAL);
		if (kapi->edge & ~pps->ppscap)
			return (EINVAL);
		pps->kcmode = kapi->edge;
		return (0);
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (ENOTTY);
	}
}
void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
}
void
pps_event(struct pps_state *pps, sysclock_t count, int event)
{
	struct globaldata *gd;
	struct timespec *tsp;
	struct timespec *osp;
	struct timespec *bt;
	struct timespec ts;
	sysclock_t *pcount;
#ifdef PPS_SYNC
	sysclock_t tcount;
#endif
	sysclock_t delta;
	pps_seq_t *pseq;
	int foff;
	int fhard;

	gd = mycpu;

	/* Things would be easier with arrays... */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	do {
		ts.tv_sec = gd->gd_time_seconds;
		delta = count - gd->gd_cpuclock_base;
	} while (ts.tv_sec != gd->gd_time_seconds);

	if (delta >= sys_cputimer->freq) {
		ts.tv_sec += delta / sys_cputimer->freq;
		delta %= sys_cputimer->freq;
	}
	ts.tv_nsec = (sys_cputimer->freq64_nsec * delta) >> 32;
	bt = &basetime[basetime_index];
	ts.tv_sec += bt->tv_sec;
	ts.tv_nsec += bt->tv_nsec;
	while (ts.tv_nsec >= 1000000000) {
		ts.tv_nsec -= 1000000000;
		++ts.tv_sec;
	}

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		if (tcount >= sys_cputimer->freq) {
			delta = (1000000000 * (tcount / sys_cputimer->freq) +
				 sys_cputimer->freq64_nsec *
				 (tcount % sys_cputimer->freq)) >> 32;
		} else {
			delta = (sys_cputimer->freq64_nsec * tcount) >> 32;
		}
		hardpps(tsp, delta);
	}
#endif
}
/*
 * Return the tsc target value for a delay of (ns).
 *
 * Returns -1 if the TSC is not supported.
 */
int64_t
tsc_get_target(int ns)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		return (rdtsc() +
			tsc_frequency * ns / (int64_t)1000000000);
	}
#endif
	return (-1);
}

/*
 * Compare the tsc against the passed target.
 *
 * Returns +1 if the target has been reached
 * Returns  0 if the target has not yet been reached
 * Returns -1 if the TSC is not supported.
 *
 * Typical use:		while (tsc_test_target(target) == 0) { ...poll... }
 */
int
tsc_test_target(int64_t target)
{
#if defined(_RDTSC_SUPPORTED_)
	if (cpu_feature & CPUID_TSC) {
		if ((int64_t)(target - rdtsc()) <= 0)
			return (1);
		return (0);
	}
#endif
	return (-1);
}

/*
 * Delay the specified number of nanoseconds using the tsc.  This function
 * returns immediately if the TSC is not supported.  At least one cpu_pause()
 * will be issued.
 */
void
tsc_delay(int ns)
{
	int64_t clk;

	clk = tsc_get_target(ns);
	cpu_pause();
	while (tsc_test_target(clk) == 0)
		cpu_pause();
}
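
/*
 * Illustrative usage (not part of this file): a driver needing a short,
 * interrupt-safe busy-wait, e.g. ~200ns for a register to settle, can
 * use the pair directly rather than tsc_delay() when it also wants to
 * poll a condition:
 *
 *	int64_t target = tsc_get_target(200);
 *	while (tsc_test_target(target) == 0) {
 *		if (REG_READY(sc))	// hypothetical device-ready test
 *			break;
 *		cpu_pause();
 *	}
 */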