/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/libkern/mcount.c,v 1.16 1999/12/29 04:54:41 peter Exp $
 * $DragonFly: src/sys/libkern/mcount.c,v 1.9 2007/01/12 22:12:50 dillon Exp $
 */

#include <sys/param.h>
#include <sys/gmon.h>
#ifdef _KERNEL
#ifndef GUPROF
#include <sys/systm.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
void	bintr(void);
void	btrap(void);
void	eintr(void);
void	user(void);
#endif

/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
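/*
 * The arc data lives in two arrays hung off _gmonparam: froms[] is a hash
 * table indexed by the scaled caller offset and holds the tos[] index of
 * the head of a chain of tostruct entries; each tostruct records one
 * callee (selfpc), a call count, and a link to the next entry whose
 * caller hashed to the same froms[] slot.
 */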
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
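	/*
	 * In the kernel, MCOUNT_ENTER/MCOUNT_EXIT (from <machine/profile.h>)
	 * bracket the update so we are not re-entered from an interrupt;
	 * on most ports they save and disable interrupts.  Userland simply
	 * marks the state busy to suppress recursion.
	 */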
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	p->state = GMON_PROF_BUSY;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 *
	 * XXX doesn't work properly with vkernel
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAX_USER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
			goto done;
	}
#endif

#ifdef GUPROF
	if (p->state == GMON_PROF_HIRES) {
		/*
		 * Count the time since cputime() was previously called
		 * against `frompc'.  Compensate for overheads.
		 *
		 * cputime() sets its prev_count variable to the count when
		 * it is called.  This in effect starts a counter for
		 * the next period of execution (normally from now until
		 * the next call to mcount() or mexitcount()).  We set
		 * cputime_bias to compensate for our own overhead.
		 *
		 * We use the usual sampling counters since they can be
		 * located efficiently.  4-byte counters are usually
		 * necessary.  gprof will add up the scattered counts
		 * just like it does for statistical profiling.  All
		 * counts are signed so that underflow in the subtractions
		 * doesn't matter much (negative counts are normally
		 * compensated for by larger counts elsewhere).  Underflow
		 * shouldn't occur, but may be caused by slightly wrong
		 * calibrations or from not clearing cputime_bias.
		 */
		delta = cputime() - cputime_bias - p->mcount_pre_overhead;
		cputime_bias = p->mcount_post_overhead;
		KCOUNT(p, frompci) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mcount_count += p->mcount_overhead;
	}
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example: signal catchers get called from the stack,
	 * not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

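	/*
	 * Hash the caller's offset into froms[]: each froms[] slot covers
	 * hashfraction * sizeof(*froms) bytes of text and holds the tos[]
	 * index of the head of the arc chain for callers in that range.
	 */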
	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc
		 */
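		/*
		 * tos[0] is never handed out as an arc entry; its link
		 * field serves as the allocation counter for tos[].
		 */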
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	p->state = GMON_PROF_ERROR;
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}

/*
 * Actual definition of mcount function.  Defined in <machine/profile.h>,
 * which is included by <sys/gmon.h>.
 */
MCOUNT

#ifdef GUPROF
void
mexitcount(uintfptr_t selfpc)
{
	struct gmonparam *p;
	uintfptr_t selfpcdiff;

	p = &_gmonparam;
	selfpcdiff = selfpc - (uintfptr_t)p->lowpc;
	if (selfpcdiff < p->textsize) {
		int delta;

		/*
		 * Count the time since cputime() was previously called
		 * against `selfpc'.  Compensate for overheads.
		 */
		delta = cputime() - cputime_bias - p->mexitcount_pre_overhead;
		cputime_bias = p->mexitcount_post_overhead;
		KCOUNT(p, selfpcdiff) += delta;
		*p->cputime_count += p->cputime_overhead;
		*p->mexitcount_count += p->mexitcount_overhead;
	}
}

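/*
 * Calibration helpers for GUPROF.  Their bodies intentionally do next to
 * nothing, so the time the startup calibration code measures while running
 * them is essentially pure mcount()/mexitcount() profiling overhead.
 */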
void
empty_loop(void)
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		;
}

void
nullfunc(void)
{
}

void
nullfunc_loop(void)
{
	int i;

	for (i = 0; i < CALIB_SCALE; i++)
		nullfunc();
}
#endif /* GUPROF */