evtr: dump core frequencies and use them to print timestamps in usecs
[dragonfly.git] / lib / libevtr / evtr.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2009, 2010 Aggelos Economopoulos. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/tree.h>
43
44
45#include "evtr.h"
46
/*
 * File-format and implementation constants.
 */
enum {
	MAX_EVHDR_SIZE = PATH_MAX + 200,	/* upper bound on any event header */
	/* string namespaces; 0 is reserved to mean "not available" */
	EVTR_NS_PATH = 0x1,	/* source file paths */
	EVTR_NS_FUNC,		/* function names */
	EVTR_NS_DSTR,		/* data strings referenced from fmtdata */
	EVTR_NS_MAX,		/* one past the last valid namespace */
	NR_BUCKETS = 1023, /* XXX */
	REC_ALIGN = 8,		/* records are padded to this alignment */
	REC_BOUNDARY = 1 << 14,	/* a new record starts at least this often */
	FILTF_ID = 0x10,	/* filter's fmtid field is valid */
	EVTRF_WR = 0x1, /* open for writing */
};

/* on-disk id types keying into the string namespaces / fmt map */
typedef uint16_t fileid_t;	/* id in EVTR_NS_PATH */
typedef uint16_t funcid_t;	/* id in EVTR_NS_FUNC */
typedef uint16_t fmtid_t;	/* id in the fmt map */
64
/*
 * On-disk record layouts.  All records are packed and begin with a
 * trace_event_header; multi-byte fields are in host byte order.
 */
struct trace_event_header {
	uint8_t type;	/* EVTR_TYPE_* */
	uint64_t ts;	/* XXX: this should only be part of probe */
} __attribute__((packed));

struct probe_event_header {
	struct trace_event_header eh;
	/*
	 * For these fields, 0 implies "not available"
	 */
	fileid_t file;		/* id of the source path string */
	funcid_t caller1;
	funcid_t caller2;
	funcid_t func;		/* id of the enclosing function's name */
	uint16_t line;		/* source line of the probe */
	fmtid_t fmt;		/* id of the format string */
	uint16_t datalen;	/* bytes of fmtdata following the header */
	uint8_t cpu;	/* -1 if n/a */
} __attribute__((packed));

/* defines a string id within one of the EVTR_NS_* namespaces */
struct string_event_header {
	struct trace_event_header eh;
	uint16_t ns;	/* namespace, EVTR_NS_* */
	uint32_t id;
	uint16_t len;	/* string length; bytes follow the header */
} __attribute__((packed));

/* defines a fmt id; subsys and fmt bytes follow the header */
struct fmt_event_header {
	struct trace_event_header eh;
	uint16_t id;
	uint8_t subsys_len;
	uint8_t fmt_len;
} __attribute__((packed));

/* per-cpu data: core frequency, used to print timestamps in usecs */
struct cpuinfo_event_header {
	double freq;
	uint8_t cpu;
} __attribute__((packed));
103
/* chained-hash entry mapping a string to its assigned id (write path) */
struct hashentry {
	const char *str;	/* strdup()ed key */
	uint16_t id;
	struct hashentry *next;
};

struct hashtab {
	struct hashentry *buckets[NR_BUCKETS];
	uint16_t id;	/* last id handed out; wraps to 0 when exhausted */
};

/* a format string qualified by its subsystem name */
struct event_fmt {
	const char *subsys;
	const char *fmt;
};

/* a filter whose fmt string has not been resolved to an fmtid yet */
struct event_filter_unresolved {
	TAILQ_ENTRY(event_filter_unresolved) link;
	evtr_filter_t filt;
};
124
/* red-black tree node mapping an integer id to opaque data (read path) */
struct id_map {
	RB_ENTRY(id_map) rb_node;
	int id;
	const void *data;
};

RB_HEAD(id_tree, id_map);
struct string_map {
	struct id_tree root;
};

struct fmt_map {
	struct id_tree root;
};

/* threads are keyed by their kernel thread address (evtr_thread.id) */
RB_HEAD(thread_tree, evtr_thread);

struct thread_map {
	struct thread_tree root;
};

struct event_callback {
	void (*cb)(evtr_event_t, void *data);
	void *data;	/* this field must be malloc()ed */
};

struct cpu {
	struct evtr_thread *td;	/* currently executing thread */
	double freq;	/* core frequency, for timestamp conversion */
};
155
/* an open trace stream, either being written or being parsed */
struct evtr {
	FILE *f;
	int err;	/* errno-style code, 0 if none */
	int flags;	/* EVTRF_* */
	char *errmsg;	/* human-readable error, NULL if none */
	off_t bytes;	/* our own file offset, see assert_foff_in_sync() */
	union {
		/*
		 * When writing, we keep track of the strings we've
		 * already dumped so we only dump them once.
		 * Paths, function names etc belong to different
		 * namespaces.
		 */
		struct hashtab *strings[EVTR_NS_MAX - 1];
		/*
		 * When reading, we build a map from id to string.
		 * Every id must be defined at the point of use.
		 */
		struct string_map maps[EVTR_NS_MAX - 1];
	};
	union {
		/* same as above, but for subsys+fmt pairs */
		struct fmt_map fmtmap;
		struct hashtab *fmts;
	};
	/*
	 * Filters that have a format specified and we
	 * need to resolve that to an fmtid
	 */
	TAILQ_HEAD(, event_filter_unresolved) unresolved_filtq;
	struct event_callback **cbs;	/* internal probe callbacks */
	int ncbs;
	struct thread_map threads;	/* all threads seen so far */
	struct cpu *cpus;	/* per-cpu state; ncpus entries */
	int ncpus;
};

/* an iteration over the probes of an evtr that match some filters */
struct evtr_query {
	evtr_t evtr;
	off_t off;
	evtr_filter_t filt;
	int nfilt;
	int nmatched;	/* events that passed the filters */
	int ntried;	/* events tested against the filters */
	void *buf;	/* scratch buffer for probe fmtdata */
	int bufsize;
};
203
/* library-wide debug verbosity; non-zero enables printd() output */
static int
evtr_debug = 0;

/* set the debug verbosity level used by printd()/validate_string() */
void
evtr_set_debug(int lvl)
{
	evtr_debug = lvl;
}
212
/* id-keyed red-black tree used by the string and fmt maps */
static int id_map_cmp(struct id_map *, struct id_map *);
RB_PROTOTYPE2(id_tree, id_map, rb_node, id_map_cmp, int);
RB_GENERATE2(id_tree, id_map, rb_node, id_map_cmp, int, id);

/* thread tree keyed by kernel thread address */
static int thread_cmp(struct evtr_thread *, struct evtr_thread *);
RB_PROTOTYPE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *);
RB_GENERATE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *, id);

/* debug output to stderr, enabled via evtr_set_debug() */
#define printd(...) \
	do { \
		if (evtr_debug) \
			fprintf(stderr, __VA_ARGS__); \
	} while (0)
226
227static inline
228void
229validate_string(const char *str)
230{
231 if (!evtr_debug)
232 return;
233 for (; *str; ++str)
234 assert(isprint(*str));
235}
236
237static
238void
239id_tree_free(struct id_tree *root)
240{
241 struct id_map *v, *n;
242
243 for (v = RB_MIN(id_tree, root); v; v = n) {
244 n = RB_NEXT(id_tree, root, v);
245 RB_REMOVE(id_tree, root, v);
246 }
247}
248
249static
250int
251evtr_register_callback(evtr_t evtr, void (*fn)(evtr_event_t, void *), void *d)
252{
253 struct event_callback *cb;
254 void *cbs;
255
256 if (!(cb = malloc(sizeof(*cb)))) {
257 evtr->err = ENOMEM;
258 return !0;
259 }
260 cb->cb = fn;
261 cb->data = d;
262 if (!(cbs = realloc(evtr->cbs, (++evtr->ncbs) * sizeof(cb)))) {
263 --evtr->ncbs;
264 free(cb);
265 evtr->err = ENOMEM;
266 return !0;
267 }
268 evtr->cbs = cbs;
269 evtr->cbs[evtr->ncbs - 1] = cb;
270 return 0;
271}
272
273static
274void
275evtr_deregister_callbacks(evtr_t evtr)
276{
277 int i;
278
279 for (i = 0; i < evtr->ncbs; ++i) {
280 free(evtr->cbs[i]);
281 }
282 free(evtr->cbs);
283 evtr->cbs = NULL;
284}
285
286static
287void
288evtr_run_callbacks(evtr_event_t ev, evtr_t evtr)
289{
290 struct event_callback *cb;
291 int i;
292
293 for (i = 0; i < evtr->ncbs; ++i) {
294 cb = evtr->cbs[i];
295 cb->cb(ev, cb->data);
296 }
297}
298
299static
300struct cpu *
301evtr_cpu(evtr_t evtr, int c)
302{
303 if ((c < 0) || (c >= evtr->ncpus))
304 return NULL;
305 return &evtr->cpus[c];
306}
307
308static
309int
310parse_format_data(evtr_event_t ev, const char *fmt, ...) __attribute__((format (scanf, 2, 3)));
311static
312int
313parse_format_data(evtr_event_t ev, const char *fmt, ...)
314{
315 va_list ap;
316 char buf[2048];
317
318 if (strcmp(fmt, ev->fmt))
319 return 0;
320 vsnprintf(buf, sizeof(buf), fmt, ev->fmtdata);
321 printd("string is: %s\n", buf);
322 va_start(ap, fmt);
323 return vsscanf(buf, fmt, ap);
324}
325
326static
327void
328evtr_deregister_filters(evtr_t evtr, evtr_filter_t filt, int nfilt)
329{
330 struct event_filter_unresolved *u, *tmp;
331 int i;
332 TAILQ_FOREACH_MUTABLE(u, &evtr->unresolved_filtq, link, tmp) {
333 for (i = 0; i < nfilt; ++i) {
334 if (u->filt == &filt[i]) {
335 TAILQ_REMOVE(&evtr->unresolved_filtq, u, link);
336 }
337 }
338 }
339}
340
341static
342void
343evtr_resolve_filters(evtr_t evtr, const char *fmt, int id)
344{
345 struct event_filter_unresolved *u, *tmp;
346 TAILQ_FOREACH_MUTABLE(u, &evtr->unresolved_filtq, link, tmp) {
347 if ((u->filt->fmt != NULL) && !strcmp(fmt, u->filt->fmt)) {
348 u->filt->fmtid = id;
349 u->filt->flags |= FILTF_ID;
350 TAILQ_REMOVE(&evtr->unresolved_filtq, u, link);
351 }
352 }
353}
354
355static
356int
357evtr_filter_register(evtr_t evtr, evtr_filter_t filt)
358{
359 struct event_filter_unresolved *res;
360
361 if (!(res = malloc(sizeof(*res)))) {
362 evtr->err = ENOMEM;
363 return !0;
364 }
365 res->filt = filt;
366 TAILQ_INSERT_TAIL(&evtr->unresolved_filtq, res, link);
367 return 0;
368}
369
370void
371evtr_event_data(evtr_event_t ev, char *buf, size_t len)
372{
373 /*
374 * XXX: we implicitly trust the format string.
375 * We shouldn't.
376 */
377 if (ev->fmtdatalen) {
378 vsnprintf(buf, len, ev->fmt, ev->fmtdata);
379 } else {
380 strlcpy(buf, ev->fmt, len);
381 }
382}
383
384
385int
386evtr_error(evtr_t evtr)
387{
388 return evtr->err || (evtr->errmsg != NULL);
389}
390
391const char *
392evtr_errmsg(evtr_t evtr)
393{
394 return evtr->errmsg ? evtr->errmsg : strerror(evtr->err);
395}
396
397static
398int
399id_map_cmp(struct id_map *a, struct id_map *b)
400{
401 return a->id - b->id;
402}
403
404static
405int
406thread_cmp(struct evtr_thread *a, struct evtr_thread *b)
407{
408 return (int)a->id - (int)b->id;
409}
410
/*
 * Generate a typed lookup helper over an id_tree: returns the data
 * stored under @id, or NULL if the id is unknown.
 */
#define DEFINE_MAP_FIND(prefix, type) \
	static \
	type \
	prefix ## _map_find(struct id_tree *tree, int id)\
	{ \
		struct id_map *sid; \
		\
		sid = id_tree_RB_LOOKUP(tree, id); \
		return sid ? sid->data : NULL; \
	}

DEFINE_MAP_FIND(string, const char *)
DEFINE_MAP_FIND(fmt, const struct event_fmt *)
424
425static
426struct evtr_thread *
427thread_map_find(struct thread_map *map, void *id)
428{
429 return thread_tree_RB_LOOKUP(&map->root, id);
430}
431
/*
 * Generate a typed insert helper over an id_tree.  Re-inserting an id
 * with equal data (per @_cmp) is accepted as a no-op; re-inserting it
 * with different data returns EEXIST.  Newly inserted data is copied
 * via @_dup so the caller keeps ownership of its argument.
 */
#define DEFINE_MAP_INSERT(prefix, type, _cmp, _dup)	\
	static \
	int \
	prefix ## _map_insert(struct id_tree *tree, type data, int id) \
	{ \
	struct id_map *sid, *osid; \
	\
	sid = malloc(sizeof(*sid)); \
	if (!sid) { \
		return ENOMEM; \
	} \
	sid->id = id; \
	sid->data = data; \
	if ((osid = id_tree_RB_INSERT(tree, sid))) { \
		free(sid); \
		if (_cmp((type)osid->data, data)) { \
			return EEXIST; \
		} \
		printd("mapping already exists, skipping\n"); \
		/* we're OK with redefinitions of an id to the same string */ \
		return 0; \
	} \
	/* only do the strdup if we're inserting a new string */ \
	sid->data = _dup(data); /* XXX: oom */ \
	return 0; \
}
458
/*
 * Insert @td into the thread map.  If the kernel thread address is
 * already present, the existing node is kept and only its comm is
 * replaced with @td's; @td itself is then freed.
 */
static
void
thread_map_insert(struct thread_map *map, struct evtr_thread *td)
{
	struct evtr_thread *otd;

	if ((otd = thread_tree_RB_INSERT(&map->root, td))) {
		/*
		 * Thread addresses might be reused, we're
		 * ok with that.
		 * DANGER, Will Robinson: this means the user
		 * of the API needs to copy event->td if they
		 * want it to remain stable.
		 */
		free((void *)otd->comm);
		otd->comm = td->comm;
		free(td);
	}
}
478
479static
480int
481event_fmt_cmp(const struct event_fmt *a, const struct event_fmt *b)
482{
483 int ret = 0;
484
485 if (a->subsys) {
486 if (b->subsys) {
487 ret = strcmp(a->subsys, b->subsys);
488 } else {
489 ret = strcmp(a->subsys, "");
490 }
491 } else if (b->subsys) {
492 ret = strcmp("", b->subsys);
493 }
494 if (ret)
495 return ret;
496 return strcmp(a->fmt, b->fmt);
497}
498
499static
500struct event_fmt *
501event_fmt_dup(const struct event_fmt *o)
502{
503 struct event_fmt *n;
504
505 if (!(n = malloc(sizeof(*n)))) {
506 return n;
507 }
508 memcpy(n, o, sizeof(*n));
509 return n;
510}
511
/* instantiate the insert helpers for the string and fmt maps */
DEFINE_MAP_INSERT(string, const char *, strcmp, strdup)
DEFINE_MAP_INSERT(fmt, const struct event_fmt *, event_fmt_cmp, event_fmt_dup)
514
515static
516int
517hashfunc(const char *str)
518{
519 unsigned long hash = 5381;
520 int c;
521
522 while ((c = *str++))
523 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
524 return hash % NR_BUCKETS;
525}
526
527static
528struct hashentry *
529hash_find(struct hashtab *tab, const char *str)
530{
531 struct hashentry *ent;
532
533 for(ent = tab->buckets[hashfunc(str)]; ent && strcmp(ent->str, str);
534 ent = ent->next);
535
536 return ent;
537}
538
539static
540struct hashentry *
541hash_insert(struct hashtab *tab, const char *str)
542{
543 struct hashentry *ent;
544 int hsh;
545
546 if (!(ent = malloc(sizeof(*ent)))) {
547 fprintf(stderr, "out of memory\n");
548 return NULL;
549 }
550 hsh = hashfunc(str);
551 ent->next = tab->buckets[hsh];
552 ent->str = strdup(str);
553 ent->id = ++tab->id;
554 if (tab->id == 0) {
555 fprintf(stderr, "too many strings\n");
556 free(ent);
557 return NULL;
558 }
559 tab->buckets[hsh] = ent;
560 return ent;
561}
562
563static
564void
565thread_creation_callback(evtr_event_t ev, void *d)
566{
567 evtr_t evtr = (evtr_t)d;
568 struct evtr_thread *td;
569 void *ktd;
570 char buf[20];
571
572 //printd("thread_creation_callback\n");
573 if (parse_format_data(ev, "new_td %p %s", &ktd, buf) != 2) {
574 return;
575 }
576 buf[19] = '\0';
577
578 if (!(td = malloc(sizeof(*td)))) {
579 evtr->err = ENOMEM;
580 return;
581 }
582 td->id = ktd;
583 td->userdata = NULL;
584 if (!(td->comm = strdup(buf))) {
585 free(td);
586 evtr->err = ENOMEM;
587 return;
588 }
589 printd("inserting new thread %p: %s\n", td->id, td->comm);
590 thread_map_insert(&evtr->threads, td);
591}
592
/*
 * Probe callback tracking which thread runs on each cpu.  Parses
 * "sw %p > %p" probes.  When switching in a thread we have never seen,
 * it synthesizes a "new_td %p %s" creation event (comm = the thread
 * address in hex) so a thread object always exists for the cpu.
 */
static
void
thread_switch_callback(evtr_event_t ev, void *d)
{
	evtr_t evtr = (evtr_t)d;
	struct evtr_thread *tdp, *tdn;	/* previous / next thread */
	void *ktdp, *ktdn;		/* kernel thread addresses */
	struct cpu *cpu;
	static struct evtr_event tdcr;	/* synthesized creation event */
	static char *fmt = "new_td %p %s";
	char tidstr[40];
	/* fake fmtdata: a packed (void *, char *) argument pair, laid
	 * out the way mangle_string_ptrs()/parse_format_data() expect */
	char fmtdata[sizeof(void *) + sizeof(char *)];

	cpu = evtr_cpu(evtr, ev->cpu);
	if (!cpu) {
		printd("invalid cpu %d\n", ev->cpu);
		return;
	}
	if (parse_format_data(ev, "sw %p > %p", &ktdp, &ktdn) != 2) {
		return;	/* not a switch probe */
	}
	tdp = thread_map_find(&evtr->threads, ktdp);
	if (!tdp) {
		printd("switching from unknown thread %p\n", ktdp);
	}
	tdn = thread_map_find(&evtr->threads, ktdn);
	if (!tdn) {
		/*
		 * Fake a thread creation event for threads we
		 * haven't seen before.
		 */
		tdcr.type = EVTR_TYPE_PROBE;
		tdcr.ts = ev->ts;
		tdcr.file = NULL;
		tdcr.func = NULL;
		tdcr.line = 0;
		tdcr.fmt = fmt;
		tdcr.fmtdata = &fmtdata;
		tdcr.fmtdatalen = sizeof(fmtdata);
		tdcr.cpu = ev->cpu;
		tdcr.td = NULL;
		snprintf(tidstr, sizeof(tidstr), "%p", ktdn);
		((void **)fmtdata)[0] = ktdn;
		((char **)fmtdata)[1] = &tidstr[0];
		thread_creation_callback(&tdcr, evtr);

		tdn = thread_map_find(&evtr->threads, ktdn);
		assert(tdn != NULL);
		printd("switching to unknown thread %p\n", ktdn);
		cpu->td = tdn;
		return;
	}
	printd("cpu %d: switching to thread %p\n", ev->cpu, ktdn);
	cpu->td = tdn;
}
649
650static
651void
652assert_foff_in_sync(evtr_t evtr)
653{
654 off_t off;
655
656 /*
657 * We keep our own offset because we
658 * might want to support mmap()
659 */
660 off = ftello(evtr->f);
661 if (evtr->bytes != off) {
662 fprintf(stderr, "bytes %jd, off %jd\n", evtr->bytes, off);
663 abort();
664 }
665}
666
667static
668int
669evtr_write(evtr_t evtr, const void *buf, size_t bytes)
670{
671 assert_foff_in_sync(evtr);
672 if (fwrite(buf, bytes, 1, evtr->f) != 1) {
673 evtr->err = errno;
674 evtr->errmsg = strerror(errno);
675 return !0;
676 }
677 evtr->bytes += bytes;
678 assert_foff_in_sync(evtr);
679 return 0;
680}
681
/*
 * Called after dumping a record to make sure the next
 * record is REC_ALIGN aligned. This does not make much sense,
 * as we shouldn't be using packed structs anyway.
 *
 * NOTE(review): when evtr->bytes is already aligned, pad computes to
 * REC_ALIGN (not 0), so a full REC_ALIGN bytes of padding are still
 * written.  The parse side presumably skips padding symmetrically, so
 * changing this alone would break the on-disk format -- verify against
 * the reader before "fixing".
 */
static
int
evtr_dump_pad(evtr_t evtr)
{
	size_t pad;
	static char buf[REC_ALIGN];	/* static: zero-filled pad bytes */

	pad = REC_ALIGN - (evtr->bytes % REC_ALIGN);
	if (pad > 0) {
		return evtr_write(evtr, buf, pad);
	}
	return 0;
}
700
701/*
702 * We make sure that there is a new record every REC_BOUNDARY
703 * bytes, this costs next to nothing in space and allows for
704 * fast seeking.
705 */
706static
707int
708evtr_dump_avoid_boundary(evtr_t evtr, size_t bytes)
709{
710 unsigned pad, i;
711 static char buf[256];
712
713 pad = REC_BOUNDARY - (evtr->bytes % REC_BOUNDARY);
714 /* if adding @bytes would cause us to cross a boundary... */
715 if (bytes > pad) {
716 /* then pad to the boundary */
717 for (i = 0; i < (pad / sizeof(buf)); ++i) {
718 if (evtr_write(evtr, buf, sizeof(buf))) {
719 return !0;
720 }
721 }
722 i = pad % sizeof(buf);
723 if (i) {
724 if (evtr_write(evtr, buf, i)) {
725 return !0;
726 }
727 }
728 }
729 return 0;
730}
731
/*
 * Ensure the subsys+fmt pair of @ev has an id (write path): return
 * the existing id, or assign a new one and dump an fmt record for it.
 * Returns 0 on error -- note 0 is also the "not available" fmt id, so
 * callers must check evtr->err.
 */
static
int
evtr_dump_fmt(evtr_t evtr, uint64_t ts, const evtr_event_t ev)
{
	struct fmt_event_header fmt;
	struct hashentry *ent;
	char *subsys = "", buf[1024];

	/* the hash key is the concatenation of subsys and fmt */
	if (strlcpy(buf, subsys, sizeof(buf)) >= sizeof(buf)) {
		evtr->errmsg = "name of subsystem is too large";
		evtr->err = ERANGE;
		return 0;
	}
	if (strlcat(buf, ev->fmt, sizeof(buf)) >= sizeof(buf)) {
		evtr->errmsg = "fmt + name of subsystem is too large";
		evtr->err = ERANGE;
		return 0;
	}

	if ((ent = hash_find(evtr->fmts, buf))) {
		return ent->id;	/* already dumped */
	}
	if (!(ent = hash_insert(evtr->fmts, buf))) {
		/* a zero table id means the id space was exhausted */
		evtr->err = evtr->fmts->id ? ENOMEM : ERANGE;
		return 0;
	}

	fmt.eh.type = EVTR_TYPE_FMT;
	fmt.eh.ts = ts;
	fmt.subsys_len = strlen(subsys);
	fmt.fmt_len = strlen(ev->fmt);
	fmt.id = ent->id;
	/* keep the whole record on one side of a REC_BOUNDARY */
	if (evtr_dump_avoid_boundary(evtr, sizeof(fmt) + fmt.subsys_len +
				     fmt.fmt_len))
		return 0;
	if (evtr_write(evtr, &fmt, sizeof(fmt)))
		return 0;
	if (evtr_write(evtr, subsys, fmt.subsys_len))
		return 0;
	if (evtr_write(evtr, ev->fmt, fmt.fmt_len))
		return 0;
	if (evtr_dump_pad(evtr))
		return 0;
	return fmt.id;
}
777
/*
 * Replace string pointers or string ids in fmtdata
 *
 * Walks the printf-style @fmt and steps through @fmtdata by the size
 * of each conversion's argument.  For every %s conversion, the
 * pointer-sized value in fmtdata is passed through @replace (with
 * @ctx), which swaps a string pointer for an id (write path) or an id
 * for a pointer (read path).
 *
 * Returns the number of %s conversions processed, or -1 on an unknown
 * conversion specifier.
 */
static
int
mangle_string_ptrs(const char *fmt, uint8_t *fmtdata,
		   const char *(*replace)(void *, const char *), void *ctx)
{
	const char *f, *p;
	size_t skipsize, intsz;
	int ret = 0;

	for (f = fmt; f[0] != '\0'; ++f) {
		if (f[0] != '%')
			continue;
		++f;
		skipsize = 0;
		/*
		 * Eat flags. Notice this will accept duplicate
		 * flags.
		 */
		for (p = f; p[0]; ++p) {
			int again = 0;
			switch (p[0]) {
			case '#':
			case '0':
			case '-':
			case ' ':
			case '+':
			case '\'':
				again = !0;
				break;
			}
			if (!again)
				break;
		}
		/* Eat minimum field width, if any (cast: plain char
		 * may be negative, which is UB for isdigit) */
		for (; isdigit((unsigned char)p[0]); ++p)
			;
		if (p[0] == '.')
			++p;
		/* Eat precision, if any */
		for (; isdigit((unsigned char)p[0]); ++p)
			;
		/* length modifier */
		intsz = 0;
		switch (p[0]) {
		case 'l':
			if (p[1] == 'l') {
				++p;
				intsz = sizeof(long long);
			} else {
				intsz = sizeof(long);
			}
			break;
		case 'j':
			intsz = sizeof(intmax_t);
			break;
		case 't':
			intsz = sizeof(ptrdiff_t);
			break;
		case 'z':
			intsz = sizeof(size_t);
			break;
		default:
			break;
		}
		if (intsz != 0)
			++p;
		else
			intsz = sizeof(int);

		switch (p[0]) {
		case '%':
			/* literal "%%": consumes no argument.  This
			 * used to fall into the error path below. */
			skipsize = 0;
			break;
		case 'd':
		case 'i':
		case 'o':
		case 'u':
		case 'x':
		case 'X':
		case 'c':
			skipsize = intsz;
			break;
		case 'p':
			skipsize = sizeof(void *);
			break;
		case 'f':
			/* NOTE(review): assumes the producer packs a raw
			 * float unless the 'l' modifier was given --
			 * verify against the kernel encoder */
			if (p[-1] == 'l')
				skipsize = sizeof(double);
			else
				skipsize = sizeof(float);
			break;
		case 's':
			((const char **)fmtdata)[0] =
				replace(ctx, ((char **)fmtdata)[0]);
			skipsize = sizeof(char *);
			++ret;
			break;
		default:
			fprintf(stderr, "Unknown conversion specifier %c "
				"in fmt starting with %s", p[0], f - 1);
			return -1;
		}
		fmtdata += skipsize;
	}
	return ret;
}
883
/* XXX: do we really want the timestamp? */
/*
 * Ensure @str has an id in namespace @ns (write path): return the
 * existing id, or assign one and dump a string record.  Returns 0 on
 * error; callers must check evtr->err.
 *
 * NOTE(review): strings[] is indexed directly with ns in
 * [1, EVTR_NS_MAX - 1], but the array is declared with
 * EVTR_NS_MAX - 1 elements, so the last namespace indexes one slot
 * past the declared size.  Verify together with the allocation loop
 * in evtr_open_write() and the destroy loop in evtr_close().
 */
static
int
evtr_dump_string(evtr_t evtr, uint64_t ts, const char *str, int ns)
{
	struct string_event_header s;
	struct hashentry *ent;

	assert((0 <= ns) && (ns < EVTR_NS_MAX));
	if ((ent = hash_find(evtr->strings[ns], str))) {
		return ent->id;	/* already dumped */
	}
	if (!(ent = hash_insert(evtr->strings[ns], str))) {
		/* a zero table id means the id space was exhausted */
		evtr->err = evtr->strings[ns]->id ? ENOMEM : ERANGE;
		return 0;
	}

	printd("hash_insert %s ns %d id %d\n", str, ns, ent->id);
	s.eh.type = EVTR_TYPE_STR;
	s.eh.ts = ts;
	s.ns = ns;
	s.id = ent->id;
	s.len = strnlen(str, PATH_MAX);

	if (evtr_dump_avoid_boundary(evtr, sizeof(s) + s.len))
		return 0;
	if (evtr_write(evtr, &s, sizeof(s)))
		return 0;
	if (evtr_write(evtr, str, s.len))
		return 0;
	if (evtr_dump_pad(evtr))
		return 0;
	return s.id;
}
918
/* context for the string-replacement pass over probe fmtdata */
struct replace_ctx {
	evtr_t evtr;
	uint64_t ts;
};

/*
 * Write path: swap a string pointer in fmtdata for its dumped id.
 * The id is smuggled back through the const char * return value
 * (replace_strid performs the inverse) -- intentional type punning.
 */
static
const char *
replace_strptr(void *_ctx, const char *s)
{
	struct replace_ctx *ctx = _ctx;
	return (const char *)evtr_dump_string(ctx->evtr, ctx->ts, s, EVTR_NS_DSTR);
}
931
/*
 * Read path: swap a string id in fmtdata for the actual string.  The
 * id arrives smuggled inside the const char * argument (inverse of
 * replace_strptr) -- intentional type punning, hence the casts.
 */
static
const char *
replace_strid(void *_ctx, const char *s)
{
	struct replace_ctx *ctx = _ctx;
	const char *ret;

	ret = string_map_find(&ctx->evtr->maps[EVTR_NS_DSTR - 1].root,
			      (uint32_t)s);
	if (!ret) {
		fprintf(stderr, "Unknown id for data string\n");
		ctx->evtr->errmsg = "unknown id for data string";
		ctx->evtr->err = !0;
	}
	/* NOTE(review): validate_string() walks the string without a
	 * NULL check, so with debugging on an unknown id would crash */
	validate_string(ret);
	printd("replacing strid %d (ns %d) with string '%s' (or int %#x)\n", (int)s,
	       EVTR_NS_DSTR, ret ? ret : "NULL", (int)ret);
	return ret;
}
951
952static
953int
954evtr_dump_probe(evtr_t evtr, evtr_event_t ev)
955{
956 struct probe_event_header kev;
957 char buf[1024];
958
959 memset(&kev, '\0', sizeof(kev));
960 kev.eh.type = ev->type;
961 kev.eh.ts = ev->ts;
962 kev.line = ev->line;
963 kev.cpu = ev->cpu;
964 if (ev->file) {
965 kev.file = evtr_dump_string(evtr, kev.eh.ts, ev->file,
966 EVTR_NS_PATH);
967 }
968 if (ev->func) {
969 kev.func = evtr_dump_string(evtr, kev.eh.ts, ev->func,
970 EVTR_NS_FUNC);
971 }
972 if (ev->fmt) {
973 kev.fmt = evtr_dump_fmt(evtr, kev.eh.ts, ev);
974 }
975 if (ev->fmtdata) {
976 struct replace_ctx replctx = {
977 .evtr = evtr,
978 .ts = ev->ts,
979 };
980 assert(ev->fmtdatalen <= sizeof(buf));
981 kev.datalen = ev->fmtdatalen;
982 /*
983 * Replace all string pointers with string ids before dumping
984 * the data.
985 */
986 memcpy(buf, ev->fmtdata, ev->fmtdatalen);
987 if (mangle_string_ptrs(ev->fmt, buf,
988 replace_strptr, &replctx) < 0)
989 return !0;
990 if (evtr->err)
991 return evtr->err;
992 }
993 if (evtr_dump_avoid_boundary(evtr, sizeof(kev) + ev->fmtdatalen))
994 return !0;
995 if (evtr_write(evtr, &kev, sizeof(kev)))
996 return !0;
997 if (evtr_write(evtr, buf, ev->fmtdatalen))
998 return !0;
999 if (evtr_dump_pad(evtr))
1000 return !0;
1001 return 0;
1002}
1003
1004static
1005int
1006evtr_dump_sysinfo(evtr_t evtr, evtr_event_t ev)
1007{
1008 uint8_t type = EVTR_TYPE_SYSINFO;
1009 uint16_t ncpus = ev->ncpus;
1010
1011 if (ncpus <= 0) {
1012 evtr->errmsg = "invalid number of cpus";
1013 return !0;
1014 }
1015 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ncpus)))
1016 return !0;
1017 if (evtr_write(evtr, &type, sizeof(type))) {
1018 return !0;
1019 }
1020 if (evtr_write(evtr, &ncpus, sizeof(ncpus))) {
1021 return !0;
1022 }
1023 if (evtr_dump_pad(evtr))
1024 return !0;
1025 return 0;
1026}
1027static
1028int
1029evtr_dump_cpuinfo(evtr_t evtr, evtr_event_t ev)
1030{
1031 struct cpuinfo_event_header ci;
1032 uint8_t type;
1033
1034 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ci)))
1035 return !0;
1036 type = EVTR_TYPE_CPUINFO;
1037 if (evtr_write(evtr, &type, sizeof(type))) {
1038 return !0;
1039 }
1040 ci.cpu = ev->cpu;
1041 ci.freq = ev->cpuinfo.freq;
1042 if (ci.cpu < 0) {
1043 evtr->errmsg = "invalid cpu";
1044 return !0;
1045 }
1046 if (evtr_dump_avoid_boundary(evtr, sizeof(ci)))
1047 return !0;
1048 if (evtr_write(evtr, &ci, sizeof(ci))) {
1049 return !0;
1050 }
1051 if (evtr_dump_pad(evtr))
1052 return !0;
1053 return 0;
1054}
1055
1056int
1057evtr_rewind(evtr_t evtr)
1058{
1059 assert((evtr->flags & EVTRF_WR) == 0);
1060 evtr->bytes = 0;
1061 if (fseek(evtr->f, 0, SEEK_SET)) {
1062 evtr->err = errno;
1063 return !0;
1064 }
1065 return 0;
1066}
1067
1068int
1069evtr_dump_event(evtr_t evtr, evtr_event_t ev)
1070{
1071 switch (ev->type) {
1072 case EVTR_TYPE_PROBE:
1073 return evtr_dump_probe(evtr, ev);
1074 case EVTR_TYPE_SYSINFO:
1075 return evtr_dump_sysinfo(evtr, ev);
1076 case EVTR_TYPE_CPUINFO:
1077 return evtr_dump_cpuinfo(evtr, ev);
1078 }
1079 evtr->errmsg = "unknown event type";
1080 return !0;
1081}
1082
1083static
1084evtr_t
1085evtr_alloc(FILE *f)
1086{
1087 evtr_t evtr;
1088 if (!(evtr = malloc(sizeof(*evtr)))) {
1089 return NULL;
1090 }
1091
1092 evtr->f = f;
1093 evtr->err = 0;
1094 evtr->errmsg = NULL;
1095 evtr->bytes = 0;
1096 TAILQ_INIT(&evtr->unresolved_filtq);
1097 return evtr;
1098}
1099
/*
 * Create a read-mode evtr on the already-open stream @f.  Registers
 * the internal thread-tracking callbacks, consumes the first event so
 * any leading sysinfo/cpuinfo records are picked up, then rewinds.
 * Returns NULL on failure; @f remains owned by the caller.
 */
evtr_t
evtr_open_read(FILE *f)
{
	evtr_t evtr;
	struct evtr_event ev;
	int i;

	if (!(evtr = evtr_alloc(f))) {
		return NULL;
	}
	evtr->flags = 0;
	for (i = 0; i < (EVTR_NS_MAX - 1); ++i) {
		RB_INIT(&evtr->maps[i].root);
	}
	RB_INIT(&evtr->fmtmap.root);
	TAILQ_INIT(&evtr->unresolved_filtq);
	evtr->cbs = 0;
	evtr->ncbs = 0;
	RB_INIT(&evtr->threads.root);
	evtr->cpus = NULL;
	evtr->ncpus = 0;
	if (evtr_register_callback(evtr, &thread_creation_callback, evtr)) {
		goto free_evtr;
	}
	if (evtr_register_callback(evtr, &thread_switch_callback, evtr)) {
		goto free_cbs;
	}
	/*
	 * Load the first event so we can pick up any
	 * sysinfo entries.
	 */
	if (evtr_next_event(evtr, &ev)) {
		goto free_cbs;
	}
	if (evtr_rewind(evtr))
		goto free_cbs;
	return evtr;
free_cbs:
	evtr_deregister_callbacks(evtr);
free_evtr:
	free(evtr);
	return NULL;
}
1143
/*
 * Create a write-mode evtr on the already-open stream @f, allocating
 * the fmt table and the per-namespace string tables.  Returns NULL on
 * OOM; @f remains owned by the caller.
 *
 * NOTE(review): the loop bound is EVTR_NS_MAX, but evtr->strings is
 * declared with EVTR_NS_MAX - 1 elements, so the last iteration writes
 * one slot past the declared array (into the adjacent union).  This
 * mirrors evtr_dump_string() indexing strings[ns] with ns up to
 * EVTR_NS_MAX - 1, and the destroy loop in evtr_close() -- all three
 * places need to be verified and fixed together.
 */
evtr_t
evtr_open_write(FILE *f)
{
	evtr_t evtr;
	int i, j;

	if (!(evtr = evtr_alloc(f))) {
		return NULL;
	}

	evtr->flags = EVTRF_WR;
	if (!(evtr->fmts = calloc(sizeof(struct hashtab), 1)))
		goto free_evtr;

	for (i = 0; i < EVTR_NS_MAX; ++i) {
		evtr->strings[i] = calloc(sizeof(struct hashtab), 1);
		if (!evtr->strings[i]) {
			for (j = 0; j < i; ++j) {
				free(evtr->strings[j]);
			}
			goto free_fmts;
		}
	}

	return evtr;
free_fmts:
	free(evtr->fmts);
free_evtr:
	free(evtr);
	return NULL;
}
1175
1176static
1177void
1178hashtab_destroy(struct hashtab *h)
1179{
1180 struct hashentry *ent, *next;
1181 int i;
1182 for (i = 0; i < NR_BUCKETS; ++i) {
1183 for (ent = h->buckets[i]; ent; ent = next) {
1184 next = ent->next;
1185 free(ent);
1186 }
1187 }
1188 free(h);
1189}
1190
/*
 * Release an evtr and everything it owns.  Does not fclose(evtr->f);
 * the stream belongs to the caller.
 *
 * NOTE(review): on the write path this destroys strings[0] through
 * strings[EVTR_NS_MAX - 1], i.e. EVTR_NS_MAX tables, but the array is
 * declared with EVTR_NS_MAX - 1 elements.  This mirrors the matching
 * allocation loop in evtr_open_write() and looks like an off-by-one
 * that must be verified and fixed in both places at once.
 */
void
evtr_close(evtr_t evtr)
{
	int i;

	if (evtr->flags & EVTRF_WR) {
		hashtab_destroy(evtr->fmts);
		for (i = 0; i < EVTR_NS_MAX; ++i)
			hashtab_destroy(evtr->strings[i]);
	} else {
		id_tree_free(&evtr->fmtmap.root);
		for (i = 0; i < EVTR_NS_MAX - 1; ++i) {
			id_tree_free(&evtr->maps[i].root);
		}
	}
	free(evtr);
}
1208
1209static
1210int
1211evtr_read(evtr_t evtr, void *buf, size_t size)
1212{
1213 assert(size > 0);
1214 assert_foff_in_sync(evtr);
1215// printd("evtr_read at %#jx, %zd bytes\n", evtr->bytes, size);
1216 if (fread(buf, size, 1, evtr->f) != 1) {
1217 if (feof(evtr->f)) {
1218 evtr->errmsg = "incomplete record";
1219 } else {
1220 evtr->errmsg = strerror(errno);
1221 }
1222 return !0;
1223 }
1224 evtr->bytes += size;
1225 assert_foff_in_sync(evtr);
1226 return 0;
1227}
1228
/*
 * Parse an fmt record (the common trace_event_header is already in
 * @buf) and register the subsys+fmt pair under its id in
 * evtr->fmtmap, resolving any filters waiting on that format string.
 * Returns non-zero only on read/alloc failure before the insert;
 * insert failures are reported via evtr->err/errmsg.
 */
static
int
evtr_load_fmt(evtr_t evtr, char *buf)
{
	struct fmt_event_header *evh = (struct fmt_event_header *)buf;
	struct event_fmt *fmt;
	char *subsys = NULL, *fmtstr;

	if (!(fmt = malloc(sizeof(*fmt)))) {
		evtr->err = errno;
		return !0;
	}
	/* read the remainder of the fmt header past the common part */
	if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
		      sizeof(*evh) - sizeof(evh->eh))) {
		goto free_fmt;
	}
	/* the kernel does not emit subsystem names yet */
	assert(!evh->subsys_len);
	if (evh->subsys_len) {
		/* NOTE(review): unlike fmtstr below, subsys is not
		 * NUL-terminated here -- confirm before this path is
		 * ever enabled */
		if (!(subsys = malloc(evh->subsys_len))) {
			evtr->err = errno;
			goto free_fmt;
		}
		if (evtr_read(evtr, subsys, evh->subsys_len)) {
			goto free_subsys;
		}
		fmt->subsys = subsys;
	} else {
		fmt->subsys = "";
	}
	if (!(fmtstr = malloc(evh->fmt_len + 1))) {
		evtr->err = errno;
		goto free_subsys;
	}
	if (evtr_read(evtr, fmtstr, evh->fmt_len)) {
		goto free_fmtstr;
	}
	fmtstr[evh->fmt_len] = '\0';
	fmt->fmt = fmtstr;

	printd("fmt_map_insert (%d, %s)\n", evh->id, fmt->fmt);
	/* NOTE(review): the map stores a shallow duplicate of fmt, so
	 * the fmt struct itself appears to leak on success/EEXIST --
	 * verify ownership */
	evtr->err = fmt_map_insert(&evtr->fmtmap.root, fmt, evh->id);
	switch (evtr->err) {
	case ENOMEM:
		evtr->errmsg = "out of memory";
		break;
	case EEXIST:
		evtr->errmsg = "redefinition of an id to a "
			"different format (corrupt input)";
		break;
	default:
		evtr_resolve_filters(evtr, fmt->fmt, evh->id);
	}
	return 0;

free_fmtstr:
	free(fmtstr);
free_subsys:
	if (subsys)
		free(subsys);
free_fmt:
	free(fmt);
	return !0;
}
1292
1293static
1294int
1295evtr_load_string(evtr_t evtr, char *buf)
1296{
1297 char sbuf[PATH_MAX + 1];
1298 struct string_event_header *evh = (struct string_event_header *)buf;
1299
1300 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1301 sizeof(*evh) - sizeof(evh->eh))) {
1302 return !0;
1303 }
1304 if (evh->len > PATH_MAX) {
1305 evtr->errmsg = "string too large (corrupt input)";
1306 return !0;
1307 } else if (evh->len < 0) {
1308 evtr->errmsg = "negative string size (corrupt input)";
1309 return !0;
1310 }
1311 if (evh->len && evtr_read(evtr, sbuf, evh->len)) {
1312 return !0;
1313 }
1314 sbuf[evh->len] = 0;
1315 if (evh->ns >= EVTR_NS_MAX) {
1316 evtr->errmsg = "invalid namespace (corrupt input)";
1317 return !0;
1318 }
1319 validate_string(sbuf);
1320 printd("evtr_load_string:ns %d id %d : \"%s\"\n", evh->ns, evh->id,
1321 sbuf);
1322 evtr->err = string_map_insert(&evtr->maps[evh->ns - 1].root, sbuf, evh->id);
1323 switch (evtr->err) {
1324 case ENOMEM:
1325 evtr->errmsg = "out of memory";
1326 break;
1327 case EEXIST:
1328 evtr->errmsg = "redefinition of an id to a "
1329 "different string (corrupt input)";
1330 break;
1331 default:
1332 ;
1333 }
1334 return 0;
1335}
1336
1337static
1338int
1339evtr_filter_match(evtr_filter_t f, struct probe_event_header *pev)
1340{
1341 if ((f->cpu != -1) && (f->cpu != pev->cpu))
1342 return 0;
1343 if (!f->fmtid)
1344 return !0;
1345 /*
1346 * If we don't have an id for the required format
1347 * string, the format string won't match anyway
1348 * (we require that id <-> fmt mappings appear
1349 * before the first appearance of the fmt string),
1350 * so don't bother comparing.
1351 */
1352 if (!(f->flags & FILTF_ID))
1353 return 0;
1354 if(pev->fmt == f->fmtid)
1355 return !0;
1356 return 0;
1357}
1358
1359static
1360int
1361evtr_match_filters(struct evtr_query *q, struct probe_event_header *pev)
1362{
1363 int i;
1364
1365 /* no filters means we're interested in all events */
1366 if (!q->nfilt)
1367 return !0;
1368 ++q->ntried;
1369 for (i = 0; i < q->nfilt; ++i) {
1370 if (evtr_filter_match(&q->filt[i], pev)) {
1371 ++q->nmatched;
1372 return !0;
1373 }
1374 }
1375 return 0;
1376}
1377
1378static
1379int
1380evtr_skip(evtr_t evtr, off_t bytes)
1381{
1382 if (fseek(evtr->f, bytes, SEEK_CUR)) {
1383 evtr->err = errno;
1384 evtr->errmsg = strerror(errno);
1385 return !0;
1386 }
1387 evtr->bytes += bytes;
1388 return 0;
1389}
1390
1391/*
1392 * Make sure q->buf is at least len bytes
1393 */
1394static
1395int
1396evtr_query_reserve_buf(struct evtr_query *q, int len)
1397{
1398 void *tmp;
1399
1400 if (q->bufsize >= len)
1401 return 0;
1402 if (!(tmp = realloc(q->buf, len)))
1403 return !0;
1404 q->buf = tmp;
1405 q->bufsize = len;
1406 return 0;
1407}
1408
/*
 * Load the remainder of a probe record (the caller has already read the
 * generic trace_event_header into buf), resolve string/format ids and
 * fill in *ev.  Runs registered callbacks on the event before filtering.
 * Returns -1 if the event does not match the query's filters, otherwise
 * evtr->err (0 when no error was recorded along the way).
 */
static
int
evtr_load_probe(evtr_t evtr, evtr_event_t ev, char *buf, struct evtr_query *q)
{
	struct probe_event_header *evh = (struct probe_event_header *)buf;
	struct cpu *cpu;

	/* Read the probe-specific part of the header after the generic one. */
	if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
		      sizeof(*evh) - sizeof(evh->eh)))
		return !0;
	memset(ev, '\0', sizeof(*ev));
	ev->ts = evh->eh.ts;
	ev->type = EVTR_TYPE_PROBE;
	ev->line = evh->line;
	ev->cpu = evh->cpu;
	/* Attribute the event to that cpu's current thread, if known. */
	if ((cpu = evtr_cpu(evtr, evh->cpu))) {
		ev->td = cpu->td;
	} else {
		ev->td = NULL;
	}
	/* A zero file id means no path was recorded for this probe. */
	if (evh->file) {
		ev->file = string_map_find(
			&evtr->maps[EVTR_NS_PATH - 1].root,
			evh->file);
		if (!ev->file) {
			/* Record the error but keep going with a placeholder. */
			evtr->errmsg = "unknown id for file path";
			evtr->err = !0;
			ev->file = "<unknown>";
		} else {
			validate_string(ev->file);
		}
	} else {
		ev->file = "<unknown>";
	}
	if (evh->fmt) {
		const struct event_fmt *fmt;
		if (!(fmt = fmt_map_find(&evtr->fmtmap.root, evh->fmt))) {
			evtr->errmsg = "unknown id for event fmt";
			evtr->err = !0;
			ev->fmt = NULL;
		} else {
			ev->fmt = fmt->fmt;
			validate_string(fmt->fmt);
		}
	}
	if (evh->datalen) {
		/* +1 leaves room for the NUL terminator appended below. */
		if (evtr_query_reserve_buf(q, evh->datalen + 1)) {
			evtr->err = ENOMEM;
		} else if (!evtr_read(evtr, q->buf, evh->datalen)) {
			struct replace_ctx replctx = {
				.evtr = evtr,
				.ts = ev->ts,
			};
			/*
			 * NOTE(review): a record with datalen but no (or an
			 * unresolved) fmt trips this assert -- presumably the
			 * writer never produces that; confirm.
			 */
			assert(ev->fmt);

			ev->fmtdata = q->buf;
			/*
			 * If the format specifies any string pointers, there
			 * is a string id stored in the fmtdata. Look it up
			 * and replace it with a string pointer before
			 * returning it to the user.
			 */
			if (mangle_string_ptrs(ev->fmt, __DECONST(uint8_t *,
							ev->fmtdata),
					       replace_strid, &replctx) < 0)
				return evtr->err;
			if (evtr->err)
				return evtr->err;
			((char *)ev->fmtdata)[evh->datalen] = '\0';
			ev->fmtdatalen = evh->datalen;
		}
	}
	evtr_run_callbacks(ev, evtr);
	/* we can't filter before running the callbacks */
	if (!evtr_match_filters(q, evh)) {
		return -1; /* no match */
	}

	return evtr->err;
}
1489
1490static
1491int
1492evtr_skip_to_record(evtr_t evtr)
1493{
1494 int skip;
1495
1496 skip = REC_ALIGN - (evtr->bytes % REC_ALIGN);
1497 if (skip > 0) {
1498 if (fseek(evtr->f, skip, SEEK_CUR)) {
1499 evtr->err = errno;
1500 evtr->errmsg = strerror(errno);
1501 return !0;
1502 }
1503 evtr->bytes += skip;
1504 }
1505 return 0;
1506}
1507
1508static
1509int
1510evtr_load_sysinfo(evtr_t evtr)
1511{
1512 uint16_t ncpus;
1513 int i;
1514
1515 if (evtr_read(evtr, &ncpus, sizeof(ncpus))) {
1516 return !0;
1517 }
1518 if (evtr->cpus)
1519 return 0;
1520 evtr->cpus = malloc(ncpus * sizeof(struct cpu));
1521 if (!evtr->cpus) {
1522 evtr->err = ENOMEM;
1523 return !0;
1524 }
1525 evtr->ncpus = ncpus;
1526 for (i = 0; i < ncpus; ++i) {
1527 evtr->cpus[i].td = NULL;
1528 evtr->cpus[i].freq = -1.0;
1529 }
1530 return 0;
1531}
1532
1533static
1534int
1535evtr_load_cpuinfo(evtr_t evtr)
1536{
1537 struct cpuinfo_event_header cih;
1538 struct cpu *cpu;
1539
1540 if (evtr_read(evtr, &cih, sizeof(cih))) {
1541 return !0;
1542 }
1543 if (cih.freq < 0.0) {
1544 evtr->errmsg = "cpu freq is negative";
1545 evtr->err = EINVAL;
1546 return !0;
1547 }
1548 /*
1549 * Notice that freq is merely a multiplier with
1550 * which we convert a timestamp to seconds; if
1551 * ts is not in cycles, freq is not the frequency.
1552 */
1553 if (!(cpu = evtr_cpu(evtr, cih.cpu))) {
1554 evtr->errmsg = "freq for invalid cpu";
1555 evtr->err = EINVAL;
1556 return !0;
1557 }
1558 cpu->freq = cih.freq;
1559 return 0;
1560}
1561
1562static
1563int
1564_evtr_next_event(evtr_t evtr, evtr_event_t ev, struct evtr_query *q)
1565{
1566 char buf[MAX_EVHDR_SIZE];
1567 int ret, err, ntried, nmatched;
1568 struct trace_event_header *evhdr = (struct trace_event_header *)buf;
1569
1570 for (ret = 0; !ret;) {
1571 /*
1572 * skip pad records -- this will only happen if there's a
1573 * variable sized record close to the boundary
1574 */
1575 if (evtr_read(evtr, &evhdr->type, 1))
1576 return feof(evtr->f) ? -1 : !0;
1577 if (evhdr->type == EVTR_TYPE_PAD) {
1578 evtr_skip_to_record(evtr);
1579 continue;
1580 }
1581 if (evhdr->type == EVTR_TYPE_SYSINFO) {
1582 evtr_load_sysinfo(evtr);
1583 continue;
1584 } else if (evhdr->type == EVTR_TYPE_CPUINFO) {
1585 evtr_load_cpuinfo(evtr);
1586 continue;
1587 }
1588 if (evtr_read(evtr, buf + 1, sizeof(*evhdr) - 1))
1589 return feof(evtr->f) ? -1 : !0;
1590 switch (evhdr->type) {
1591 case EVTR_TYPE_PROBE:
1592 ntried = q->ntried;
1593 nmatched = q->nmatched;
1594 if ((err = evtr_load_probe(evtr, ev, buf, q))) {
1595 if (err == -1) {
1596 /* no match */
1597 ret = 0;
1598 } else {
1599 return !0;
1600 }
1601 } else {
1602 ret = !0;
1603 }
1604 break;
1605 case EVTR_TYPE_STR:
1606 if (evtr_load_string(evtr, buf)) {
1607 return !0;
1608 }
1609 break;
1610 case EVTR_TYPE_FMT:
1611 if (evtr_load_fmt(evtr, buf)) {
1612 return !0;
1613 }
1614 break;
1615 default:
1616 evtr->err = !0;
1617 evtr->errmsg = "unknown event type (corrupt input?)";
1618 return !0;
1619 }
1620 evtr_skip_to_record(evtr);
1621 if (ret) {
1622 q->off = evtr->bytes;
1623 return 0;
1624 }
1625 }
1626 /* can't get here */
1627 return !0;
1628}
1629
1630int
1631evtr_next_event(evtr_t evtr, evtr_event_t ev)
1632{
1633 struct evtr_query *q;
1634 int ret;
1635
1636 if (!(q = evtr_query_init(evtr, NULL, 0))) {
1637 evtr->err = ENOMEM;
1638 return !0;
1639 }
1640 ret = _evtr_next_event(evtr, ev, q);
1641 evtr_query_destroy(q);
1642 return ret;
1643}
1644
1645int
1646evtr_last_event(evtr_t evtr, evtr_event_t ev)
1647{
1648 struct stat st;
1649 int fd;
1650 off_t last_boundary;
1651
1652 fd = fileno(evtr->f);
1653 if (fstat(fd, &st))
1654 return !0;
1655 /*
1656 * This skips pseudo records, so we can't provide
1657 * an event with all fields filled in this way.
1658 * It's doable, just needs some care. TBD.
1659 */
1660 if (0 && (st.st_mode & S_IFREG)) {
1661 /*
1662 * Skip to last boundary, that's the closest to the EOF
1663 * location that we are sure contains a header so we can
1664 * pick up the stream.
1665 */
1666 last_boundary = (st.st_size / REC_BOUNDARY) * REC_BOUNDARY;
1667 /* XXX: ->bytes should be in query */
1668 assert(evtr->bytes == 0);
1669 evtr_skip(evtr, last_boundary);
1670 }
1671
1672
1673 /*
1674 * If we can't seek, we need to go through the whole file.
1675 * Since you can't seek back, this is pretty useless unless
1676 * you really are interested only in the last event.
1677 */
1678 while (!evtr_next_event(evtr, ev))
1679 ;
1680 if (evtr_error(evtr))
1681 return !0;
1682 evtr_rewind(evtr);
1683 return 0;
1684}
1685
1686struct evtr_query *
1687evtr_query_init(evtr_t evtr, evtr_filter_t filt, int nfilt)
1688{
1689 struct evtr_query *q;
1690 int i;
1691
1692 if (!(q = malloc(sizeof(*q)))) {
1693 return q;
1694 }
1695 q->bufsize = 2;
1696 if (!(q->buf = malloc(q->bufsize))) {
1697 goto free_q;
1698 }
1699 q->evtr = evtr;
1700 q->off = 0;
1701 q->filt = filt;
1702 q->nfilt = nfilt;
1703 q->nmatched = 0;
1704 for (i = 0; i < nfilt; ++i) {
1705 filt[i].flags = 0;
1706 if (filt[i].fmt == NULL)
1707 continue;
1708 if (evtr_filter_register(evtr, &filt[i])) {
1709 evtr_deregister_filters(evtr, filt, i);
1710 goto free_buf;
1711 }
1712 }
1713
1714 return q;
1715free_buf:
1716 free(q->buf);
1717free_q:
1718 free(q);
1719 return NULL;
1720}
1721
1722void
1723evtr_query_destroy(struct evtr_query *q)
1724{
1725 evtr_deregister_filters(q->evtr, q->filt, q->nfilt);
1726 free(q->buf);
1727 free(q);
1728}
1729
1730int
1731evtr_query_next(struct evtr_query *q, evtr_event_t ev)
1732{
1733 /* we may support that in the future */
1734 if (q->off != q->evtr->bytes)
1735 return !0;
1736 return _evtr_next_event(q->evtr, ev, q);
1737}
1738
1739int
1740evtr_ncpus(evtr_t evtr)
1741{
1742 return evtr->ncpus;
1743}
1744
1745int
1746evtr_cpufreqs(evtr_t evtr, double *freqs)
1747{
1748 int i;
1749
1750 if (!freqs)
1751 return EINVAL;
1752 for (i = 0; i < evtr->ncpus; ++i) {
1753 freqs[i] = evtr->cpus[i].freq;
1754 }
1755 return 0;
1756}