2 * Copyright (c) 2009, 2010 Aggelos Economopoulos. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 #include <sys/queue.h>
/*
 * Parse a string of single-letter debug flags and OR the matching bits
 * into *flags.  NOTE(review): interior lines of this function are missing
 * from this view; only fragments are visible.
 */
53 printd_set_flags(const char *str, unsigned int *flags)
56 * This is suboptimal as we don't detect
/* err() terminates the process with exit code 2 on an unrecognized flag */
65 err(2, "invalid debug flag %c\n", *str);
/* each lowercase letter selects one bit: 'a' -> bit 0, 'b' -> bit 1, ... */
66 *flags |= 1 << (*str - 'a');
/* Compile-time constants for the trace file format and internal tables. */
72 MAX_EVHDR_SIZE = PATH_MAX + 200,
73 /* string namespaces */
78 NR_BUCKETS = 1021, /* prime */
79 PARSE_ERR_BUFSIZE = 256,
/* a fresh record header is guaranteed at least every REC_BOUNDARY bytes */
81 REC_BOUNDARY = 1 << 14,
83 EVTRF_WR = 0x1, /* open for writing */
/* 16-bit ids used to reference interned strings/formats in the stream */
87 typedef uint16_t fileid_t;
88 typedef uint16_t funcid_t;
89 typedef uint16_t fmtid_t;
/*
 * On-disk record headers.  All are __attribute__((packed)) because they
 * are written/read verbatim with evtr_write()/evtr_read() -- the layout
 * is the file format.  NOTE(review): several member declarations are
 * missing from this view.
 */
91 struct trace_event_header {
93 uint64_t ts; /* XXX: this should only be part of probe */
94 } __attribute__((packed));
/* header for a probe (trace point hit) record; 0-valued ids mean "n/a" */
96 struct probe_event_header {
97 struct trace_event_header eh;
99 * For these fields, 0 implies "not available"
108 uint8_t cpu; /* -1 if n/a */
109 } __attribute__((packed));
/* header for an interned-string record (id -> string definition) */
111 struct string_event_header {
112 struct trace_event_header eh;
116 } __attribute__((packed));
/* header for a format-string record (subsys + fmt pair) */
118 struct fmt_event_header {
119 struct trace_event_header eh;
123 } __attribute__((packed));
/* header for a per-cpu info record (e.g. frequency) */
125 struct cpuinfo_event_header {
128 } __attribute__((packed));
/*
 * Chained hash table with pluggable hash/compare callbacks; keys and
 * values are uintptr_t so it can store pointers or small integers.
 */
133 struct hashentry *next;
137 struct hashentry *buckets[NR_BUCKETS];
138 uintptr_t (*hashfunc)(uintptr_t);
/* cmpfunc follows strcmp() convention: zero means "equal" (see hash_find) */
139 uintptr_t (*cmpfunc)(uintptr_t, uintptr_t);
/* A filter whose format string has not been resolved to a fmtid yet. */
151 struct event_filter_unresolved {
152 TAILQ_ENTRY(event_filter_unresolved) link;
/* id_map: red-black tree node mapping a numeric id to opaque data */
157 RB_ENTRY(id_map) rb_node;
162 RB_HEAD(id_tree, id_map);
/* tree of known threads, keyed by kernel thread address (see thread_cmp) */
171 RB_HEAD(thread_tree, evtr_thread);
174 struct thread_tree root;
/* a user callback invoked for every event delivered by a query */
177 struct event_callback {
178 void (*cb)(evtr_event_t, void *data);
179 void *data; /* this field must be malloc()ed */
/* per-cpu state tracked while replaying a trace */
183 struct evtr_thread *td; /* currently executing thread */
/*
 * Core library state (struct evtr / struct evtr_query fields).
 * NOTE(review): the struct declarations themselves are outside this view;
 * only selected members are visible.
 */
195 * When writing, we keep track of the strings we've
196 * already dumped so we only dump them once.
197 * Paths, function names etc belong to different
200 struct hashtab_str *strings[EVTR_NS_MAX - 1];
202 * When reading, we build a map from id to string.
203 * Every id must be defined at the point of use.
205 struct string_map maps[EVTR_NS_MAX - 1];
208 /* same as above, but for subsys+fmt pairs */
209 struct fmt_map fmtmap;
210 struct hashtab_str *fmts;
/* map from kernel thread address to struct evtr_thread */
212 struct thread_map threads;
/* symbol table used when parsing '#'-prefixed statement probes */
226 struct symtab *symtab;
228 struct event_callback **cbs;
230 * Filters that have a format specified and we
231 * need to resolve that to an fmtid
233 TAILQ_HEAD(, event_filter_unresolved) unresolved_filtq;
/* non-empty first byte here signals a parse error (see parse_callback) */
236 char parse_err_buf[PARSE_ERR_BUFSIZE];
/* synthesized event handed out on the next _evtr_next_event() call */
238 struct evtr_event pending_event;
/* Public entry point: set the library's debug flags from a flag string. */
242 evtr_set_debug(const char *str)
244 printd_set_flags(str, &evtr_debug);
/* RB-tree boilerplate: id_tree keyed by int id, thread_tree by void *id */
247 static int id_map_cmp(struct id_map *, struct id_map *);
248 RB_PROTOTYPE2(id_tree, id_map, rb_node, id_map_cmp, int);
249 RB_GENERATE2(id_tree, id_map, rb_node, id_map_cmp, int, id);
251 static int thread_cmp(struct evtr_thread *, struct evtr_thread *);
252 RB_PROTOTYPE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *);
253 RB_GENERATE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *, id);
/*
 * Debug helper: assert that every character of str is printable.
 * Only active when the MISC debug flag is set.
 */
257 validate_string(const char *str)
259 if (!(evtr_debug & MISC))
262 assert(isprint(*str));
/* Free all nodes of an id_tree, iterating safely while removing. */
267 id_tree_free(struct id_tree *root)
269 struct id_map *v, *n;
271 for (v = RB_MIN(id_tree, root); v; v = n) {
/* fetch successor before RB_REMOVE invalidates v */
272 n = RB_NEXT(id_tree, root, v);
273 RB_REMOVE(id_tree, root, v);
/*
 * Append a callback (fn, d) to the query's callback array.
 * NOTE(review): error-path lines are missing from this view.
 */
279 evtr_register_callback(evtr_query_t q, void (*fn)(evtr_event_t, void *), void *d)
281 struct event_callback *cb;
284 if (!(cb = malloc(sizeof(*cb)))) {
/* grow the array by one slot; ++q->ncbs happens before the realloc */
290 if (!(cbs = realloc(q->cbs, (++q->ncbs) * sizeof(cb)))) {
297 q->cbs[q->ncbs - 1] = cb;
/* Release all registered callbacks of a query. */
303 evtr_deregister_callbacks(evtr_query_t q)
307 for (i = 0; i < q->ncbs; ++i) {
/* Invoke every registered callback for event ev, in registration order. */
316 evtr_run_callbacks(evtr_event_t ev, evtr_query_t q)
318 struct event_callback *cb;
321 for (i = 0; i < q->ncbs; ++i) {
323 cb->cb(ev, cb->data);
/* Return the per-cpu state for cpu c, or (presumably) NULL if out of range. */
329 evtr_cpu(evtr_t evtr, int c)
331 if ((c < 0) || (c >= evtr->ncpus))
333 return &evtr->cpus[c];
/*
 * Scan values back out of an event's fmtdata: only applies when the
 * event's format string matches fmt exactly; renders the data with
 * vsnprintf and re-parses it with vsscanf, returning the number of
 * conversions (vsscanf's return value).
 */
336 static int parse_format_data(evtr_event_t ev, const char *fmt, ...)
337 __printflike(2, 3) __scanflike(2, 3);
341 parse_format_data(evtr_event_t ev, const char *fmt, ...)
346 if (strcmp(fmt, ev->fmt))
348 vsnprintf(buf, sizeof(buf), fmt, __DECONST(void *, ev->fmtdata));
349 printd(MISC, "string is: %s\n", buf);
351 return vsscanf(buf, fmt, ap);
/*
 * Drop any unresolved-filter entries that refer to filters in filt[0..nfilt).
 * Uses the _MUTABLE iterator so entries can be removed mid-walk.
 */
356 evtr_deregister_filters(evtr_query_t q, evtr_filter_t filt, int nfilt)
358 struct event_filter_unresolved *u, *tmp;
360 TAILQ_FOREACH_MUTABLE(u, &q->unresolved_filtq, link, tmp) {
361 for (i = 0; i < nfilt; ++i) {
362 if (u->filt == &filt[i]) {
363 TAILQ_REMOVE(&q->unresolved_filtq, u, link);
/* Queue a filter for later fmt -> fmtid resolution. */
371 evtr_filter_register(evtr_query_t q, evtr_filter_t filt)
373 struct event_filter_unresolved *res;
375 if (!(res = malloc(sizeof(*res)))) {
380 TAILQ_INSERT_TAIL(&q->unresolved_filtq, res, link);
/* True if any filter asks for EVTR_TYPE_STMT events (needs the parser). */
386 evtr_query_needs_parsing(evtr_query_t q)
390 for (i = 0; i < q->nfilt; ++i)
391 if (q->filt[i].ev_type == EVTR_TYPE_STMT)
/*
 * Render an event's payload into buf: format fmtdata through ev->fmt
 * when payload is present, otherwise copy the bare format string.
 */
397 evtr_event_data(evtr_event_t ev, char *buf, size_t len)
400 * XXX: we implicitly trust the format string.
403 if (ev->fmtdatalen) {
404 vsnprintf(buf, len, ev->fmt, __DECONST(void *, ev->fmtdata));
406 strlcpy(buf, ev->fmt, len);
/* Nonzero if the evtr handle has recorded an error (code or message). */
411 evtr_error(evtr_t evtr)
413 return evtr->err || (evtr->errmsg != NULL);
/* Human-readable error: explicit message wins over strerror(err). */
417 evtr_errmsg(evtr_t evtr)
419 return evtr->errmsg ? evtr->errmsg : strerror(evtr->err);
/* Query-level error check; also reports errors on the underlying evtr. */
423 evtr_query_error(evtr_query_t q)
425 return q->err || (q->errmsg != NULL) || evtr_error(q->evtr);
/* Query error message, falling back to errno text, then the evtr's message. */
429 evtr_query_errmsg(evtr_query_t q)
431 return q->errmsg ? q->errmsg :
432 (q->err ? strerror(q->err) :
433 (evtr_errmsg(q->evtr)));
/* RB-tree comparator for id_map nodes: order by numeric id. */
438 id_map_cmp(struct id_map *a, struct id_map *b)
440 return a->id - b->id;
/* RB-tree comparator for threads (body not visible in this view). */
445 thread_cmp(struct evtr_thread *a, struct evtr_thread *b)
/*
 * Generate a typed lookup wrapper (<prefix>_map_find) around the
 * id_tree RB lookup; returns the stored data or NULL when absent.
 */
456 #define DEFINE_MAP_FIND(prefix, type) \
459 prefix ## _map_find(struct id_tree *tree, int id)\
461 struct id_map *sid; \
463 sid = id_tree_RB_LOOKUP(tree, id); \
464 return sid ? sid->data : NULL; \
467 DEFINE_MAP_FIND(string, const char *)
468 DEFINE_MAP_FIND(fmt, const struct event_fmt *)
/* Look up a thread by its (kernel-address) id. */
472 thread_map_find(struct thread_map *map, void *id)
474 return thread_tree_RB_LOOKUP(&map->root, id);
/*
 * Generate a typed insert wrapper (<prefix>_map_insert).  An id may be
 * redefined to an equal value (silently skipped); _cmp != 0 on an existing
 * id is treated as a conflict.  New data is duplicated with _dup.
 * NOTE(review): several continuation lines of this macro are missing here.
 */
477 #define DEFINE_MAP_INSERT(prefix, type, _cmp, _dup) \
480 prefix ## _map_insert(struct id_tree *tree, type data, int id) \
482 struct id_map *sid, *osid; \
484 sid = malloc(sizeof(*sid)); \
490 if ((osid = id_tree_RB_INSERT(tree, sid))) { \
492 if (_cmp((type)osid->data, data)) { \
495 printd(DS, "mapping already exists, skipping\n"); \
496 /* we're OK with redefinitions of an id to the same string */ \
499 /* only do the strdup if we're inserting a new string */ \
500 sid->data = _dup(data); /* XXX: oom */ \
/*
 * Insert a thread; on address reuse the old node is updated in place
 * (its comm is freed and replaced), so pointers handed to API users
 * are not stable -- they must copy event->td.
 */
506 thread_map_insert(struct thread_map *map, struct evtr_thread *td)
508 struct evtr_thread *otd;
510 if ((otd = thread_tree_RB_INSERT(&map->root, td))) {
512 * Thread addresses might be reused, we're
514 * DANGER, Will Robinson: this means the user
515 * of the API needs to copy event->td if they
516 * want it to remain stable.
518 free((void *)otd->comm);
519 otd->comm = td->comm;
/*
 * Compare two event formats by subsys (treating a NULL subsys as "")
 * and then by format string; strcmp() convention.
 * NOTE(review): branch structure is partially missing from this view.
 */
526 event_fmt_cmp(const struct event_fmt *a, const struct event_fmt *b)
532 ret = strcmp(a->subsys, b->subsys);
534 ret = strcmp(a->subsys, "");
536 } else if (b->subsys) {
537 ret = strcmp("", b->subsys);
541 return strcmp(a->fmt, b->fmt);
/* Shallow-duplicate an event_fmt (struct copy via memcpy). */
546 event_fmt_dup(const struct event_fmt *o)
550 if (!(n = malloc(sizeof(*n)))) {
553 memcpy(n, o, sizeof(*n));
/* Instantiate the insert wrappers for string and fmt maps. */
557 DEFINE_MAP_INSERT(string, const char *, strcmp, strdup)
558 DEFINE_MAP_INSERT(fmt, const struct event_fmt *, event_fmt_cmp, event_fmt_dup)
/*
 * Walk the chain for key's bucket until cmpfunc reports equality
 * (returns 0).  NOTE(review): the tail of the function is not visible.
 */
561 hash_find(const struct hashtab *tab, uintptr_t key, uintptr_t *val)
563 struct hashentry *ent;
565 for(ent = tab->buckets[tab->hashfunc(key)];
566 ent && tab->cmpfunc(ent->key, key);
/* Prepend a new (key, val) entry onto its bucket's chain. */
576 hash_insert(struct hashtab *tab, uintptr_t key, uintptr_t val)
578 struct hashentry *ent;
581 if (!(ent = malloc(sizeof(*ent)))) {
582 fprintf(stderr, "out of memory\n");
585 hsh = tab->hashfunc(key);
586 ent->next = tab->buckets[hsh];
589 tab->buckets[hsh] = ent;
/* Identity-based compare for pointer keys (strcmp convention). */
595 cmpfunc_pointer(uintptr_t a, uintptr_t b)
/* Hash a pointer by reducing it modulo the bucket count. */
602 hashfunc_pointer(uintptr_t p)
604 return p % NR_BUCKETS;
/* Allocate a zeroed table wired to the pointer hash/compare callbacks. */
611 if (!(tab = calloc(sizeof(struct hashtab), 1)))
613 tab->hashfunc = &hashfunc_pointer;
614 tab->cmpfunc = &cmpfunc_pointer;
/* String-keyed hash table assigning compact uint16_t ids to strings. */
618 struct hashtab_str { /* string -> id map */
/* djb2 string hash, reduced modulo the bucket count */
625 hashfunc_string(uintptr_t p)
627 const char *str = (char *)p;
628 unsigned long hash = 5381;
632 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
633 return hash % NR_BUCKETS;
/* strcmp on the uintptr_t-encoded string keys */
638 cmpfunc_string(uintptr_t a, uintptr_t b)
640 return strcmp((char *)a, (char *)b);
/* Allocate a zeroed string table wired to the string hash/compare. */
648 struct hashtab_str *strtab;
649 if (!(strtab = calloc(sizeof(struct hashtab_str), 1)))
651 strtab->tab.hashfunc = &hashfunc_string;
652 strtab->tab.cmpfunc = &cmpfunc_string;
/* Free a string table (body not visible in this view). */
658 strhash_destroy(struct hashtab_str *strtab)
/* Look up the id previously assigned to str. */
665 strhash_find(struct hashtab_str *strtab, const char *str, uint16_t *id)
669 if (hash_find(&strtab->tab, (uintptr_t)str, &val))
/*
 * Assign the next id to str.  An id counter of 0 means the 16-bit id
 * space wrapped -- too many distinct strings.
 */
677 strhash_insert(struct hashtab_str *strtab, const char *str, uint16_t *id)
682 if (strtab->id == 0) {
683 fprintf(stderr, "too many strings\n");
688 fprintf(stderr, "out of memory\n");
692 hash_insert(&strtab->tab, (uintptr_t)str, (uintptr_t)val);
/* Allocate a symbol table (string name -> evtr_variable) over strhash. */
700 struct symtab *symtab;
701 if (!(symtab = calloc(sizeof(struct symtab), 1)))
703 symtab->tab.hashfunc = &hashfunc_string;
704 symtab->tab.cmpfunc = &cmpfunc_string;
/* Free a symbol table (body not visible in this view). */
709 symtab_destroy(struct symtab *symtab)
/* Look up a variable by name; NULL semantics presumably on miss. */
714 struct evtr_variable *
715 symtab_find(const struct symtab *symtab, const char *str)
719 if (hash_find(&symtab->tab, (uintptr_t)str, &val))
721 return (struct evtr_variable *)val;
/* Bind name -> var in the symbol table. */
725 symtab_insert(struct symtab *symtab, const char *name,
726 struct evtr_variable *var)
730 fprintf(stderr, "out of memory\n");
733 hash_insert(&symtab->tab, (uintptr_t)name, (uintptr_t)var);
/*
 * Decide whether event ev passes filter f: cpu must match (unless the
 * filter's cpu is -1 = any), the event type must match, and for probes
 * the format string (if the filter specifies one) must compare equal.
 * For statement events, the filter's variable expression is parsed and
 * compared against the event's variable.
 */
739 evtr_filter_match(evtr_query_t q, evtr_filter_t f, evtr_event_t ev)
741 if ((f->cpu != -1) && (f->cpu != ev->cpu))
744 assert(!(f->flags & FILTF_ID));
745 if (ev->type != f->ev_type)
747 if (ev->type == EVTR_TYPE_PROBE) {
748 if (f->fmt && strcmp(ev->fmt, f->fmt))
750 } else if (ev->type == EVTR_TYPE_STMT) {
751 struct evtr_variable *var;
753 /* XXX: no need to do that *every* time */
754 parse_var(f->var, q->symtab, &var, &q->parse_err_buf[0],
757 * Ignore errors, they're expected since the
758 * variable might not be instantiated yet
760 if (var != ev->stmt.var)
/* An event matches the query if any filter accepts it; no filters = all. */
768 evtr_match_filters(struct evtr_query *q, evtr_event_t ev)
772 /* no filters means we're interested in all events */
776 for (i = 0; i < q->nfilt; ++i) {
777 if (evtr_filter_match(q, &q->filt[i], ev)) {
/*
 * Per-event callback: probes whose format starts with '#' carry a
 * statement expression.  Parse it into a synthesized EVTR_TYPE_STMT
 * event stored in q->pending_event; EVTRQF_PENDING makes the query
 * deliver it on the next _evtr_next_event() call.
 */
787 parse_callback(evtr_event_t ev, void *d)
789 evtr_query_t q = (evtr_query_t)d;
790 if (ev->type != EVTR_TYPE_PROBE)
792 if (!ev->fmt || (ev->fmt[0] != '#'))
795 * Copy the event to ->pending_event, then call
796 * the parser to convert it into a synthesized
797 * EVTR_TYPE_STMT event.
799 memcpy(&q->pending_event, ev, sizeof(*ev));
800 parse_string(&q->pending_event, q->symtab, &ev->fmt[1],
801 &q->parse_err_buf[0], PARSE_ERR_BUFSIZE);
802 if (q->parse_err_buf[0]) { /* parse error */
803 q->errmsg = &q->parse_err_buf[0];
/* only deliver the synthesized event if the query's filters accept it */
806 if (!evtr_match_filters(q, &q->pending_event))
809 * This will cause us to return ->pending_event next time
812 q->flags |= EVTRQF_PENDING;
/*
 * Callback for "new_td <addr> <comm>" probes: allocate an evtr_thread,
 * duplicate its name, and insert it into the thread map.
 */
817 thread_creation_callback(evtr_event_t ev, void *d)
819 evtr_query_t q = (evtr_query_t)d;
820 evtr_t evtr = q->evtr;
821 struct evtr_thread *td;
825 if (parse_format_data(ev, "new_td %p %s", &ktd, buf) != 2) {
830 if (!(td = malloc(sizeof(*td)))) {
836 if (!(td->comm = strdup(buf))) {
841 printd(DS, "inserting new thread %p: %s\n", td->id, td->comm);
842 thread_map_insert(&evtr->threads, td);
/*
 * Callback for "sw <old> > <new>" context-switch probes.  If the
 * incoming thread is unknown, a thread-creation event is faked (with
 * the pointer value as its name) so the thread map stays populated.
 */
847 thread_switch_callback(evtr_event_t ev, void *d)
849 evtr_t evtr = ((evtr_query_t)d)->evtr;
850 struct evtr_thread *tdp, *tdn;
853 static struct evtr_event tdcr;
854 static char *fmt = "new_td %p %s";
856 char fmtdata[sizeof(void *) + sizeof(char *)];
858 cpu = evtr_cpu(evtr, ev->cpu);
860 printw("invalid cpu %d\n", ev->cpu);
863 if (parse_format_data(ev, "sw %p > %p", &ktdp, &ktdn) != 2) {
866 tdp = thread_map_find(&evtr->threads, ktdp);
868 printd(DS, "switching from unknown thread %p\n", ktdp);
870 tdn = thread_map_find(&evtr->threads, ktdn);
873 * Fake a thread creation event for threads we
874 * haven't seen before.
876 tdcr.type = EVTR_TYPE_PROBE;
882 tdcr.fmtdata = &fmtdata;
883 tdcr.fmtdatalen = sizeof(fmtdata);
/* synthesize the "new_td %p %s" payload: pointer then name string */
886 snprintf(tidstr, sizeof(tidstr), "%p", ktdn);
887 ((void **)fmtdata)[0] = ktdn;
888 ((char **)fmtdata)[1] = &tidstr[0];
889 thread_creation_callback(&tdcr, d);
/* re-lookup: the fake creation should have inserted the thread */
891 tdn = thread_map_find(&evtr->threads, ktdn);
893 printd(DS, "switching to unknown thread %p\n", ktdn);
897 printd(DS, "cpu %d: switching to thread %p\n", ev->cpu, ktdn);
/*
 * Sanity check: our byte counter must agree with the stdio file offset.
 */
903 assert_foff_in_sync(evtr_t evtr)
908 * We keep our own offset because we
909 * might want to support mmap()
911 off = ftello(evtr->f);
912 if (evtr->bytes != off) {
913 fprintf(stderr, "bytes %jd, off %jd\n", evtr->bytes, off);
/*
 * Write bytes to the trace file, recording errno text on short writes
 * and keeping evtr->bytes in sync with the stream position.
 */
920 evtr_write(evtr_t evtr, const void *buf, size_t bytes)
922 assert_foff_in_sync(evtr);
923 if (fwrite(buf, bytes, 1, evtr->f) != 1) {
925 evtr->errmsg = strerror(errno);
928 evtr->bytes += bytes;
929 assert_foff_in_sync(evtr);
934 * Called after dumping a record to make sure the next
935 * record is REC_ALIGN aligned. This does not make much sense,
936 * as we shouldn't be using packed structs anyway.
940 evtr_dump_pad(evtr_t evtr)
/* zero-filled static scratch: at most REC_ALIGN bytes of padding needed */
943 static char buf[REC_ALIGN];
945 pad = REC_ALIGN - (evtr->bytes % REC_ALIGN);
947 return evtr_write(evtr, buf, pad);
953 * We make sure that there is a new record every REC_BOUNDARY
954 * bytes, this costs next to nothing in space and allows for
/*
 * If writing @bytes more would cross a REC_BOUNDARY, pad up to the
 * boundary first (in sizeof(buf)-sized chunks plus a remainder).
 */
959 evtr_dump_avoid_boundary(evtr_t evtr, size_t bytes)
962 static char buf[256];
964 pad = REC_BOUNDARY - (evtr->bytes % REC_BOUNDARY);
965 /* if adding @bytes would cause us to cross a boundary... */
967 /* then pad to the boundary */
968 for (i = 0; i < (pad / sizeof(buf)); ++i) {
969 if (evtr_write(evtr, buf, sizeof(buf))) {
973 i = pad % sizeof(buf);
975 if (evtr_write(evtr, buf, i)) {
/*
 * Intern the event's (subsys, fmt) pair: if the concatenated key is new,
 * assign it an id and emit an EVTR_TYPE_FMT record (header + subsys +
 * fmt strings, then alignment padding).  Presumably returns the fmt id;
 * the return statements are not visible in this view.
 */
985 evtr_dump_fmt(evtr_t evtr, uint64_t ts, const evtr_event_t ev)
987 struct fmt_event_header fmt;
990 char *subsys = "", buf[1024];
992 if (strlcpy(buf, subsys, sizeof(buf)) >= sizeof(buf)) {
993 evtr->errmsg = "name of subsystem is too large";
997 if (strlcat(buf, ev->fmt, sizeof(buf)) >= sizeof(buf)) {
998 evtr->errmsg = "fmt + name of subsystem is too large";
/* already interned? then nothing needs to be written */
1003 if (!strhash_find(evtr->fmts, buf, &id)) {
1006 if ((err = strhash_insert(evtr->fmts, buf, &id))) {
1011 fmt.eh.type = EVTR_TYPE_FMT;
1013 fmt.subsys_len = strlen(subsys);
1014 fmt.fmt_len = strlen(ev->fmt);
1016 if (evtr_dump_avoid_boundary(evtr, sizeof(fmt) + fmt.subsys_len +
1019 if (evtr_write(evtr, &fmt, sizeof(fmt)))
1021 if (evtr_write(evtr, subsys, fmt.subsys_len))
1023 if (evtr_write(evtr, ev->fmt, fmt.fmt_len))
1025 if (evtr_dump_pad(evtr))
1031 * Replace string pointers or string ids in fmtdata
/*
 * Walk the printf-style format string, computing each conversion's
 * argument size so we can step through fmtdata in lockstep; every %s
 * argument slot is rewritten via replace(ctx, old).  Used both to turn
 * pointers into ids (writing) and ids into pointers (reading).
 */
1035 mangle_string_ptrs(const char *fmt, uint8_t *fmtdata,
1036 const char *(*replace)(void *, const char *), void *ctx)
1039 size_t skipsize, intsz;
1042 for (f = fmt; f[0] != '\0'; ++f) {
1047 for (p = f; p[0]; ++p) {
1050 * Eat flags. Notice this will accept duplicate
1066 /* Eat minimum field width, if any */
1067 for (; isdigit(p[0]); ++p)
1071 /* Eat precision, if any */
1072 for (; isdigit(p[0]); ++p)
/* length modifiers select the integer argument's size */
1079 intsz = sizeof(long long);
1081 intsz = sizeof(long);
1085 intsz = sizeof(intmax_t);
1088 intsz = sizeof(ptrdiff_t);
1091 intsz = sizeof(size_t);
1099 intsz = sizeof(int);
/* conversion specifier determines how far to advance in fmtdata */
1112 skipsize = sizeof(void *);
1116 skipsize = sizeof(double);
1118 skipsize = sizeof(float);
/* %s slot: substitute the string pointer/id through the callback */
1121 ((const char **)fmtdata)[0] =
1122 replace(ctx, ((char **)fmtdata)[0]);
1123 skipsize = sizeof(char *);
1127 fprintf(stderr, "Unknown conversion specifier %c "
1128 "in fmt starting with %s", p[0], f - 1);
1131 fmtdata += skipsize;
1136 /* XXX: do we really want the timestamp? */
/*
 * Intern str in namespace ns: on first sight assign an id and emit an
 * EVTR_TYPE_STR record (header + string bytes + padding).  Presumably
 * returns the string's id; the return statements are not visible here.
 */
1139 evtr_dump_string(evtr_t evtr, uint64_t ts, const char *str, int ns)
1141 struct string_event_header s;
1145 assert((0 <= ns) && (ns < EVTR_NS_MAX));
1146 if (!strhash_find(evtr->strings[ns], str, &id)) {
1149 if ((err = strhash_insert(evtr->strings[ns], str, &id))) {
1154 printd(DS, "hash_insert %s ns %d id %d\n", str, ns, id);
1155 s.eh.type = EVTR_TYPE_STR;
/* cap stored length at PATH_MAX */
1159 s.len = strnlen(str, PATH_MAX);
1161 if (evtr_dump_avoid_boundary(evtr, sizeof(s) + s.len))
1163 if (evtr_write(evtr, &s, sizeof(s)))
1165 if (evtr_write(evtr, str, s.len))
1167 if (evtr_dump_pad(evtr))
/* Context threaded through mangle_string_ptrs() replace callbacks. */
1172 struct replace_ctx {
/* writing: swap a string pointer for its interned id (smuggled in a ptr) */
1179 replace_strptr(void *_ctx, const char *s)
1181 struct replace_ctx *ctx = _ctx;
1182 return (const char *)(uintptr_t)evtr_dump_string(ctx->evtr, ctx->ts, s,
/* reading: swap an interned id back for the mapped string pointer */
1188 replace_strid(void *_ctx, const char *s)
1190 struct replace_ctx *ctx = _ctx;
1193 ret = string_map_find(&ctx->evtr->maps[EVTR_NS_DSTR - 1].root,
1196 fprintf(stderr, "Unknown id for data string\n");
1197 ctx->evtr->errmsg = "unknown id for data string";
1198 ctx->evtr->err = !0;
1200 validate_string(ret);
1201 printd(DS, "replacing strid %d (ns %d) with string '%s' (or int %#x)\n",
1202 (int)(uintptr_t)s, EVTR_NS_DSTR, ret ? ret : "NULL", (int)(uintptr_t)ret);
/*
 * Serialize a probe event: intern its file/func/fmt strings to ids,
 * copy the payload into a scratch buffer, rewrite any embedded string
 * pointers into ids (mangle_string_ptrs + replace_strptr), then write
 * header + payload + alignment padding.
 */
1208 evtr_dump_probe(evtr_t evtr, evtr_event_t ev)
1210 struct probe_event_header kev;
1213 memset(&kev, '\0', sizeof(kev));
1214 kev.eh.type = ev->type;
1216 kev.line = ev->line;
1219 kev.file = evtr_dump_string(evtr, kev.eh.ts, ev->file,
1223 kev.func = evtr_dump_string(evtr, kev.eh.ts, ev->func,
1227 kev.fmt = evtr_dump_fmt(evtr, kev.eh.ts, ev);
1230 struct replace_ctx replctx = {
1234 assert(ev->fmtdatalen <= (int)sizeof(buf));
1235 kev.datalen = ev->fmtdatalen;
1237 * Replace all string pointers with string ids before dumping
/* work on a copy so the caller's fmtdata is left untouched */
1240 memcpy(buf, ev->fmtdata, ev->fmtdatalen);
1241 if (mangle_string_ptrs(ev->fmt, buf,
1242 replace_strptr, &replctx) < 0)
1247 if (evtr_dump_avoid_boundary(evtr, sizeof(kev) + ev->fmtdatalen))
1249 if (evtr_write(evtr, &kev, sizeof(kev)))
1251 if (evtr_write(evtr, buf, ev->fmtdatalen))
1253 if (evtr_dump_pad(evtr))
/*
 * Write a sysinfo record: a one-byte type tag followed by the cpu count.
 */
1260 evtr_dump_sysinfo(evtr_t evtr, evtr_event_t ev)
1262 uint8_t type = EVTR_TYPE_SYSINFO;
1263 uint16_t ncpus = ev->ncpus;
1266 evtr->errmsg = "invalid number of cpus";
1269 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ncpus)))
1271 if (evtr_write(evtr, &type, sizeof(type))) {
1274 if (evtr_write(evtr, &ncpus, sizeof(ncpus))) {
1277 if (evtr_dump_pad(evtr))
/* Write a cpuinfo record: type tag then the packed cpuinfo header. */
1283 evtr_dump_cpuinfo(evtr_t evtr, evtr_event_t ev)
1285 struct cpuinfo_event_header ci;
1288 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ci)))
1290 type = EVTR_TYPE_CPUINFO;
1291 if (evtr_write(evtr, &type, sizeof(type))) {
1295 ci.freq = ev->cpuinfo.freq;
1296 if (evtr_dump_avoid_boundary(evtr, sizeof(ci)))
1298 if (evtr_write(evtr, &ci, sizeof(ci))) {
1301 if (evtr_dump_pad(evtr))
/* Seek a read-mode handle back to the start of the trace file. */
1307 evtr_rewind(evtr_t evtr)
1309 assert((evtr->flags & EVTRF_WR) == 0);
1311 if (fseek(evtr->f, 0, SEEK_SET)) {
/* Dispatch an event to the dumper matching its type. */
1319 evtr_dump_event(evtr_t evtr, evtr_event_t ev)
1322 case EVTR_TYPE_PROBE:
1323 return evtr_dump_probe(evtr, ev);
1324 case EVTR_TYPE_SYSINFO:
1325 return evtr_dump_sysinfo(evtr, ev);
1326 case EVTR_TYPE_CPUINFO:
1327 return evtr_dump_cpuinfo(evtr, ev);
1329 evtr->errmsg = "unknown event type";
/* Allocate and zero-initialize an evtr handle (fragment only visible). */
1338 if (!(evtr = malloc(sizeof(*evtr)))) {
1344 evtr->errmsg = NULL;
1349 static int evtr_next_event(evtr_t, evtr_event_t);
/*
 * Open a trace stream for reading: initialize the id->string, fmt and
 * thread maps, then read (and discard) the first event so stream-level
 * metadata is picked up, and rewind to the start.
 */
1352 evtr_open_read(FILE *f)
1355 struct evtr_event ev;
1358 if (!(evtr = evtr_alloc(f))) {
1362 for (i = 0; i < (EVTR_NS_MAX - 1); ++i) {
1363 RB_INIT(&evtr->maps[i].root);
1365 RB_INIT(&evtr->fmtmap.root);
1366 RB_INIT(&evtr->threads.root);
1370 * Load the first event so we can pick up any
1373 if (evtr_next_event(evtr, &ev)) {
1376 if (evtr_rewind(evtr))
/*
 * Open a trace stream for writing: create the fmt table and one string
 * table per namespace, unwinding partial allocations on failure.
 */
1385 evtr_open_write(FILE *f)
1390 if (!(evtr = evtr_alloc(f))) {
1394 evtr->flags = EVTRF_WR;
1395 if (!(evtr->fmts = strhash_new()))
1397 for (i = 0; i < EVTR_NS_MAX; ++i) {
1398 evtr->strings[i] = strhash_new();
1399 if (!evtr->strings[i]) {
/* roll back the string tables created so far */
1400 for (j = 0; j < i; ++j) {
1401 strhash_destroy(evtr->strings[j]);
1409 strhash_destroy(evtr->fmts);
/* Free every chain entry in every bucket of a hash table. */
1417 hashtab_destroy(struct hashtab *h)
1419 struct hashentry *ent, *next;
1421 for (i = 0; i < NR_BUCKETS; ++i) {
1422 for (ent = h->buckets[i]; ent; ent = next) {
/*
 * Tear down an evtr handle: write mode frees the interning tables,
 * read mode frees the id->string/fmt trees.
 */
1431 evtr_close(evtr_t evtr)
1435 if (evtr->flags & EVTRF_WR) {
1436 hashtab_destroy(&evtr->fmts->tab);
1437 for (i = 0; i < EVTR_NS_MAX; ++i)
1438 hashtab_destroy(&evtr->strings[i]->tab);
1440 id_tree_free(&evtr->fmtmap.root);
1441 for (i = 0; i < EVTR_NS_MAX - 1; ++i) {
1442 id_tree_free(&evtr->maps[i].root);
/*
 * Read exactly size bytes from the trace file into buf, distinguishing
 * a truncated record (EOF) from an I/O error, and keeping the byte
 * counter in sync with the stream position.
 */
1450 evtr_read(evtr_t evtr, void *buf, size_t size)
1453 assert_foff_in_sync(evtr);
1454 printd(IO, "evtr_read at %#jx, %zd bytes\n", evtr->bytes, size);
1455 if (fread(buf, size, 1, evtr->f) != 1) {
1456 if (feof(evtr->f)) {
1457 evtr->errmsg = "incomplete record";
1459 evtr->errmsg = strerror(errno);
1463 evtr->bytes += size;
1464 assert_foff_in_sync(evtr);
/*
 * Read the remainder of an EVTR_TYPE_FMT record (the type byte and
 * trace_event_header were already consumed), reconstruct the
 * event_fmt and insert it into the id->fmt map.
 */
1470 evtr_load_fmt(evtr_query_t q, char *buf)
1472 evtr_t evtr = q->evtr;
1473 struct fmt_event_header *evh = (struct fmt_event_header *)buf;
1474 struct event_fmt *fmt;
1475 char *subsys = NULL, *fmtstr;
1477 if (!(fmt = malloc(sizeof(*fmt)))) {
/* read the rest of the fmt header past the generic trace header */
1481 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1482 sizeof(*evh) - sizeof(evh->eh))) {
1485 assert(!evh->subsys_len);
1486 if (evh->subsys_len) {
1487 if (!(subsys = malloc(evh->subsys_len))) {
1491 if (evtr_read(evtr, subsys, evh->subsys_len)) {
1494 fmt->subsys = subsys;
/* NUL-terminate the format string read from the stream */
1498 if (!(fmtstr = malloc(evh->fmt_len + 1))) {
1502 if (evtr_read(evtr, fmtstr, evh->fmt_len)) {
1505 fmtstr[evh->fmt_len] = '\0';
1508 printd(DS, "fmt_map_insert (%d, %s)\n", evh->id, fmt->fmt);
1509 evtr->err = fmt_map_insert(&evtr->fmtmap.root, fmt, evh->id);
1510 switch (evtr->err) {
1512 evtr->errmsg = "out of memory";
/* conflicting redefinition of an existing id is corrupt input */
1515 evtr->errmsg = "redefinition of an id to a "
1516 "different format (corrupt input)";
/*
 * Read the remainder of an EVTR_TYPE_STR record and insert the string
 * into the map for its namespace, validating length and namespace
 * against corrupt input.
 */
1535 evtr_load_string(evtr_t evtr, char *buf)
1537 char sbuf[PATH_MAX + 1];
1538 struct string_event_header *evh = (struct string_event_header *)buf;
1540 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1541 sizeof(*evh) - sizeof(evh->eh))) {
1544 if (evh->len > PATH_MAX) {
1545 evtr->errmsg = "string too large (corrupt input)";
1548 if (evh->len && evtr_read(evtr, sbuf, evh->len)) {
1552 if (evh->ns >= EVTR_NS_MAX) {
1553 evtr->errmsg = "invalid namespace (corrupt input)";
1556 validate_string(sbuf);
1557 printd(DS, "evtr_load_string:ns %d id %d : \"%s\"\n", evh->ns, evh->id,
1559 evtr->err = string_map_insert(&evtr->maps[evh->ns - 1].root, sbuf, evh->id);
1560 switch (evtr->err) {
1562 evtr->errmsg = "out of memory";
1565 evtr->errmsg = "redefinition of an id to a "
1566 "different string (corrupt input)";
/* Seek forward over bytes we don't need to parse, tracking the offset. */
1576 evtr_skip(evtr_t evtr, off_t bytes)
1578 if (fseek(evtr->f, bytes, SEEK_CUR)) {
1580 evtr->errmsg = strerror(errno);
1583 evtr->bytes += bytes;
1588 * Make sure q->buf is at least len bytes
/* grow-only: realloc the query's scratch buffer when it is too small */
1592 evtr_query_reserve_buf(struct evtr_query *q, int len)
1596 if (q->bufsize >= len)
1598 if (!(tmp = realloc(q->buf, len)))
/*
 * Read the remainder of a probe record and materialize an evtr_event:
 * resolve file/func/fmt ids through the maps (falling back to
 * "<unknown>"), copy the payload into the query buffer, rewrite string
 * ids back into pointers, then run the query callbacks on the event.
 */
1607 evtr_load_probe(evtr_t evtr, evtr_event_t ev, char *buf, struct evtr_query *q)
1609 struct probe_event_header *evh = (struct probe_event_header *)buf;
1612 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1613 sizeof(*evh) - sizeof(evh->eh)))
1615 memset(ev, '\0', sizeof(*ev));
1616 ev->ts = evh->eh.ts;
1617 ev->type = EVTR_TYPE_PROBE;
1618 ev->line = evh->line;
1620 if ((cpu = evtr_cpu(evtr, evh->cpu))) {
1626 ev->file = string_map_find(
1627 &evtr->maps[EVTR_NS_PATH - 1].root,
1630 evtr->errmsg = "unknown id for file path";
1632 ev->file = "<unknown>";
1634 validate_string(ev->file);
1637 ev->file = "<unknown>";
1640 const struct event_fmt *fmt;
1641 if (!(fmt = fmt_map_find(&evtr->fmtmap.root, evh->fmt))) {
1642 evtr->errmsg = "unknown id for event fmt";
1647 validate_string(fmt->fmt);
/* +1 leaves room for the NUL terminator appended below */
1651 if (evtr_query_reserve_buf(q, evh->datalen + 1)) {
1653 } else if (!evtr_read(evtr, q->buf, evh->datalen)) {
1654 struct replace_ctx replctx = {
1660 ev->fmtdata = q->buf;
1662 * If the format specifies any string pointers, there
1663 * is a string id stored in the fmtdata. Look it up
1664 * and replace it with a string pointer before
1665 * returning it to the user.
1667 if (mangle_string_ptrs(ev->fmt, __DECONST(uint8_t *,
1669 replace_strid, &replctx) < 0)
1673 ((char *)ev->fmtdata)[evh->datalen] = '\0';
1674 ev->fmtdatalen = evh->datalen;
1677 evtr_run_callbacks(ev, q);
/* Seek forward to the next REC_ALIGN-aligned record boundary. */
1683 evtr_skip_to_record(evtr_t evtr)
1687 skip = REC_ALIGN - (evtr->bytes % REC_ALIGN);
1689 if (fseek(evtr->f, skip, SEEK_CUR)) {
1691 evtr->errmsg = strerror(errno);
1694 evtr->bytes += skip;
/*
 * Consume a sysinfo record: read the cpu count and allocate/initialize
 * the per-cpu array (no thread, unknown frequency).
 */
1701 evtr_load_sysinfo(evtr_t evtr)
1706 if (evtr_read(evtr, &ncpus, sizeof(ncpus))) {
1711 evtr->cpus = malloc(ncpus * sizeof(struct cpu));
1716 evtr->ncpus = ncpus;
1717 for (i = 0; i < ncpus; ++i) {
1718 evtr->cpus[i].td = NULL;
1719 evtr->cpus[i].freq = -1.0;
/* Consume a cpuinfo record and store the cpu's frequency multiplier. */
1726 evtr_load_cpuinfo(evtr_t evtr)
1728 struct cpuinfo_event_header cih;
1731 if (evtr_read(evtr, &cih, sizeof(cih))) {
1734 if (cih.freq < 0.0) {
1735 evtr->errmsg = "cpu freq is negative";
1740 * Notice that freq is merely a multiplier with
1741 * which we convert a timestamp to seconds; if
1742 * ts is not in cycles, freq is not the frequency.
1744 if (!(cpu = evtr_cpu(evtr, cih.cpu))) {
1745 evtr->errmsg = "freq for invalid cpu";
1749 cpu->freq = cih.freq;
/*
 * Core record loop: deliver a pending synthesized event if one is
 * queued, otherwise read records -- transparently consuming pad,
 * sysinfo and cpuinfo records -- until an event passes the query's
 * filters or EOF/error is hit.
 */
1755 _evtr_next_event(evtr_t evtr, evtr_event_t ev, struct evtr_query *q)
1757 char buf[MAX_EVHDR_SIZE];
1759 struct trace_event_header *evhdr = (struct trace_event_header *)buf;
1761 for (ret = 0; !ret;) {
/* a parse_callback-synthesized STMT event takes priority */
1762 if (q->flags & EVTRQF_PENDING) {
1763 q->off = evtr->bytes;
1764 memcpy(ev, &q->pending_event, sizeof(*ev));
1765 q->flags &= ~EVTRQF_PENDING;
/* peek one byte: the record type tag */
1768 if (evtr_read(evtr, &evhdr->type, 1)) {
1769 if (feof(evtr->f)) {
1770 evtr->errmsg = NULL;
1777 * skip pad records -- this will only happen if there's a
1778 * variable sized record close to the boundary
1780 if (evhdr->type == EVTR_TYPE_PAD) {
1781 evtr_skip_to_record(evtr);
/* metadata records are absorbed here, not returned to the caller */
1784 if (evhdr->type == EVTR_TYPE_SYSINFO) {
1785 evtr_load_sysinfo(evtr);
1787 } else if (evhdr->type == EVTR_TYPE_CPUINFO) {
1788 evtr_load_cpuinfo(evtr);
/* read the rest of the generic header before dispatching on type */
1791 if (evtr_read(evtr, buf + 1, sizeof(*evhdr) - 1))
1792 return feof(evtr->f) ? -1 : !0;
1793 switch (evhdr->type) {
1794 case EVTR_TYPE_PROBE:
1795 if ((err = evtr_load_probe(evtr, ev, buf, q))) {
1807 if (evtr_load_string(evtr, buf)) {
1812 if (evtr_load_fmt(q, buf)) {
1818 evtr->errmsg = "unknown event type (corrupt input?)";
/* realign after every record */
1821 evtr_skip_to_record(evtr);
1823 if (!evtr_match_filters(q, ev)) {
1827 q->off = evtr->bytes;
1831 /* can't get here */
/*
 * Convenience wrapper: run one iteration with a throwaway, filterless
 * query.  Used internally (e.g. by evtr_open_read).
 */
1837 evtr_next_event(evtr_t evtr, evtr_event_t ev)
1839 struct evtr_query *q;
1842 if (!(q = evtr_query_init(evtr, NULL, 0))) {
1846 ret = _evtr_next_event(evtr, ev, q);
1847 evtr_query_destroy(q);
/*
 * Position on the last event of the trace.  The seek-to-last-boundary
 * fast path is compiled out ("if (0 && ...)"); in practice the stream
 * is scanned to EOF and ev holds the final event.
 */
1852 evtr_last_event(evtr_t evtr, evtr_event_t ev)
1856 off_t last_boundary;
1858 if (evtr_error(evtr))
1861 fd = fileno(evtr->f);
1865 * This skips pseudo records, so we can't provide
1866 * an event with all fields filled in this way.
1867 * It's doable, just needs some care. TBD.
1869 if (0 && (st.st_mode & S_IFREG)) {
1871 * Skip to last boundary, that's the closest to the EOF
1872 * location that we are sure contains a header so we can
1873 * pick up the stream.
1875 last_boundary = (st.st_size / REC_BOUNDARY) * REC_BOUNDARY;
1876 /* XXX: ->bytes should be in query */
1877 assert(evtr->bytes == 0);
1878 evtr_skip(evtr, last_boundary);
1883 * If we can't seek, we need to go through the whole file.
1884 * Since you can't seek back, this is pretty useless unless
1885 * you really are interested only in the last event.
1887 while (!evtr_next_event(evtr, ev))
1889 if (evtr_error(evtr))
/*
 * Create a query over evtr with nfilt filters: allocate the scratch
 * buffer and symbol table, register the built-in thread tracking
 * callbacks (plus the statement parser when a filter needs it), and
 * queue any format-bearing filters for later fmtid resolution.
 */
1896 evtr_query_init(evtr_t evtr, evtr_filter_t filt, int nfilt)
1898 struct evtr_query *q;
1901 if (!(q = malloc(sizeof(*q)))) {
1905 if (!(q->buf = malloc(q->bufsize))) {
1908 if (!(q->symtab = symtab_new()))
1914 TAILQ_INIT(&q->unresolved_filtq);
1919 memset(&q->pending_event, '\0', sizeof(q->pending_event));
1920 if (evtr_register_callback(q, &thread_creation_callback, q)) {
1923 if (evtr_register_callback(q, &thread_switch_callback, q)) {
1926 if (evtr_query_needs_parsing(q) &&
1927 evtr_register_callback(q, &parse_callback, q)) {
1931 for (i = 0; i < nfilt; ++i) {
1933 if (filt[i].fmt == NULL)
1935 if (evtr_filter_register(q, &filt[i])) {
/* unwind the filters registered so far */
1936 evtr_deregister_filters(q, filt, i);
/* error unwind path (labels not visible in this view) */
1943 evtr_deregister_callbacks(q);
1945 symtab_destroy(q->symtab);
/* Tear down a query: drop its filters (and, presumably, its buffers). */
1954 evtr_query_destroy(struct evtr_query *q)
1956 evtr_deregister_filters(q, q->filt, q->nfilt);
/*
 * Fetch the next matching event for this query; refuses to run when
 * another consumer has moved the shared stream offset.
 */
1963 evtr_query_next(struct evtr_query *q, evtr_event_t ev)
1965 if (evtr_query_error(q))
1967 /* we may support that in the future */
1968 if (q->off != q->evtr->bytes) {
1969 q->errmsg = "evtr/query offset mismatch";
1972 return _evtr_next_event(q->evtr, ev, q);
/* Accessor: number of cpus seen in the trace (body not fully visible). */
1976 evtr_ncpus(evtr_t evtr)
/* Copy each cpu's frequency multiplier into the caller-supplied array. */
1982 evtr_cpufreqs(evtr_t evtr, double *freqs)
1988 for (i = 0; i < evtr->ncpus; ++i) {
1989 freqs[i] = evtr->cpus[i].freq;