2 * Copyright (c) 2009, 2010 Aggelos Economopoulos. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 #include <sys/queue.h>
53 printd_set_flags(const char *str, unsigned int *flags)
56 * This is suboptimal as we don't detect
65 err(2, "invalid debug flag %c\n", *str);
66 *flags |= 1 << (*str - 'a');
72 MAX_EVHDR_SIZE = PATH_MAX + 200,
73 /* string namespaces */
78 NR_BUCKETS = 1021, /* prime */
80 REC_BOUNDARY = 1 << 14,
82 EVTRF_WR = 0x1, /* open for writing */
86 typedef uint16_t fileid_t;
87 typedef uint16_t funcid_t;
88 typedef uint16_t fmtid_t;
90 struct trace_event_header {
92 uint64_t ts; /* XXX: this should only be part of probe */
93 } __attribute__((packed));
95 struct probe_event_header {
96 struct trace_event_header eh;
98 * For these fields, 0 implies "not available"
107 uint8_t cpu; /* -1 if n/a */
108 } __attribute__((packed));
110 struct string_event_header {
111 struct trace_event_header eh;
115 } __attribute__((packed));
117 struct fmt_event_header {
118 struct trace_event_header eh;
122 } __attribute__((packed));
124 struct cpuinfo_event_header {
127 } __attribute__((packed));
132 struct hashentry *next;
136 struct hashentry *buckets[NR_BUCKETS];
137 uintptr_t (*hashfunc)(uintptr_t);
138 uintptr_t (*cmpfunc)(uintptr_t, uintptr_t);
150 struct event_filter_unresolved {
151 TAILQ_ENTRY(event_filter_unresolved) link;
156 RB_ENTRY(id_map) rb_node;
161 RB_HEAD(id_tree, id_map);
170 RB_HEAD(thread_tree, evtr_thread);
173 struct thread_tree root;
176 struct event_callback {
177 void (*cb)(evtr_event_t, void *data);
178 void *data; /* this field must be malloc()ed */
182 struct evtr_thread *td; /* currently executing thread */
194 * When writing, we keep track of the strings we've
195 * already dumped so we only dump them once.
196 * Paths, function names etc belong to different
199 struct hashtab_str *strings[EVTR_NS_MAX - 1];
201 * When reading, we build a map from id to string.
202 * Every id must be defined at the point of use.
204 struct string_map maps[EVTR_NS_MAX - 1];
207 /* same as above, but for subsys+fmt pairs */
208 struct fmt_map fmtmap;
209 struct hashtab_str *fmts;
211 struct thread_map threads;
225 struct symtab *symtab;
227 struct event_callback **cbs;
229 * Filters that have a format specified and we
230 * need to resolve that to an fmtid
232 TAILQ_HEAD(, event_filter_unresolved) unresolved_filtq;
236 struct evtr_event pending_event;
240 evtr_set_debug(const char *str)
242 printd_set_flags(str, &evtr_debug);
245 static int id_map_cmp(struct id_map *, struct id_map *);
246 RB_PROTOTYPE2(id_tree, id_map, rb_node, id_map_cmp, int);
247 RB_GENERATE2(id_tree, id_map, rb_node, id_map_cmp, int, id);
249 static int thread_cmp(struct evtr_thread *, struct evtr_thread *);
250 RB_PROTOTYPE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *);
251 RB_GENERATE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *, id);
255 validate_string(const char *str)
257 if (!(evtr_debug & MISC))
260 assert(isprint(*str));
265 id_tree_free(struct id_tree *root)
267 struct id_map *v, *n;
269 for (v = RB_MIN(id_tree, root); v; v = n) {
270 n = RB_NEXT(id_tree, root, v);
271 RB_REMOVE(id_tree, root, v);
277 evtr_register_callback(evtr_query_t q, void (*fn)(evtr_event_t, void *), void *d)
279 struct event_callback *cb;
282 if (!(cb = malloc(sizeof(*cb)))) {
288 if (!(cbs = realloc(q->cbs, (++q->ncbs) * sizeof(cb)))) {
295 q->cbs[q->ncbs - 1] = cb;
301 evtr_deregister_callbacks(evtr_query_t q)
305 for (i = 0; i < q->ncbs; ++i) {
314 evtr_run_callbacks(evtr_event_t ev, evtr_query_t q)
316 struct event_callback *cb;
319 for (i = 0; i < q->ncbs; ++i) {
321 cb->cb(ev, cb->data);
327 evtr_cpu(evtr_t evtr, int c)
329 if ((c < 0) || (c >= evtr->ncpus))
331 return &evtr->cpus[c];
336 parse_format_data(evtr_event_t ev, const char *fmt, ...) __attribute__((format (scanf, 2, 3)));
339 parse_format_data(evtr_event_t ev, const char *fmt, ...)
344 if (strcmp(fmt, ev->fmt))
346 vsnprintf(buf, sizeof(buf), fmt, __DECONST(void *, ev->fmtdata));
347 printd(MISC, "string is: %s\n", buf);
349 return vsscanf(buf, fmt, ap);
354 evtr_deregister_filters(evtr_query_t q, evtr_filter_t filt, int nfilt)
356 struct event_filter_unresolved *u, *tmp;
358 TAILQ_FOREACH_MUTABLE(u, &q->unresolved_filtq, link, tmp) {
359 for (i = 0; i < nfilt; ++i) {
360 if (u->filt == &filt[i]) {
361 TAILQ_REMOVE(&q->unresolved_filtq, u, link);
369 evtr_filter_register(evtr_query_t q, evtr_filter_t filt)
371 struct event_filter_unresolved *res;
373 if (!(res = malloc(sizeof(*res)))) {
378 TAILQ_INSERT_TAIL(&q->unresolved_filtq, res, link);
384 evtr_query_needs_parsing(evtr_query_t q)
388 for (i = 0; i < q->nfilt; ++i)
389 if (q->filt[i].ev_type == EVTR_TYPE_STMT)
395 evtr_event_data(evtr_event_t ev, char *buf, size_t len)
398 * XXX: we implicitly trust the format string.
401 if (ev->fmtdatalen) {
402 vsnprintf(buf, len, ev->fmt, __DECONST(void *, ev->fmtdata));
404 strlcpy(buf, ev->fmt, len);
409 evtr_error(evtr_t evtr)
411 return evtr->err || (evtr->errmsg != NULL);
415 evtr_errmsg(evtr_t evtr)
417 return evtr->errmsg ? evtr->errmsg : strerror(evtr->err);
421 evtr_query_error(evtr_query_t q)
423 return q->err || (q->errmsg != NULL) || evtr_error(q->evtr);
427 evtr_query_errmsg(evtr_query_t q)
429 return q->errmsg ? q->errmsg :
430 (q->err ? strerror(q->err) :
431 (evtr_errmsg(q->evtr)));
436 id_map_cmp(struct id_map *a, struct id_map *b)
438 return a->id - b->id;
443 thread_cmp(struct evtr_thread *a, struct evtr_thread *b)
454 #define DEFINE_MAP_FIND(prefix, type) \
457 prefix ## _map_find(struct id_tree *tree, int id)\
459 struct id_map *sid; \
461 sid = id_tree_RB_LOOKUP(tree, id); \
462 return sid ? sid->data : NULL; \
465 DEFINE_MAP_FIND(string, const char *)
466 DEFINE_MAP_FIND(fmt, const struct event_fmt *)
470 thread_map_find(struct thread_map *map, void *id)
472 return thread_tree_RB_LOOKUP(&map->root, id);
475 #define DEFINE_MAP_INSERT(prefix, type, _cmp, _dup) \
478 prefix ## _map_insert(struct id_tree *tree, type data, int id) \
480 struct id_map *sid, *osid; \
482 sid = malloc(sizeof(*sid)); \
488 if ((osid = id_tree_RB_INSERT(tree, sid))) { \
490 if (_cmp((type)osid->data, data)) { \
493 printd(DS, "mapping already exists, skipping\n"); \
494 /* we're OK with redefinitions of an id to the same string */ \
497 /* only do the strdup if we're inserting a new string */ \
498 sid->data = _dup(data); /* XXX: oom */ \
504 thread_map_insert(struct thread_map *map, struct evtr_thread *td)
506 struct evtr_thread *otd;
508 if ((otd = thread_tree_RB_INSERT(&map->root, td))) {
510 * Thread addresses might be reused, we're
512 * DANGER, Will Robinson: this means the user
513 * of the API needs to copy event->td if they
514 * want it to remain stable.
516 free((void *)otd->comm);
517 otd->comm = td->comm;
524 event_fmt_cmp(const struct event_fmt *a, const struct event_fmt *b)
530 ret = strcmp(a->subsys, b->subsys);
532 ret = strcmp(a->subsys, "");
534 } else if (b->subsys) {
535 ret = strcmp("", b->subsys);
539 return strcmp(a->fmt, b->fmt);
544 event_fmt_dup(const struct event_fmt *o)
548 if (!(n = malloc(sizeof(*n)))) {
551 memcpy(n, o, sizeof(*n));
555 DEFINE_MAP_INSERT(string, const char *, strcmp, strdup)
556 DEFINE_MAP_INSERT(fmt, const struct event_fmt *, event_fmt_cmp, event_fmt_dup)
559 hash_find(const struct hashtab *tab, uintptr_t key, uintptr_t *val)
561 struct hashentry *ent;
563 for(ent = tab->buckets[tab->hashfunc(key)];
564 ent && tab->cmpfunc(ent->key, key);
574 hash_insert(struct hashtab *tab, uintptr_t key, uintptr_t val)
576 struct hashentry *ent;
579 if (!(ent = malloc(sizeof(*ent)))) {
580 fprintf(stderr, "out of memory\n");
583 hsh = tab->hashfunc(key);
584 ent->next = tab->buckets[hsh];
587 tab->buckets[hsh] = ent;
593 cmpfunc_pointer(uintptr_t a, uintptr_t b)
600 hashfunc_pointer(uintptr_t p)
602 return p % NR_BUCKETS;
609 if (!(tab = calloc(sizeof(struct hashtab), 1)))
611 tab->hashfunc = &hashfunc_pointer;
612 tab->cmpfunc = &cmpfunc_pointer;
616 struct hashtab_str { /* string -> id map */
623 hashfunc_string(uintptr_t p)
625 const char *str = (char *)p;
626 unsigned long hash = 5381;
630 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
631 return hash % NR_BUCKETS;
/*
 * Hash-table comparison callback for string keys; both arguments are
 * char pointers carried through uintptr_t. Returns 0 iff the strings
 * are equal (strcmp semantics).
 */
static uintptr_t
cmpfunc_string(uintptr_t a, uintptr_t b)
{
	const char *lhs = (const char *)a;
	const char *rhs = (const char *)b;

	return strcmp(lhs, rhs);
}
646 struct hashtab_str *strtab;
647 if (!(strtab = calloc(sizeof(struct hashtab_str), 1)))
649 strtab->tab.hashfunc = &hashfunc_string;
650 strtab->tab.cmpfunc = &cmpfunc_string;
656 strhash_destroy(struct hashtab_str *strtab)
663 strhash_find(struct hashtab_str *strtab, const char *str, uint16_t *id)
667 if (hash_find(&strtab->tab, (uintptr_t)str, &val))
675 strhash_insert(struct hashtab_str *strtab, const char *str, uint16_t *id)
680 if (strtab->id == 0) {
681 fprintf(stderr, "too many strings\n");
686 fprintf(stderr, "out of memory\n");
690 hash_insert(&strtab->tab, (uintptr_t)str, (uintptr_t)val);
698 struct symtab *symtab;
699 if (!(symtab = calloc(sizeof(struct symtab), 1)))
701 symtab->tab.hashfunc = &hashfunc_string;
702 symtab->tab.cmpfunc = &cmpfunc_string;
707 symtab_destroy(struct symtab *symtab)
712 struct evtr_variable *
713 symtab_find(const struct symtab *symtab, const char *str)
717 if (hash_find(&symtab->tab, (uintptr_t)str, &val))
719 return (struct evtr_variable *)val;
723 symtab_insert(struct symtab *symtab, const char *name,
724 struct evtr_variable *var)
728 fprintf(stderr, "out of memory\n");
731 hash_insert(&symtab->tab, (uintptr_t)name, (uintptr_t)var);
737 evtr_filter_match(evtr_query_t q, evtr_filter_t f, evtr_event_t ev)
739 if ((f->cpu != -1) && (f->cpu != ev->cpu))
742 assert(!(f->flags & FILTF_ID));
743 if (ev->type != f->ev_type)
745 if (ev->type == EVTR_TYPE_PROBE) {
746 if (f->fmt && strcmp(ev->fmt, f->fmt))
748 } else if (ev->type == EVTR_TYPE_STMT) {
749 struct evtr_variable *var;
751 /* XXX: no need to do that *every* time */
752 parse_var(f->var, q->symtab, &var);
753 if (var != ev->stmt.var)
761 evtr_match_filters(struct evtr_query *q, evtr_event_t ev)
765 /* no filters means we're interested in all events */
769 for (i = 0; i < q->nfilt; ++i) {
770 if (evtr_filter_match(q, &q->filt[i], ev)) {
780 parse_callback(evtr_event_t ev, void *d)
782 evtr_query_t q = (evtr_query_t)d;
783 if (ev->type != EVTR_TYPE_PROBE)
785 if (!ev->fmt || (ev->fmt[0] != '#'))
788 * Copy the event to ->pending_event, then call
789 * the parser to convert it into a synthesized
790 * EVTR_TYPE_STMT event.
792 memcpy(&q->pending_event, ev, sizeof(ev));
793 parse_string(&q->pending_event, q->symtab, &ev->fmt[1]);
794 if (!evtr_match_filters(q, &q->pending_event))
797 * This will cause us to return ->pending_event next time
800 q->flags |= EVTRQF_PENDING;
805 thread_creation_callback(evtr_event_t ev, void *d)
807 evtr_query_t q = (evtr_query_t)d;
808 evtr_t evtr = q->evtr;
809 struct evtr_thread *td;
813 if (parse_format_data(ev, "new_td %p %s", &ktd, buf) != 2) {
818 if (!(td = malloc(sizeof(*td)))) {
824 if (!(td->comm = strdup(buf))) {
829 printd(DS, "inserting new thread %p: %s\n", td->id, td->comm);
830 thread_map_insert(&evtr->threads, td);
835 thread_switch_callback(evtr_event_t ev, void *d)
837 evtr_t evtr = ((evtr_query_t)d)->evtr;
838 struct evtr_thread *tdp, *tdn;
841 static struct evtr_event tdcr;
842 static char *fmt = "new_td %p %s";
844 char fmtdata[sizeof(void *) + sizeof(char *)];
846 cpu = evtr_cpu(evtr, ev->cpu);
848 printw("invalid cpu %d\n", ev->cpu);
851 if (parse_format_data(ev, "sw %p > %p", &ktdp, &ktdn) != 2) {
854 tdp = thread_map_find(&evtr->threads, ktdp);
856 printd(DS, "switching from unknown thread %p\n", ktdp);
858 tdn = thread_map_find(&evtr->threads, ktdn);
861 * Fake a thread creation event for threads we
862 * haven't seen before.
864 tdcr.type = EVTR_TYPE_PROBE;
870 tdcr.fmtdata = &fmtdata;
871 tdcr.fmtdatalen = sizeof(fmtdata);
874 snprintf(tidstr, sizeof(tidstr), "%p", ktdn);
875 ((void **)fmtdata)[0] = ktdn;
876 ((char **)fmtdata)[1] = &tidstr[0];
877 thread_creation_callback(&tdcr, d);
879 tdn = thread_map_find(&evtr->threads, ktdn);
881 printd(DS, "switching to unknown thread %p\n", ktdn);
885 printd(DS, "cpu %d: switching to thread %p\n", ev->cpu, ktdn);
891 assert_foff_in_sync(evtr_t evtr)
896 * We keep our own offset because we
897 * might want to support mmap()
899 off = ftello(evtr->f);
900 if (evtr->bytes != off) {
901 fprintf(stderr, "bytes %jd, off %jd\n", evtr->bytes, off);
908 evtr_write(evtr_t evtr, const void *buf, size_t bytes)
910 assert_foff_in_sync(evtr);
911 if (fwrite(buf, bytes, 1, evtr->f) != 1) {
913 evtr->errmsg = strerror(errno);
916 evtr->bytes += bytes;
917 assert_foff_in_sync(evtr);
922 * Called after dumping a record to make sure the next
923 * record is REC_ALIGN aligned. This does not make much sense,
924 * as we shouldn't be using packed structs anyway.
928 evtr_dump_pad(evtr_t evtr)
931 static char buf[REC_ALIGN];
933 pad = REC_ALIGN - (evtr->bytes % REC_ALIGN);
935 return evtr_write(evtr, buf, pad);
941 * We make sure that there is a new record every REC_BOUNDARY
942 * bytes, this costs next to nothing in space and allows for
947 evtr_dump_avoid_boundary(evtr_t evtr, size_t bytes)
950 static char buf[256];
952 pad = REC_BOUNDARY - (evtr->bytes % REC_BOUNDARY);
953 /* if adding @bytes would cause us to cross a boundary... */
955 /* then pad to the boundary */
956 for (i = 0; i < (pad / sizeof(buf)); ++i) {
957 if (evtr_write(evtr, buf, sizeof(buf))) {
961 i = pad % sizeof(buf);
963 if (evtr_write(evtr, buf, i)) {
973 evtr_dump_fmt(evtr_t evtr, uint64_t ts, const evtr_event_t ev)
975 struct fmt_event_header fmt;
978 char *subsys = "", buf[1024];
980 if (strlcpy(buf, subsys, sizeof(buf)) >= sizeof(buf)) {
981 evtr->errmsg = "name of subsystem is too large";
985 if (strlcat(buf, ev->fmt, sizeof(buf)) >= sizeof(buf)) {
986 evtr->errmsg = "fmt + name of subsystem is too large";
991 if (!strhash_find(evtr->fmts, buf, &id)) {
994 if ((err = strhash_insert(evtr->fmts, buf, &id))) {
999 fmt.eh.type = EVTR_TYPE_FMT;
1001 fmt.subsys_len = strlen(subsys);
1002 fmt.fmt_len = strlen(ev->fmt);
1004 if (evtr_dump_avoid_boundary(evtr, sizeof(fmt) + fmt.subsys_len +
1007 if (evtr_write(evtr, &fmt, sizeof(fmt)))
1009 if (evtr_write(evtr, subsys, fmt.subsys_len))
1011 if (evtr_write(evtr, ev->fmt, fmt.fmt_len))
1013 if (evtr_dump_pad(evtr))
1019 * Replace string pointers or string ids in fmtdata
1023 mangle_string_ptrs(const char *fmt, uint8_t *fmtdata,
1024 const char *(*replace)(void *, const char *), void *ctx)
1027 size_t skipsize, intsz;
1030 for (f = fmt; f[0] != '\0'; ++f) {
1035 for (p = f; p[0]; ++p) {
1038 * Eat flags. Notice this will accept duplicate
1054 /* Eat minimum field width, if any */
1055 for (; isdigit(p[0]); ++p)
1059 /* Eat precision, if any */
1060 for (; isdigit(p[0]); ++p)
1067 intsz = sizeof(long long);
1069 intsz = sizeof(long);
1073 intsz = sizeof(intmax_t);
1076 intsz = sizeof(ptrdiff_t);
1079 intsz = sizeof(size_t);
1087 intsz = sizeof(int);
1100 skipsize = sizeof(void *);
1104 skipsize = sizeof(double);
1106 skipsize = sizeof(float);
1109 ((const char **)fmtdata)[0] =
1110 replace(ctx, ((char **)fmtdata)[0]);
1111 skipsize = sizeof(char *);
1115 fprintf(stderr, "Unknown conversion specifier %c "
1116 "in fmt starting with %s", p[0], f - 1);
1119 fmtdata += skipsize;
1124 /* XXX: do we really want the timestamp? */
1127 evtr_dump_string(evtr_t evtr, uint64_t ts, const char *str, int ns)
1129 struct string_event_header s;
1133 assert((0 <= ns) && (ns < EVTR_NS_MAX));
1134 if (!strhash_find(evtr->strings[ns], str, &id)) {
1137 if ((err = strhash_insert(evtr->strings[ns], str, &id))) {
1142 printd(DS, "hash_insert %s ns %d id %d\n", str, ns, id);
1143 s.eh.type = EVTR_TYPE_STR;
1147 s.len = strnlen(str, PATH_MAX);
1149 if (evtr_dump_avoid_boundary(evtr, sizeof(s) + s.len))
1151 if (evtr_write(evtr, &s, sizeof(s)))
1153 if (evtr_write(evtr, str, s.len))
1155 if (evtr_dump_pad(evtr))
1160 struct replace_ctx {
1167 replace_strptr(void *_ctx, const char *s)
1169 struct replace_ctx *ctx = _ctx;
1170 return (const char *)(uintptr_t)evtr_dump_string(ctx->evtr, ctx->ts, s,
1176 replace_strid(void *_ctx, const char *s)
1178 struct replace_ctx *ctx = _ctx;
1181 ret = string_map_find(&ctx->evtr->maps[EVTR_NS_DSTR - 1].root,
1184 fprintf(stderr, "Unknown id for data string\n");
1185 ctx->evtr->errmsg = "unknown id for data string";
1186 ctx->evtr->err = !0;
1188 validate_string(ret);
1189 printd(DS, "replacing strid %d (ns %d) with string '%s' (or int %#x)\n",
1190 (int)(uintptr_t)s, EVTR_NS_DSTR, ret ? ret : "NULL", (int)(uintptr_t)ret);
1196 evtr_dump_probe(evtr_t evtr, evtr_event_t ev)
1198 struct probe_event_header kev;
1201 memset(&kev, '\0', sizeof(kev));
1202 kev.eh.type = ev->type;
1204 kev.line = ev->line;
1207 kev.file = evtr_dump_string(evtr, kev.eh.ts, ev->file,
1211 kev.func = evtr_dump_string(evtr, kev.eh.ts, ev->func,
1215 kev.fmt = evtr_dump_fmt(evtr, kev.eh.ts, ev);
1218 struct replace_ctx replctx = {
1222 assert(ev->fmtdatalen <= (int)sizeof(buf));
1223 kev.datalen = ev->fmtdatalen;
1225 * Replace all string pointers with string ids before dumping
1228 memcpy(buf, ev->fmtdata, ev->fmtdatalen);
1229 if (mangle_string_ptrs(ev->fmt, buf,
1230 replace_strptr, &replctx) < 0)
1235 if (evtr_dump_avoid_boundary(evtr, sizeof(kev) + ev->fmtdatalen))
1237 if (evtr_write(evtr, &kev, sizeof(kev)))
1239 if (evtr_write(evtr, buf, ev->fmtdatalen))
1241 if (evtr_dump_pad(evtr))
1248 evtr_dump_sysinfo(evtr_t evtr, evtr_event_t ev)
1250 uint8_t type = EVTR_TYPE_SYSINFO;
1251 uint16_t ncpus = ev->ncpus;
1254 evtr->errmsg = "invalid number of cpus";
1257 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ncpus)))
1259 if (evtr_write(evtr, &type, sizeof(type))) {
1262 if (evtr_write(evtr, &ncpus, sizeof(ncpus))) {
1265 if (evtr_dump_pad(evtr))
1271 evtr_dump_cpuinfo(evtr_t evtr, evtr_event_t ev)
1273 struct cpuinfo_event_header ci;
1276 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ci)))
1278 type = EVTR_TYPE_CPUINFO;
1279 if (evtr_write(evtr, &type, sizeof(type))) {
1283 ci.freq = ev->cpuinfo.freq;
1284 if (evtr_dump_avoid_boundary(evtr, sizeof(ci)))
1286 if (evtr_write(evtr, &ci, sizeof(ci))) {
1289 if (evtr_dump_pad(evtr))
1295 evtr_rewind(evtr_t evtr)
1297 assert((evtr->flags & EVTRF_WR) == 0);
1299 if (fseek(evtr->f, 0, SEEK_SET)) {
1307 evtr_dump_event(evtr_t evtr, evtr_event_t ev)
1310 case EVTR_TYPE_PROBE:
1311 return evtr_dump_probe(evtr, ev);
1312 case EVTR_TYPE_SYSINFO:
1313 return evtr_dump_sysinfo(evtr, ev);
1314 case EVTR_TYPE_CPUINFO:
1315 return evtr_dump_cpuinfo(evtr, ev);
1317 evtr->errmsg = "unknown event type";
1326 if (!(evtr = malloc(sizeof(*evtr)))) {
1332 evtr->errmsg = NULL;
1337 static int evtr_next_event(evtr_t, evtr_event_t);
1340 evtr_open_read(FILE *f)
1343 struct evtr_event ev;
1346 if (!(evtr = evtr_alloc(f))) {
1350 for (i = 0; i < (EVTR_NS_MAX - 1); ++i) {
1351 RB_INIT(&evtr->maps[i].root);
1353 RB_INIT(&evtr->fmtmap.root);
1354 RB_INIT(&evtr->threads.root);
1358 * Load the first event so we can pick up any
1361 if (evtr_next_event(evtr, &ev)) {
1364 if (evtr_rewind(evtr))
1373 evtr_open_write(FILE *f)
1378 if (!(evtr = evtr_alloc(f))) {
1382 evtr->flags = EVTRF_WR;
1383 if (!(evtr->fmts = strhash_new()))
1385 for (i = 0; i < EVTR_NS_MAX; ++i) {
1386 evtr->strings[i] = strhash_new();
1387 if (!evtr->strings[i]) {
1388 for (j = 0; j < i; ++j) {
1389 strhash_destroy(evtr->strings[j]);
1397 strhash_destroy(evtr->fmts);
1405 hashtab_destroy(struct hashtab *h)
1407 struct hashentry *ent, *next;
1409 for (i = 0; i < NR_BUCKETS; ++i) {
1410 for (ent = h->buckets[i]; ent; ent = next) {
1419 evtr_close(evtr_t evtr)
1423 if (evtr->flags & EVTRF_WR) {
1424 hashtab_destroy(&evtr->fmts->tab);
1425 for (i = 0; i < EVTR_NS_MAX; ++i)
1426 hashtab_destroy(&evtr->strings[i]->tab);
1428 id_tree_free(&evtr->fmtmap.root);
1429 for (i = 0; i < EVTR_NS_MAX - 1; ++i) {
1430 id_tree_free(&evtr->maps[i].root);
1438 evtr_read(evtr_t evtr, void *buf, size_t size)
1441 assert_foff_in_sync(evtr);
1442 printd(IO, "evtr_read at %#jx, %zd bytes\n", evtr->bytes, size);
1443 if (fread(buf, size, 1, evtr->f) != 1) {
1444 if (feof(evtr->f)) {
1445 evtr->errmsg = "incomplete record";
1447 evtr->errmsg = strerror(errno);
1451 evtr->bytes += size;
1452 assert_foff_in_sync(evtr);
1458 evtr_load_fmt(evtr_query_t q, char *buf)
1460 evtr_t evtr = q->evtr;
1461 struct fmt_event_header *evh = (struct fmt_event_header *)buf;
1462 struct event_fmt *fmt;
1463 char *subsys = NULL, *fmtstr;
1465 if (!(fmt = malloc(sizeof(*fmt)))) {
1469 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1470 sizeof(*evh) - sizeof(evh->eh))) {
1473 assert(!evh->subsys_len);
1474 if (evh->subsys_len) {
1475 if (!(subsys = malloc(evh->subsys_len))) {
1479 if (evtr_read(evtr, subsys, evh->subsys_len)) {
1482 fmt->subsys = subsys;
1486 if (!(fmtstr = malloc(evh->fmt_len + 1))) {
1490 if (evtr_read(evtr, fmtstr, evh->fmt_len)) {
1493 fmtstr[evh->fmt_len] = '\0';
1496 printd(DS, "fmt_map_insert (%d, %s)\n", evh->id, fmt->fmt);
1497 evtr->err = fmt_map_insert(&evtr->fmtmap.root, fmt, evh->id);
1498 switch (evtr->err) {
1500 evtr->errmsg = "out of memory";
1503 evtr->errmsg = "redefinition of an id to a "
1504 "different format (corrupt input)";
1523 evtr_load_string(evtr_t evtr, char *buf)
1525 char sbuf[PATH_MAX + 1];
1526 struct string_event_header *evh = (struct string_event_header *)buf;
1528 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1529 sizeof(*evh) - sizeof(evh->eh))) {
1532 if (evh->len > PATH_MAX) {
1533 evtr->errmsg = "string too large (corrupt input)";
1536 if (evh->len && evtr_read(evtr, sbuf, evh->len)) {
1540 if (evh->ns >= EVTR_NS_MAX) {
1541 evtr->errmsg = "invalid namespace (corrupt input)";
1544 validate_string(sbuf);
1545 printd(DS, "evtr_load_string:ns %d id %d : \"%s\"\n", evh->ns, evh->id,
1547 evtr->err = string_map_insert(&evtr->maps[evh->ns - 1].root, sbuf, evh->id);
1548 switch (evtr->err) {
1550 evtr->errmsg = "out of memory";
1553 evtr->errmsg = "redefinition of an id to a "
1554 "different string (corrupt input)";
1564 evtr_skip(evtr_t evtr, off_t bytes)
1566 if (fseek(evtr->f, bytes, SEEK_CUR)) {
1568 evtr->errmsg = strerror(errno);
1571 evtr->bytes += bytes;
1576 * Make sure q->buf is at least len bytes
1580 evtr_query_reserve_buf(struct evtr_query *q, int len)
1584 if (q->bufsize >= len)
1586 if (!(tmp = realloc(q->buf, len)))
1595 evtr_load_probe(evtr_t evtr, evtr_event_t ev, char *buf, struct evtr_query *q)
1597 struct probe_event_header *evh = (struct probe_event_header *)buf;
1600 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1601 sizeof(*evh) - sizeof(evh->eh)))
1603 memset(ev, '\0', sizeof(*ev));
1604 ev->ts = evh->eh.ts;
1605 ev->type = EVTR_TYPE_PROBE;
1606 ev->line = evh->line;
1608 if ((cpu = evtr_cpu(evtr, evh->cpu))) {
1614 ev->file = string_map_find(
1615 &evtr->maps[EVTR_NS_PATH - 1].root,
1618 evtr->errmsg = "unknown id for file path";
1620 ev->file = "<unknown>";
1622 validate_string(ev->file);
1625 ev->file = "<unknown>";
1628 const struct event_fmt *fmt;
1629 if (!(fmt = fmt_map_find(&evtr->fmtmap.root, evh->fmt))) {
1630 evtr->errmsg = "unknown id for event fmt";
1635 validate_string(fmt->fmt);
1639 if (evtr_query_reserve_buf(q, evh->datalen + 1)) {
1641 } else if (!evtr_read(evtr, q->buf, evh->datalen)) {
1642 struct replace_ctx replctx = {
1648 ev->fmtdata = q->buf;
1650 * If the format specifies any string pointers, there
1651 * is a string id stored in the fmtdata. Look it up
1652 * and replace it with a string pointer before
1653 * returning it to the user.
1655 if (mangle_string_ptrs(ev->fmt, __DECONST(uint8_t *,
1657 replace_strid, &replctx) < 0)
1661 ((char *)ev->fmtdata)[evh->datalen] = '\0';
1662 ev->fmtdatalen = evh->datalen;
1665 evtr_run_callbacks(ev, q);
1671 evtr_skip_to_record(evtr_t evtr)
1675 skip = REC_ALIGN - (evtr->bytes % REC_ALIGN);
1677 if (fseek(evtr->f, skip, SEEK_CUR)) {
1679 evtr->errmsg = strerror(errno);
1682 evtr->bytes += skip;
1689 evtr_load_sysinfo(evtr_t evtr)
1694 if (evtr_read(evtr, &ncpus, sizeof(ncpus))) {
1699 evtr->cpus = malloc(ncpus * sizeof(struct cpu));
1704 evtr->ncpus = ncpus;
1705 for (i = 0; i < ncpus; ++i) {
1706 evtr->cpus[i].td = NULL;
1707 evtr->cpus[i].freq = -1.0;
1714 evtr_load_cpuinfo(evtr_t evtr)
1716 struct cpuinfo_event_header cih;
1719 if (evtr_read(evtr, &cih, sizeof(cih))) {
1722 if (cih.freq < 0.0) {
1723 evtr->errmsg = "cpu freq is negative";
1728 * Notice that freq is merely a multiplier with
1729 * which we convert a timestamp to seconds; if
1730 * ts is not in cycles, freq is not the frequency.
1732 if (!(cpu = evtr_cpu(evtr, cih.cpu))) {
1733 evtr->errmsg = "freq for invalid cpu";
1737 cpu->freq = cih.freq;
1743 _evtr_next_event(evtr_t evtr, evtr_event_t ev, struct evtr_query *q)
1745 char buf[MAX_EVHDR_SIZE];
1746 int ret, err, ntried, nmatched;
1747 struct trace_event_header *evhdr = (struct trace_event_header *)buf;
1749 for (ret = 0; !ret;) {
1750 if (q->flags & EVTRQF_PENDING) {
1751 q->off = evtr->bytes;
1752 memcpy(ev, &q->pending_event, sizeof(*ev));
1753 q->flags &= ~EVTRQF_PENDING;
1756 if (evtr_read(evtr, &evhdr->type, 1)) {
1757 if (feof(evtr->f)) {
1758 evtr->errmsg = NULL;
1765 * skip pad records -- this will only happen if there's a
1766 * variable sized record close to the boundary
1768 if (evhdr->type == EVTR_TYPE_PAD) {
1769 evtr_skip_to_record(evtr);
1772 if (evhdr->type == EVTR_TYPE_SYSINFO) {
1773 evtr_load_sysinfo(evtr);
1775 } else if (evhdr->type == EVTR_TYPE_CPUINFO) {
1776 evtr_load_cpuinfo(evtr);
1779 if (evtr_read(evtr, buf + 1, sizeof(*evhdr) - 1))
1780 return feof(evtr->f) ? -1 : !0;
1781 switch (evhdr->type) {
1782 case EVTR_TYPE_PROBE:
1784 nmatched = q->nmatched;
1785 if ((err = evtr_load_probe(evtr, ev, buf, q))) {
1797 if (evtr_load_string(evtr, buf)) {
1802 if (evtr_load_fmt(q, buf)) {
1808 evtr->errmsg = "unknown event type (corrupt input?)";
1811 evtr_skip_to_record(evtr);
1813 if (!evtr_match_filters(q, ev)) {
1817 q->off = evtr->bytes;
1821 /* can't get here */
1827 evtr_next_event(evtr_t evtr, evtr_event_t ev)
1829 struct evtr_query *q;
1832 if (!(q = evtr_query_init(evtr, NULL, 0))) {
1836 ret = _evtr_next_event(evtr, ev, q);
1837 evtr_query_destroy(q);
1842 evtr_last_event(evtr_t evtr, evtr_event_t ev)
1846 off_t last_boundary;
1848 if (evtr_error(evtr))
1851 fd = fileno(evtr->f);
1855 * This skips pseudo records, so we can't provide
1856 * an event with all fields filled in this way.
1857 * It's doable, just needs some care. TBD.
1859 if (0 && (st.st_mode & S_IFREG)) {
1861 * Skip to last boundary, that's the closest to the EOF
1862 * location that we are sure contains a header so we can
1863 * pick up the stream.
1865 last_boundary = (st.st_size / REC_BOUNDARY) * REC_BOUNDARY;
1866 /* XXX: ->bytes should be in query */
1867 assert(evtr->bytes == 0);
1868 evtr_skip(evtr, last_boundary);
1873 * If we can't seek, we need to go through the whole file.
1874 * Since you can't seek back, this is pretty useless unless
1875 * you really are interested only in the last event.
1877 while (!evtr_next_event(evtr, ev))
1879 if (evtr_error(evtr))
1886 evtr_query_init(evtr_t evtr, evtr_filter_t filt, int nfilt)
1888 struct evtr_query *q;
1891 if (!(q = malloc(sizeof(*q)))) {
1895 if (!(q->buf = malloc(q->bufsize))) {
1898 if (!(q->symtab = symtab_new()))
1904 TAILQ_INIT(&q->unresolved_filtq);
1909 memset(&q->pending_event, '\0', sizeof(q->pending_event));
1910 if (evtr_register_callback(q, &thread_creation_callback, q)) {
1913 if (evtr_register_callback(q, &thread_switch_callback, q)) {
1916 if (evtr_query_needs_parsing(q) &&
1917 evtr_register_callback(q, &parse_callback, q)) {
1921 for (i = 0; i < nfilt; ++i) {
1923 if (filt[i].fmt == NULL)
1925 if (evtr_filter_register(q, &filt[i])) {
1926 evtr_deregister_filters(q, filt, i);
1933 evtr_deregister_callbacks(q);
1935 symtab_destroy(q->symtab);
1944 evtr_query_destroy(struct evtr_query *q)
1946 evtr_deregister_filters(q, q->filt, q->nfilt);
1953 evtr_query_next(struct evtr_query *q, evtr_event_t ev)
1955 if (evtr_query_error(q))
1957 /* we may support that in the future */
1958 if (q->off != q->evtr->bytes) {
1959 q->errmsg = "evtr/query offset mismatch";
1962 return _evtr_next_event(q->evtr, ev, q);
1966 evtr_ncpus(evtr_t evtr)
1972 evtr_cpufreqs(evtr_t evtr, double *freqs)
1978 for (i = 0; i < evtr->ncpus; ++i) {
1979 freqs[i] = evtr->cpus[i].freq;