2 * Copyright (c) 2009, 2010 Aggelos Economopoulos. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 #include <sys/queue.h>
/*
 * NOTE(review): this file is a garbled extraction -- the enum wrapper,
 * braces and many members are missing; the leading number on each line
 * is residue of the original file's line numbering.
 */
/* Upper bound on any on-disk event header (path plus fixed fields). */
48 MAX_EVHDR_SIZE = PATH_MAX + 200,
49 /* string namespaces */
/* Bucket count for the string hash tables (see hashfunc). */
54 NR_BUCKETS = 1023, /* XXX */
/* A fresh record is guaranteed at least every REC_BOUNDARY bytes
 * (see evtr_dump_avoid_boundary). */
56 REC_BOUNDARY = 1 << 14,
/* evtr flag: trace handle open for writing. */
58 EVTRF_WR = 0x1, /* open for writing */
/* On-disk ids for files, functions and format strings are 16-bit. */
61 typedef uint16_t fileid_t;
62 typedef uint16_t funcid_t;
63 typedef uint16_t fmtid_t;
/*
 * NOTE(review): struct bodies below are fragments -- member lines were
 * lost in extraction; all are packed on-disk record layouts.
 */
/* Common prefix of every on-disk record: type byte plus timestamp. */
65 struct trace_event_header {
67 uint64_t ts; /* XXX: this should only be part of probe */
68 } __attribute__((packed));
/* On-disk probe event record; 0 in an id field means "not available". */
70 struct probe_event_header {
71 struct trace_event_header eh;
73 * For these fields, 0 implies "not available"
82 uint8_t cpu; /* -1 if n/a */
83 } __attribute__((packed));
/* On-disk record defining a string id (written by evtr_dump_string). */
85 struct string_event_header {
86 struct trace_event_header eh;
90 } __attribute__((packed));
/* On-disk record defining a format-string id (written by evtr_dump_fmt). */
92 struct fmt_event_header {
93 struct trace_event_header eh;
97 } __attribute__((packed));
/* On-disk record describing one cpu (written by evtr_dump_cpuinfo). */
99 struct cpuinfo_event_header {
102 } __attribute__((packed));
/* NOTE(review): fragments of the in-memory bookkeeping structures. */
/* Chained hash entry (string -> small integer id); chain link below. */
107 struct hashentry *next;
/* Hash table of NR_BUCKETS chains; see hashfunc/hash_find/hash_insert. */
111 struct hashentry *buckets[NR_BUCKETS];
/* A filter whose fmt string has not been resolved to a fmtid yet. */
120 struct event_filter_unresolved {
121 TAILQ_ENTRY(event_filter_unresolved) link;
/* Red-black tree node mapping an integer id to opaque data. */
126 RB_ENTRY(id_map) rb_node;
131 RB_HEAD(id_tree, id_map);
/* Tree of known threads, keyed by thread address (see thread_cmp). */
140 RB_HEAD(thread_tree, evtr_thread);
143 struct thread_tree root;
/* One registered per-event callback and its opaque argument. */
146 struct event_callback {
147 void (*cb)(evtr_event_t, void *data);
148 void *data; /* this field must be malloc()ed */
/* Per-cpu reader state fragment. */
152 struct evtr_thread *td; /* currently executing thread */
/* NOTE(review): fragment of the central struct evtr trace handle. */
164 * When writing, we keep track of the strings we've
165 * already dumped so we only dump them once.
166 * Paths, function names etc belong to different
169 struct hashtab *strings[EVTR_NS_MAX - 1];
171 * When reading, we build a map from id to string.
172 * Every id must be defined at the point of use.
174 struct string_map maps[EVTR_NS_MAX - 1];
177 /* same as above, but for subsys+fmt pairs */
178 struct fmt_map fmtmap;
179 struct hashtab *fmts;
182 * Filters that have a format specified and we
183 * need to resolve that to an fmtid
185 TAILQ_HEAD(, event_filter_unresolved) unresolved_filtq;
/* Dynamically grown array of registered callbacks (see evtr_register_callback). */
186 struct event_callback **cbs;
188 struct thread_map threads;
/* Set the debug verbosity consumed by printd(). (fragment) */
208 evtr_set_debug(int lvl)
/* Generate red-black tree operations; the *2 variants key lookups on
 * the named member ('id') and must stay consistent with the cmp fn. */
213 static int id_map_cmp(struct id_map *, struct id_map *);
214 RB_PROTOTYPE2(id_tree, id_map, rb_node, id_map_cmp, int);
215 RB_GENERATE2(id_tree, id_map, rb_node, id_map_cmp, int, id);
217 static int thread_cmp(struct evtr_thread *, struct evtr_thread *);
218 RB_PROTOTYPE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *);
219 RB_GENERATE2(thread_tree, evtr_thread, rb_node, thread_cmp, void *, id);
/* Debug printf to stderr (fragment of the macro; guard lines missing). */
221 #define printd(...) \
224 fprintf(stderr, __VA_ARGS__); \
/*
 * Debug check that a string contains only printable characters.
 * NOTE(review): fragment -- the loop around the assert was lost.
 * NOTE(review): isprint() on a plain char is UB for negative values;
 * should cast to unsigned char -- verify against original.
 */
229 validate_string(const char *str)
234 assert(isprint(*str));
/*
 * Free every node of an id tree.
 * NOTE(review): fragment -- the line freeing v is missing.
 */
239 id_tree_free(struct id_tree *root)
241 struct id_map *v, *n;
/* Grab the successor before removing the current node. */
243 for (v = RB_MIN(id_tree, root); v; v = n) {
244 n = RB_NEXT(id_tree, root, v);
245 RB_REMOVE(id_tree, root, v);
/*
 * Register fn(ev, d) to be invoked for every probe event read.
 * NOTE(review): fragment -- error paths and assignments are missing.
 */
251 evtr_register_callback(evtr_t evtr, void (*fn)(evtr_event_t, void *), void *d)
253 struct event_callback *cb;
256 if (!(cb = malloc(sizeof(*cb)))) {
/* Grow the callback array by one; result lands in a temporary so the
 * old array survives a failed realloc.
 * NOTE(review): ++evtr->ncbs happens before the failure check -- a
 * failing realloc leaves ncbs inconsistent; verify the error path. */
262 if (!(cbs = realloc(evtr->cbs, (++evtr->ncbs) * sizeof(cb)))) {
269 evtr->cbs[evtr->ncbs - 1] = cb;
/* Free all registered callbacks. (fragment) */
275 evtr_deregister_callbacks(evtr_t evtr)
279 for (i = 0; i < evtr->ncbs; ++i) {
/* Invoke every registered callback on ev. (fragment) */
288 evtr_run_callbacks(evtr_event_t ev, evtr_t evtr)
290 struct event_callback *cb;
293 for (i = 0; i < evtr->ncbs; ++i) {
295 cb->cb(ev, cb->data);
301 evtr_cpu(evtr_t evtr, int c)
303 if ((c < 0) || (c >= evtr->ncpus))
305 return &evtr->cpus[c];
/*
 * If ev's format string equals fmt exactly, scan ev->fmtdata back out
 * into the varargs; returns the vsscanf() assignment count.
 * NOTE(review): fragment -- buf/ap declarations and va_start/va_end
 * are missing.  Passing ev->fmtdata where a va_list is expected is
 * highly non-portable; verify against the original.
 */
310 parse_format_data(evtr_event_t ev, const char *fmt, ...) __attribute__((format (scanf, 2, 3)));
313 parse_format_data(evtr_event_t ev, const char *fmt, ...)
/* Only parse events generated with this exact format string. */
318 if (strcmp(fmt, ev->fmt))
320 vsnprintf(buf, sizeof(buf), fmt, __DECONST(void *, ev->fmtdata))
321 printd("string is: %s\n", buf);
323 return vsscanf(buf, fmt, ap);
/* Drop any unresolved-filter entries referring to filt[0..nfilt). (fragment) */
328 evtr_deregister_filters(evtr_t evtr, evtr_filter_t filt, int nfilt)
330 struct event_filter_unresolved *u, *tmp;
/* MUTABLE iteration: safe to TAILQ_REMOVE the current entry. */
332 TAILQ_FOREACH_MUTABLE(u, &evtr->unresolved_filtq, link, tmp) {
333 for (i = 0; i < nfilt; ++i) {
334 if (u->filt == &filt[i]) {
335 TAILQ_REMOVE(&evtr->unresolved_filtq, u, link);
/*
 * A fmt id was just loaded: resolve every pending filter whose fmt
 * string matches, marking it FILTF_ID.  (fragment)
 */
343 evtr_resolve_filters(evtr_t evtr, const char *fmt, int id)
345 struct event_filter_unresolved *u, *tmp;
346 TAILQ_FOREACH_MUTABLE(u, &evtr->unresolved_filtq, link, tmp) {
347 if ((u->filt->fmt != NULL) && !strcmp(fmt, u->filt->fmt)) {
349 u->filt->flags |= FILTF_ID;
350 TAILQ_REMOVE(&evtr->unresolved_filtq, u, link);
/* Queue a filter for later fmt-id resolution. (fragment) */
357 evtr_filter_register(evtr_t evtr, evtr_filter_t filt)
359 struct event_filter_unresolved *res;
361 if (!(res = malloc(sizeof(*res)))) {
366 TAILQ_INSERT_TAIL(&evtr->unresolved_filtq, res, link);
/*
 * Render an event's payload into buf (len bytes) using its format
 * string; with no payload the format string itself is the message.
 * (fragment)
 */
371 evtr_event_data(evtr_event_t ev, char *buf, size_t len)
374 * XXX: we implicitly trust the format string.
377 if (ev->fmtdatalen) {
378 vsnprintf(buf, len, ev->fmt, __DECONST(void *, ev->fmtdata));
380 strlcpy(buf, ev->fmt, len);
386 evtr_error(evtr_t evtr)
388 return evtr->err || (evtr->errmsg != NULL);
392 evtr_errmsg(evtr_t evtr)
394 return evtr->errmsg ? evtr->errmsg : strerror(evtr->err);
399 id_map_cmp(struct id_map *a, struct id_map *b)
401 return a->id - b->id;
406 thread_cmp(struct evtr_thread *a, struct evtr_thread *b)
408 return (int)a->id - (int)b->id;
/*
 * Generate <prefix>_map_find(): look up an id in an id tree and return
 * its associated data pointer, NULL when absent.  (fragment)
 */
411 #define DEFINE_MAP_FIND(prefix, type) \
414 prefix ## _map_find(struct id_tree *tree, int id)\
416 struct id_map *sid; \
418 sid = id_tree_RB_LOOKUP(tree, id); \
419 return sid ? sid->data : NULL; \
/* Instantiate finders for the string and fmt id maps. */
422 DEFINE_MAP_FIND(string, const char *)
423 DEFINE_MAP_FIND(fmt, const struct event_fmt *)
/* Find a thread by its (kernel) address. (fragment) */
427 thread_map_find(struct thread_map *map, void *id)
429 return thread_tree_RB_LOOKUP(&map->root, id);
/*
 * Generate <prefix>_map_insert(): map id -> data, tolerating an exact
 * redefinition but (presumably) rejecting a remap to different data --
 * the rejecting branch is missing from this fragment; verify.
 */
432 #define DEFINE_MAP_INSERT(prefix, type, _cmp, _dup) \
435 prefix ## _map_insert(struct id_tree *tree, type data, int id) \
437 struct id_map *sid, *osid; \
439 sid = malloc(sizeof(*sid)); \
/* Non-NULL osid means this id already exists in the tree. */
445 if ((osid = id_tree_RB_INSERT(tree, sid))) { \
447 if (_cmp((type)osid->data, data)) { \
450 printd("mapping already exists, skipping\n"); \
451 /* we're OK with redefinitions of an id to the same string */ \
454 /* only do the strdup if we're inserting a new string */ \
455 sid->data = _dup(data); /* XXX: oom */ \
/*
 * Record a thread; when its address is already known, only the name is
 * updated (thread addresses get reused).  (fragment)
 */
461 thread_map_insert(struct thread_map *map, struct evtr_thread *td)
463 struct evtr_thread *otd;
465 if ((otd = thread_tree_RB_INSERT(&map->root, td))) {
467 * Thread addresses might be reused, we're
469 * DANGER, Will Robinson: this means the user
470 * of the API needs to copy event->td if they
471 * want it to remain stable.
473 free((void *)otd->comm);
474 otd->comm = td->comm;
481 event_fmt_cmp(const struct event_fmt *a, const struct event_fmt *b)
487 ret = strcmp(a->subsys, b->subsys);
489 ret = strcmp(a->subsys, "");
491 } else if (b->subsys) {
492 ret = strcmp("", b->subsys);
496 return strcmp(a->fmt, b->fmt);
/*
 * Duplicate an event format for insertion into the fmt map.
 * NOTE(review): fragment -- only the struct copy is visible; lines that
 * presumably duplicate the subsys/fmt strings are missing, so as shown
 * the copy would share string storage with the original.  Verify.
 */
501 event_fmt_dup(const struct event_fmt *o)
505 if (!(n = malloc(sizeof(*n)))) {
508 memcpy(n, o, sizeof(*n));
/* Instantiate inserters for the string and fmt id maps. */
512 DEFINE_MAP_INSERT(string, const char *, strcmp, strdup)
513 DEFINE_MAP_INSERT(fmt, const struct event_fmt *, event_fmt_cmp, event_fmt_dup)
517 hashfunc(const char *str)
519 unsigned long hash = 5381;
523 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
524 return hash % NR_BUCKETS;
/* Walk the bucket chain looking for an exact string match. (fragment) */
529 hash_find(struct hashtab *tab, const char *str)
531 struct hashentry *ent;
533 for(ent = tab->buckets[hashfunc(str)]; ent && strcmp(ent->str, str);
/*
 * Insert str into the table under a freshly assigned id.
 * NOTE(review): fragment -- id assignment and the overflow check are
 * missing; "too many strings" suggests the 16-bit id space can run out.
 */
541 hash_insert(struct hashtab *tab, const char *str)
543 struct hashentry *ent;
546 if (!(ent = malloc(sizeof(*ent)))) {
547 fprintf(stderr, "out of memory\n");
/* Push the new entry at the head of its bucket chain. */
551 ent->next = tab->buckets[hsh];
552 ent->str = strdup(str);
555 fprintf(stderr, "too many strings\n");
559 tab->buckets[hsh] = ent;
/*
 * Probe-event callback: when the event is a "new_td %p %s" probe,
 * record the new thread (address + name) in evtr->threads.  (fragment)
 */
565 thread_creation_callback(evtr_event_t ev, void *d)
567 evtr_t evtr = (evtr_t)d;
568 struct evtr_thread *td;
572 //printd("thread_creation_callback\n");
/* Anything that doesn't parse as a thread-creation probe is ignored. */
573 if (parse_format_data(ev, "new_td %p %s", &ktd, buf) != 2) {
578 if (!(td = malloc(sizeof(*td)))) {
/* The thread name is copied; td owns the duplicate. */
584 if (!(td->comm = strdup(buf))) {
589 printd("inserting new thread %p: %s\n", td->id, td->comm);
590 thread_map_insert(&evtr->threads, td);
/*
 * Probe-event callback: track which thread runs on which cpu by parsing
 * "sw %p > %p" context-switch probes.  When the incoming thread has not
 * been seen yet, a fake "new_td" creation event is synthesized so it
 * gains a thread-map entry.  (fragment)
 */
595 thread_switch_callback(evtr_event_t ev, void *d)
597 evtr_t evtr = (evtr_t)d;
598 struct evtr_thread *tdp, *tdn;
/* static: the fake event/format only need to live for this call. */
601 static struct evtr_event tdcr;
602 static char *fmt = "new_td %p %s";
/* Packed payload for the fake event: thread pointer + name pointer. */
604 char fmtdata[sizeof(void *) + sizeof(char *)];
606 //printd("thread_switch_callback\n");
607 cpu = evtr_cpu(evtr, ev->cpu);
609 printd("invalid cpu %d\n", ev->cpu);
612 if (parse_format_data(ev, "sw %p > %p", &ktdp, &ktdn) != 2) {
615 tdp = thread_map_find(&evtr->threads, ktdp);
617 printd("switching from unknown thread %p\n", ktdp);
619 tdn = thread_map_find(&evtr->threads, ktdn);
622 * Fake a thread creation event for threads we
623 * haven't seen before.
625 tdcr.type = EVTR_TYPE_PROBE;
631 tdcr.fmtdata = &fmtdata;
632 tdcr.fmtdatalen = sizeof(fmtdata);
/* Use the thread's address, rendered with %p, as its name. */
635 snprintf(tidstr, sizeof(tidstr), "%p", ktdn);
636 ((void **)fmtdata)[0] = ktdn;
637 ((char **)fmtdata)[1] = &tidstr[0];
638 thread_creation_callback(&tdcr, evtr);
/* The synthesized creation event should have made the thread known. */
640 tdn = thread_map_find(&evtr->threads, ktdn);
642 printd("switching to unknown thread %p\n", ktdn);
646 printd("cpu %d: switching to thread %p\n", ev->cpu, ktdn);
/*
 * Debug check: our private byte counter must agree with the stdio
 * offset of the underlying stream.  (fragment)
 */
652 assert_foff_in_sync(evtr_t evtr)
657 * We keep our own offset because we
658 * might want to support mmap()
660 off = ftello(evtr->f);
/* NOTE(review): %jd expects intmax_t -- confirm evtr->bytes/off are
 * cast appropriately in the original. */
661 if (evtr->bytes != off) {
662 fprintf(stderr, "bytes %jd, off %jd\n", evtr->bytes, off);
/*
 * Write bytes to the trace file, recording errno on failure and
 * keeping evtr->bytes in sync.  (fragment)
 */
669 evtr_write(evtr_t evtr, const void *buf, size_t bytes)
671 assert_foff_in_sync(evtr);
/* One fwrite item of size 'bytes': anything but 1 is a failure. */
672 if (fwrite(buf, bytes, 1, evtr->f) != 1) {
674 evtr->errmsg = strerror(errno);
677 evtr->bytes += bytes;
678 assert_foff_in_sync(evtr);
/* (fragment of the original block comment follows) */
683 * Called after dumping a record to make sure the next
684 * record is REC_ALIGN aligned. This does not make much sense,
685 * as we shouldn't be using packed structs anyway.
689 evtr_dump_pad(evtr_t evtr)
/* Static zero-filled buffer supplies the padding bytes. */
692 static char buf[REC_ALIGN];
694 pad = REC_ALIGN - (evtr->bytes % REC_ALIGN);
696 return evtr_write(evtr, buf, pad);
/* (fragment of the original block comment follows) */
702 * We make sure that there is a new record every REC_BOUNDARY
703 * bytes, this costs next to nothing in space and allows for
/* Pad out to the next REC_BOUNDARY if writing 'bytes' would cross it. */
708 evtr_dump_avoid_boundary(evtr_t evtr, size_t bytes)
711 static char buf[256];
713 pad = REC_BOUNDARY - (evtr->bytes % REC_BOUNDARY);
714 /* if adding @bytes would cause us to cross a boundary... */
716 /* then pad to the boundary */
/* Emit the padding in buf-sized chunks, then the remainder. */
717 for (i = 0; i < (pad / sizeof(buf)); ++i) {
718 if (evtr_write(evtr, buf, sizeof(buf))) {
722 i = pad % sizeof(buf);
724 if (evtr_write(evtr, buf, i)) {
/*
 * Dump a format-definition record (subsys + fmt) under a 16-bit id;
 * each distinct subsys+fmt pair is dumped only once (tracked via
 * evtr->fmts).  Returns the id (fragment: return lines missing).
 */
734 evtr_dump_fmt(evtr_t evtr, uint64_t ts, const evtr_event_t ev)
736 struct fmt_event_header fmt;
737 struct hashentry *ent;
738 char *subsys = "", buf[1024];
/* The hash key is the concatenation subsys + fmt. */
740 if (strlcpy(buf, subsys, sizeof(buf)) >= sizeof(buf)) {
741 evtr->errmsg = "name of subsystem is too large";
745 if (strlcat(buf, ev->fmt, sizeof(buf)) >= sizeof(buf)) {
746 evtr->errmsg = "fmt + name of subsystem is too large";
/* Already dumped?  Reuse the existing id. */
751 if ((ent = hash_find(evtr->fmts, buf))) {
754 if (!(ent = hash_insert(evtr->fmts, buf))) {
/* NOTE(review): presumably distinguishes OOM from id-space exhaustion
 * via the id counter -- confirm against hash_insert. */
755 evtr->err = evtr->fmts->id ? ENOMEM : ERANGE;
759 fmt.eh.type = EVTR_TYPE_FMT;
761 fmt.subsys_len = strlen(subsys);
762 fmt.fmt_len = strlen(ev->fmt);
/* Keep header + both strings inside one REC_BOUNDARY window. */
764 if (evtr_dump_avoid_boundary(evtr, sizeof(fmt) + fmt.subsys_len +
767 if (evtr_write(evtr, &fmt, sizeof(fmt)))
769 if (evtr_write(evtr, subsys, fmt.subsys_len))
771 if (evtr_write(evtr, ev->fmt, fmt.fmt_len))
773 if (evtr_dump_pad(evtr))
/* (fragment of the original block comment follows) */
779 * Replace string pointers or string ids in fmtdata
/*
 * Walk fmt the way printf would, advancing through the packed argument
 * buffer fmtdata; every %s argument is routed through replace().
 * Returns a negative value on an unknown conversion specifier.
 * (fragment: conversion dispatch and pointer advancing lines missing)
 */
783 mangle_string_ptrs(const char *fmt, uint8_t *fmtdata,
784 const char *(*replace)(void *, const char *), void *ctx)
787 size_t skipsize, intsz;
790 for (f = fmt; f[0] != '\0'; ++f) {
795 for (p = f; p[0]; ++p) {
798 * Eat flags. Notice this will accept duplicate
814 /* Eat minimum field width, if any */
/* NOTE(review): isdigit() on plain char is UB for negative values --
 * should cast to unsigned char; verify against original. */
815 for (; isdigit(p[0]); ++p)
819 /* Eat precision, if any */
820 for (; isdigit(p[0]); ++p)
/* Length modifiers pick how many bytes the integer argument occupies. */
827 intsz = sizeof(long long);
829 intsz = sizeof(long);
833 intsz = sizeof(intmax_t);
836 intsz = sizeof(ptrdiff_t);
839 intsz = sizeof(size_t);
860 skipsize = sizeof(void *);
864 skipsize = sizeof(double);
866 skipsize = sizeof(float);
/* %s: swap the stored pointer/id through the caller's replace() hook. */
869 ((const char **)fmtdata)[0] =
870 replace(ctx, ((char **)fmtdata)[0]);
871 skipsize = sizeof(char *);
875 fprintf(stderr, "Unknown conversion specifier %c "
876 "in fmt starting with %s", p[0], f - 1);
884 /* XXX: do we really want the timestamp? */
/*
 * Dump a string-definition record in namespace ns under a fresh id;
 * each string is dumped only once per namespace.  Returns the id
 * (fragment: return lines missing).
 */
887 evtr_dump_string(evtr_t evtr, uint64_t ts, const char *str, int ns)
889 struct string_event_header s;
890 struct hashentry *ent;
892 assert((0 <= ns) && (ns < EVTR_NS_MAX));
/* Already dumped in this namespace?  Reuse the existing id. */
893 if ((ent = hash_find(evtr->strings[ns], str))) {
896 if (!(ent = hash_insert(evtr->strings[ns], str))) {
897 evtr->err = evtr->strings[ns]->id ? ENOMEM : ERANGE;
901 printd("hash_insert %s ns %d id %d\n", str, ns, ent->id);
902 s.eh.type = EVTR_TYPE_STR;
/* Strings on disk are capped at PATH_MAX bytes. */
906 s.len = strnlen(str, PATH_MAX);
908 if (evtr_dump_avoid_boundary(evtr, sizeof(s) + s.len))
910 if (evtr_write(evtr, &s, sizeof(s)))
912 if (evtr_write(evtr, str, s.len))
914 if (evtr_dump_pad(evtr))
/*
 * replace() hook used while writing: dump the string and smuggle its
 * integer id back through the const char * return value.  (fragment)
 */
926 replace_strptr(void *_ctx, const char *s)
928 struct replace_ctx *ctx = _ctx;
929 return (const char *)evtr_dump_string(ctx->evtr, ctx->ts, s, EVTR_NS_DSTR);
/*
 * replace() hook used while reading: 's' actually carries a string id;
 * resolve it to the real string.  (fragment)
 */
934 replace_strid(void *_ctx, const char *s)
936 struct replace_ctx *ctx = _ctx;
939 ret = string_map_find(&ctx->evtr->maps[EVTR_NS_DSTR - 1].root,
942 fprintf(stderr, "Unknown id for data string\n");
943 ctx->evtr->errmsg = "unknown id for data string";
946 validate_string(ret);
947 printd("replacing strid %d (ns %d) with string '%s' (or int %#x)\n", (int)s,
948 EVTR_NS_DSTR, ret ? ret : "NULL", (int)ret);
/*
 * Dump one probe event: define its file/func/fmt strings as needed,
 * rewrite %s pointers in the payload to string ids, then write the
 * record followed by the payload.  (fragment)
 */
954 evtr_dump_probe(evtr_t evtr, evtr_event_t ev)
956 struct probe_event_header kev;
959 memset(&kev, '\0', sizeof(kev));
960 kev.eh.type = ev->type;
/* File/function names and the fmt become ids via the dump helpers. */
965 kev.file = evtr_dump_string(evtr, kev.eh.ts, ev->file,
969 kev.func = evtr_dump_string(evtr, kev.eh.ts, ev->func,
973 kev.fmt = evtr_dump_fmt(evtr, kev.eh.ts, ev);
976 struct replace_ctx replctx = {
/* The payload is staged in a local buffer so it can be rewritten. */
980 assert(ev->fmtdatalen <= sizeof(buf));
981 kev.datalen = ev->fmtdatalen;
/* (fragment of the original comment follows) */
983 * Replace all string pointers with string ids before dumping
986 memcpy(buf, ev->fmtdata, ev->fmtdatalen);
987 if (mangle_string_ptrs(ev->fmt, buf,
988 replace_strptr, &replctx) < 0)
993 if (evtr_dump_avoid_boundary(evtr, sizeof(kev) + ev->fmtdatalen))
995 if (evtr_write(evtr, &kev, sizeof(kev)))
997 if (evtr_write(evtr, buf, ev->fmtdatalen))
999 if (evtr_dump_pad(evtr))
/* Dump a SYSINFO record: type byte + 16-bit cpu count. (fragment) */
1006 evtr_dump_sysinfo(evtr_t evtr, evtr_event_t ev)
1008 uint8_t type = EVTR_TYPE_SYSINFO;
1009 uint16_t ncpus = ev->ncpus;
1012 evtr->errmsg = "invalid number of cpus";
1015 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ncpus)))
1017 if (evtr_write(evtr, &type, sizeof(type))) {
1020 if (evtr_write(evtr, &ncpus, sizeof(ncpus))) {
1023 if (evtr_dump_pad(evtr))
/* Dump a CPUINFO record (cpu id + frequency). (fragment) */
1029 evtr_dump_cpuinfo(evtr_t evtr, evtr_event_t ev)
1031 struct cpuinfo_event_header ci;
1034 if (evtr_dump_avoid_boundary(evtr, sizeof(type) + sizeof(ci)))
1036 type = EVTR_TYPE_CPUINFO;
1037 if (evtr_write(evtr, &type, sizeof(type))) {
1041 ci.freq = ev->cpuinfo.freq;
/* NOTE(review): avoid_boundary is called a second time after the type
 * byte is already written -- a pad here would land mid-record; verify
 * against the original. */
1042 if (evtr_dump_avoid_boundary(evtr, sizeof(ci)))
1044 if (evtr_write(evtr, &ci, sizeof(ci))) {
1047 if (evtr_dump_pad(evtr))
/*
 * Seek a read-mode trace back to the beginning (writers are rejected
 * by the assert).  NOTE(review): resetting evtr->bytes is not visible
 * in this fragment -- verify it happens in the missing lines.
 */
1053 evtr_rewind(evtr_t evtr)
1055 assert((evtr->flags & EVTRF_WR) == 0);
1057 if (fseek(evtr->f, 0, SEEK_SET)) {
/* Dispatch an event to the writer routine for its record type. (fragment) */
1065 evtr_dump_event(evtr_t evtr, evtr_event_t ev)
1068 case EVTR_TYPE_PROBE:
1069 return evtr_dump_probe(evtr, ev);
1070 case EVTR_TYPE_SYSINFO:
1071 return evtr_dump_sysinfo(evtr, ev);
1072 case EVTR_TYPE_CPUINFO:
1073 return evtr_dump_cpuinfo(evtr, ev);
1075 evtr->errmsg = "unknown event type";
/* Allocate and minimally initialize a trace handle. (fragment of evtr_alloc) */
1084 if (!(evtr = malloc(sizeof(*evtr)))) {
1090 evtr->errmsg = NULL;
1092 TAILQ_INIT(&evtr->unresolved_filtq);
/*
 * Open a trace stream for reading: initialize the id maps and thread
 * tree, register the thread-tracking callbacks, then prime the stream
 * by reading the first event (to pick up SYSINFO) and rewinding.
 * (fragment)
 */
1097 evtr_open_read(FILE *f)
1100 struct evtr_event ev;
1103 if (!(evtr = evtr_alloc(f))) {
1107 for (i = 0; i < (EVTR_NS_MAX - 1); ++i) {
1108 RB_INIT(&evtr->maps[i].root);
1110 RB_INIT(&evtr->fmtmap.root);
1111 TAILQ_INIT(&evtr->unresolved_filtq);
1114 RB_INIT(&evtr->threads.root);
1117 if (evtr_register_callback(evtr, &thread_creation_callback, evtr)) {
1120 if (evtr_register_callback(evtr, &thread_switch_callback, evtr)) {
/* (fragment of the original comment follows) */
1124 * Load the first event so we can pick up any
1127 if (evtr_next_event(evtr, &ev)) {
1130 if (evtr_rewind(evtr))
1134 evtr_deregister_callbacks(evtr);
/*
 * Open a trace stream for writing: allocate the fmt table and the
 * per-namespace string tables.  (fragment)
 */
1141 evtr_open_write(FILE *f)
1146 if (!(evtr = evtr_alloc(f))) {
1150 evtr->flags = EVTRF_WR;
1151 if (!(evtr->fmts = calloc(sizeof(struct hashtab), 1)))
/* NOTE(review): loop bound is EVTR_NS_MAX but strings[] is declared
 * with EVTR_NS_MAX - 1 entries earlier in this file -- possible
 * off-by-one; verify against the original. */
1154 for (i = 0; i < EVTR_NS_MAX; ++i) {
1155 evtr->strings[i] = calloc(sizeof(struct hashtab), 1);
1156 if (!evtr->strings[i]) {
/* Unwind the tables allocated so far on failure. */
1157 for (j = 0; j < i; ++j) {
1158 free(evtr->strings[j]);
/* Free every entry of a string hash table. (fragment) */
1174 hashtab_destroy(struct hashtab *h)
1176 struct hashentry *ent, *next;
1178 for (i = 0; i < NR_BUCKETS; ++i) {
/* Grab the chain link before the entry is freed. */
1179 for (ent = h->buckets[i]; ent; ent = next) {
/*
 * Release a trace handle: the writer side frees the hash tables, the
 * reader side frees the id maps.  (fragment)
 */
1188 evtr_close(evtr_t evtr)
1192 if (evtr->flags & EVTRF_WR) {
1193 hashtab_destroy(evtr->fmts);
/* NOTE(review): iterates to EVTR_NS_MAX although strings[] is declared
 * with EVTR_NS_MAX - 1 entries earlier -- verify bound. */
1194 for (i = 0; i < EVTR_NS_MAX; ++i)
1195 hashtab_destroy(evtr->strings[i]);
1197 id_tree_free(&evtr->fmtmap.root);
1198 for (i = 0; i < EVTR_NS_MAX - 1; ++i) {
1199 id_tree_free(&evtr->maps[i].root);
/*
 * Read size bytes from the trace, distinguishing EOF/short reads
 * ("incomplete record") from I/O errors, and keep evtr->bytes in sync.
 * (fragment)
 */
1207 evtr_read(evtr_t evtr, void *buf, size_t size)
1210 assert_foff_in_sync(evtr);
1211 // printd("evtr_read at %#jx, %zd bytes\n", evtr->bytes, size);
/* One fread item of 'size' bytes: anything but 1 is a failure. */
1212 if (fread(buf, size, 1, evtr->f) != 1) {
1213 if (feof(evtr->f)) {
1214 evtr->errmsg = "incomplete record";
1216 evtr->errmsg = strerror(errno);
1220 evtr->bytes += size;
1221 assert_foff_in_sync(evtr);
/*
 * Parse a FMT record (common header already in buf): read the rest of
 * the header plus the subsys and fmt strings, install the id -> fmt
 * mapping and resolve any waiting filters.  (fragment)
 */
1227 evtr_load_fmt(evtr_t evtr, char *buf)
1229 struct fmt_event_header *evh = (struct fmt_event_header *)buf;
1230 struct event_fmt *fmt;
1231 char *subsys = NULL, *fmtstr;
1233 if (!(fmt = malloc(sizeof(*fmt)))) {
/* Read the part of the fmt header beyond the common prefix. */
1237 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1238 sizeof(*evh) - sizeof(evh->eh))) {
/* NOTE(review): the assert contradicts the branch right below it --
 * any nonzero subsys_len would abort a debug build; verify intent. */
1241 assert(!evh->subsys_len);
1242 if (evh->subsys_len) {
/* NOTE(review): no +1 for a terminator here, unlike fmtstr below --
 * confirm termination is handled in the missing lines. */
1243 if (!(subsys = malloc(evh->subsys_len))) {
1247 if (evtr_read(evtr, subsys, evh->subsys_len)) {
1250 fmt->subsys = subsys;
1254 if (!(fmtstr = malloc(evh->fmt_len + 1))) {
1258 if (evtr_read(evtr, fmtstr, evh->fmt_len)) {
1261 fmtstr[evh->fmt_len] = '\0';
1264 printd("fmt_map_insert (%d, %s)\n", evh->id, fmt->fmt);
/* Nonzero insert result distinguishes OOM from a conflicting redefinition. */
1265 evtr->err = fmt_map_insert(&evtr->fmtmap.root, fmt, evh->id);
1266 switch (evtr->err) {
1268 evtr->errmsg = "out of memory";
1271 evtr->errmsg = "redefinition of an id to a "
1272 "different format (corrupt input)";
1275 evtr_resolve_filters(evtr, fmt->fmt, evh->id);
/*
 * Parse a STR record: read the rest of the header and the string
 * payload, validate length and namespace, install id -> string.
 * (fragment)
 */
1291 evtr_load_string(evtr_t evtr, char *buf)
1293 char sbuf[PATH_MAX + 1];
1294 struct string_event_header *evh = (struct string_event_header *)buf;
1296 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1297 sizeof(*evh) - sizeof(evh->eh))) {
/* Reject lengths the stack buffer cannot hold (corrupt input). */
1300 if (evh->len > PATH_MAX) {
1301 evtr->errmsg = "string too large (corrupt input)";
1304 if (evh->len && evtr_read(evtr, sbuf, evh->len)) {
1308 if (evh->ns >= EVTR_NS_MAX) {
1309 evtr->errmsg = "invalid namespace (corrupt input)";
/* NOTE(review): sbuf termination (sbuf[evh->len] = '\0') is not
 * visible in this fragment; also ns == 0 would index maps[-1] below --
 * confirm both are handled in the missing lines. */
1312 validate_string(sbuf);
1313 printd("evtr_load_string:ns %d id %d : \"%s\"\n", evh->ns, evh->id,
1315 evtr->err = string_map_insert(&evtr->maps[evh->ns - 1].root, sbuf, evh->id);
1316 switch (evtr->err) {
1318 evtr->errmsg = "out of memory";
1321 evtr->errmsg = "redefinition of an id to a "
1322 "different string (corrupt input)";
/*
 * Does a probe event header satisfy one filter?  The cpu must match
 * (unless the filter cpu is the -1 wildcard) and, when the filter has a
 * resolved fmt id, the ids must match.  (fragment)
 */
1332 evtr_filter_match(evtr_filter_t f, struct probe_event_header *pev)
1334 if ((f->cpu != -1) && (f->cpu != pev->cpu))
/* (fragment of the original comment follows) */
1339 * If we don't have an id for the required format
1340 * string, the format string won't match anyway
1341 * (we require that id <-> fmt mappings appear
1342 * before the first appearance of the fmt string),
1343 * so don't bother comparing.
1345 if (!(f->flags & FILTF_ID))
1347 if(pev->fmt == f->fmtid)
/* Match a probe event against all of a query's filters. (fragment) */
1354 evtr_match_filters(struct evtr_query *q, struct probe_event_header *pev)
1358 /* no filters means we're interested in all events */
1362 for (i = 0; i < q->nfilt; ++i) {
1363 if (evtr_filter_match(&q->filt[i], pev)) {
/* Seek forward in the trace, keeping evtr->bytes in sync. (fragment) */
1373 evtr_skip(evtr_t evtr, off_t bytes)
1375 if (fseek(evtr->f, bytes, SEEK_CUR)) {
1377 evtr->errmsg = strerror(errno);
1380 evtr->bytes += bytes;
/* (fragment of the original comment follows) */
1385 * Make sure q->buf is at least len bytes
/* Grow the query's scratch buffer on demand; no-op when big enough. */
1389 evtr_query_reserve_buf(struct evtr_query *q, int len)
1393 if (q->bufsize >= len)
/* Temporary keeps the old buffer alive if realloc fails. */
1395 if (!(tmp = realloc(q->buf, len)))
/*
 * Parse a PROBE record into *ev: read the rest of the header, resolve
 * the file/func/fmt ids to strings, copy the payload into the query
 * buffer and swap embedded string ids for real pointers, then run the
 * registered callbacks and apply the query's filters.  Returns -1 when
 * the event does not match.  (fragment)
 */
1404 evtr_load_probe(evtr_t evtr, evtr_event_t ev, char *buf, struct evtr_query *q)
1406 struct probe_event_header *evh = (struct probe_event_header *)buf;
1409 if (evtr_read(evtr, buf + sizeof(struct trace_event_header),
1410 sizeof(*evh) - sizeof(evh->eh)))
1412 memset(ev, '\0', sizeof(*ev));
1413 ev->ts = evh->eh.ts;
1414 ev->type = EVTR_TYPE_PROBE;
1415 ev->line = evh->line;
/* Attribute the event to the thread currently on this cpu, if known. */
1417 if ((cpu = evtr_cpu(evtr, evh->cpu))) {
1423 ev->file = string_map_find(
1424 &evtr->maps[EVTR_NS_PATH - 1].root,
1427 evtr->errmsg = "unknown id for file path";
1429 ev->file = "<unknown>";
1431 validate_string(ev->file);
1434 ev->file = "<unknown>";
1437 const struct event_fmt *fmt;
1438 if (!(fmt = fmt_map_find(&evtr->fmtmap.root, evh->fmt))) {
1439 evtr->errmsg = "unknown id for event fmt";
1444 validate_string(fmt->fmt);
/* +1 so the payload can be NUL-terminated below. */
1448 if (evtr_query_reserve_buf(q, evh->datalen + 1)) {
1450 } else if (!evtr_read(evtr, q->buf, evh->datalen)) {
1451 struct replace_ctx replctx = {
1457 ev->fmtdata = q->buf;
/* (fragment of the original comment follows) */
1459 * If the format specifies any string pointers, there
1460 * is a string id stored in the fmtdata. Look it up
1461 * and replace it with a string pointer before
1462 * returning it to the user.
1464 if (mangle_string_ptrs(ev->fmt, __DECONST(uint8_t *,
1466 replace_strid, &replctx) < 0)
1470 ((char *)ev->fmtdata)[evh->datalen] = '\0';
1471 ev->fmtdatalen = evh->datalen;
1474 evtr_run_callbacks(ev, evtr);
1475 /* we can't filter before running the callbacks */
1476 if (!evtr_match_filters(q, evh)) {
1477 return -1; /* no match */
/* Skip pad bytes up to the next REC_ALIGN boundary. (fragment) */
1485 evtr_skip_to_record(evtr_t evtr)
1489 skip = REC_ALIGN - (evtr->bytes % REC_ALIGN);
1491 if (fseek(evtr->f, skip, SEEK_CUR)) {
1493 evtr->errmsg = strerror(errno);
1496 evtr->bytes += skip;
/*
 * Parse a SYSINFO record: read the cpu count and allocate the per-cpu
 * state array.  (fragment)
 */
1503 evtr_load_sysinfo(evtr_t evtr)
1508 if (evtr_read(evtr, &ncpus, sizeof(ncpus))) {
1513 evtr->cpus = malloc(ncpus * sizeof(struct cpu));
1518 evtr->ncpus = ncpus;
/* Mark each cpu's running thread and frequency as unknown. */
1519 for (i = 0; i < ncpus; ++i) {
1520 evtr->cpus[i].td = NULL;
1521 evtr->cpus[i].freq = -1.0;
/*
 * Parse a CPUINFO record and store the cpu's timestamp-conversion
 * frequency.  (fragment)
 */
1528 evtr_load_cpuinfo(evtr_t evtr)
1530 struct cpuinfo_event_header cih;
1533 if (evtr_read(evtr, &cih, sizeof(cih))) {
/* Negative frequencies are rejected as corrupt input. */
1536 if (cih.freq < 0.0) {
1537 evtr->errmsg = "cpu freq is negative";
/* (fragment of the original comment follows) */
1542 * Notice that freq is merely a multiplier with
1543 * which we convert a timestamp to seconds; if
1544 * ts is not in cycles, freq is not the frequency.
1546 if (!(cpu = evtr_cpu(evtr, cih.cpu))) {
1547 evtr->errmsg = "freq for invalid cpu";
1551 cpu->freq = cih.freq;
/*
 * Core record pump: read records until one yields an event for the
 * caller (a probe matching q's filters) or EOF/error.  PAD, SYSINFO
 * and CPUINFO records are consumed internally; STR and FMT records
 * update the id maps.  Returns 0 on success, -1 at EOF, nonzero on
 * error.  (fragment)
 */
1557 _evtr_next_event(evtr_t evtr, evtr_event_t ev, struct evtr_query *q)
1559 char buf[MAX_EVHDR_SIZE];
1560 int ret, err, ntried, nmatched;
1561 struct trace_event_header *evhdr = (struct trace_event_header *)buf;
1563 for (ret = 0; !ret;) {
/* (fragment of the original comment follows) */
1565 * skip pad records -- this will only happen if there's a
1566 * variable sized record close to the boundary
/* Peek at the one-byte record type first. */
1568 if (evtr_read(evtr, &evhdr->type, 1))
1569 return feof(evtr->f) ? -1 : !0;
1570 if (evhdr->type == EVTR_TYPE_PAD) {
1571 evtr_skip_to_record(evtr);
1574 if (evhdr->type == EVTR_TYPE_SYSINFO) {
1575 evtr_load_sysinfo(evtr);
1577 } else if (evhdr->type == EVTR_TYPE_CPUINFO) {
1578 evtr_load_cpuinfo(evtr);
/* Read the rest of the common header for record types that carry one. */
1581 if (evtr_read(evtr, buf + 1, sizeof(*evhdr) - 1))
1582 return feof(evtr->f) ? -1 : !0;
1583 switch (evhdr->type) {
1584 case EVTR_TYPE_PROBE:
1586 nmatched = q->nmatched;
1587 if ((err = evtr_load_probe(evtr, ev, buf, q))) {
1599 if (evtr_load_string(evtr, buf)) {
1604 if (evtr_load_fmt(evtr, buf)) {
1610 evtr->errmsg = "unknown event type (corrupt input?)";
/* Records are padded on disk; realign before the next iteration. */
1613 evtr_skip_to_record(evtr);
1615 q->off = evtr->bytes;
1619 /* can't get here */
/* Fetch the next event via a throwaway, filterless query. (fragment) */
1624 evtr_next_event(evtr_t evtr, evtr_event_t ev)
1626 struct evtr_query *q;
1629 if (!(q = evtr_query_init(evtr, NULL, 0))) {
1633 ret = _evtr_next_event(evtr, ev, q);
1634 evtr_query_destroy(q);
/*
 * Scan forward to the last event of the trace.  The seek-to-last-
 * boundary fast path is compiled out ("if (0 && ...)"); see the
 * original comments.  (fragment)
 */
1639 evtr_last_event(evtr_t evtr, evtr_event_t ev)
1643 off_t last_boundary;
1645 fd = fileno(evtr->f);
/* (fragment of the original comment follows) */
1649 * This skips pseudo records, so we can't provide
1650 * an event with all fields filled in this way.
1651 * It's doable, just needs some care. TBD.
1653 if (0 && (st.st_mode & S_IFREG)) {
/* (fragment of the original comment follows) */
1655 * Skip to last boundary, that's the closest to the EOF
1656 * location that we are sure contains a header so we can
1657 * pick up the stream.
1659 last_boundary = (st.st_size / REC_BOUNDARY) * REC_BOUNDARY;
1660 /* XXX: ->bytes should be in query */
1661 assert(evtr->bytes == 0);
1662 evtr_skip(evtr, last_boundary);
/* (fragment of the original comment follows) */
1667 * If we can't seek, we need to go through the whole file.
1668 * Since you can't seek back, this is pretty useless unless
1669 * you really are interested only in the last event.
/* Drain the stream; ev retains the last successfully read event. */
1671 while (!evtr_next_event(evtr, ev))
1673 if (evtr_error(evtr))
/*
 * Create a query over evtr with nfilt filters; filters carrying an
 * unresolved fmt string are queued for id resolution.  (fragment)
 */
1680 evtr_query_init(evtr_t evtr, evtr_filter_t filt, int nfilt)
1682 struct evtr_query *q;
1685 if (!(q = malloc(sizeof(*q)))) {
1689 if (!(q->buf = malloc(q->bufsize))) {
1697 for (i = 0; i < nfilt; ++i) {
/* Filters without a fmt string need no resolution. */
1699 if (filt[i].fmt == NULL)
1701 if (evtr_filter_register(evtr, &filt[i])) {
/* Roll back the filters registered so far on failure. */
1702 evtr_deregister_filters(evtr, filt, i);
/* Tear down a query and unregister its filters. (fragment) */
1716 evtr_query_destroy(struct evtr_query *q)
1718 evtr_deregister_filters(q->evtr, q->filt, q->nfilt);
/*
 * Fetch this query's next matching event.  Interleaving queries on one
 * stream is unsupported: the stream must be where this query left it.
 * (fragment)
 */
1724 evtr_query_next(struct evtr_query *q, evtr_event_t ev)
1726 /* we may support that in the future */
1727 if (q->off != q->evtr->bytes)
1729 return _evtr_next_event(q->evtr, ev, q);
/* Number of cpus reported by the trace's SYSINFO record. (fragment) */
1733 evtr_ncpus(evtr_t evtr)
/* Copy each cpu's frequency into freqs[0..ncpus). (fragment) */
1739 evtr_cpufreqs(evtr_t evtr, double *freqs)
1745 for (i = 0; i < evtr->ncpus; ++i) {
1746 freqs[i] = evtr->cpus[i].freq;