Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/annotate.c | 60
-rw-r--r--  tools/perf/util/build-id.c | 11
-rw-r--r--  tools/perf/util/callchain.c | 15
-rw-r--r--  tools/perf/util/callchain.h | 11
-rw-r--r--  tools/perf/util/cpumap.h | 2
-rw-r--r--  tools/perf/util/dso.c | 10
-rw-r--r--  tools/perf/util/dso.h | 17
-rw-r--r--  tools/perf/util/event.c | 31
-rw-r--r--  tools/perf/util/event.h | 41
-rw-r--r--  tools/perf/util/evlist.c | 294
-rw-r--r--  tools/perf/util/evlist.h | 21
-rw-r--r--  tools/perf/util/evsel.c | 581
-rw-r--r--  tools/perf/util/evsel.h | 19
-rw-r--r--  tools/perf/util/header.c | 161
-rw-r--r--  tools/perf/util/header.h | 40
-rw-r--r--  tools/perf/util/hist.c | 4
-rw-r--r--  tools/perf/util/hist.h | 26
-rw-r--r--  tools/perf/util/include/linux/string.h | 1
-rw-r--r--  tools/perf/util/machine.c | 155
-rw-r--r--  tools/perf/util/machine.h | 14
-rw-r--r--  tools/perf/util/map.c | 67
-rw-r--r--  tools/perf/util/map.h | 13
-rw-r--r--  tools/perf/util/parse-events.c | 174
-rw-r--r--  tools/perf/util/parse-events.h | 11
-rw-r--r--  tools/perf/util/parse-events.l | 4
-rw-r--r--  tools/perf/util/parse-events.y | 62
-rw-r--r--  tools/perf/util/pmu.c | 87
-rw-r--r--  tools/perf/util/pmu.h | 5
-rw-r--r--  tools/perf/util/python.c | 21
-rw-r--r--  tools/perf/util/record.c | 108
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 14
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 9
-rw-r--r--  tools/perf/util/session.c | 241
-rw-r--r--  tools/perf/util/session.h | 14
-rw-r--r--  tools/perf/util/sort.c | 12
-rw-r--r--  tools/perf/util/sort.h | 13
-rw-r--r--  tools/perf/util/stat.c | 6
-rw-r--r--  tools/perf/util/stat.h | 9
-rw-r--r--  tools/perf/util/string.c | 24
-rw-r--r--  tools/perf/util/symbol-elf.c | 174
-rw-r--r--  tools/perf/util/symbol-minimal.c | 7
-rw-r--r--  tools/perf/util/symbol.c | 285
-rw-r--r--  tools/perf/util/symbol.h | 5
-rw-r--r--  tools/perf/util/thread.c | 11
-rw-r--r--  tools/perf/util/thread.h | 23
-rw-r--r--  tools/perf/util/tool.h | 10
-rw-r--r--  tools/perf/util/top.h | 2
-rw-r--r--  tools/perf/util/trace-event-info.c | 96
-rw-r--r--  tools/perf/util/trace-event-parse.c | 6
-rw-r--r--  tools/perf/util/trace-event-read.c | 52
-rw-r--r--  tools/perf/util/trace-event-scripting.c | 3
-rw-r--r--  tools/perf/util/trace-event.h | 21
-rw-r--r--  tools/perf/util/unwind.c | 2
-rw-r--r--  tools/perf/util/util.c | 92
-rw-r--r--  tools/perf/util/util.h | 5
55 files changed, 2436 insertions(+), 766 deletions(-)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index d102716c43a1..bfc5a27597d6 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -110,10 +110,10 @@ static int jump__parse(struct ins_operands *ops)
{
const char *s = strchr(ops->raw, '+');
- ops->target.addr = strtoll(ops->raw, NULL, 16);
+ ops->target.addr = strtoull(ops->raw, NULL, 16);
if (s++ != NULL)
- ops->target.offset = strtoll(s, NULL, 16);
+ ops->target.offset = strtoull(s, NULL, 16);
else
ops->target.offset = UINT64_MAX;
@@ -821,11 +821,55 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
if (dl == NULL)
return -1;
+ if (dl->ops.target.offset == UINT64_MAX)
+ dl->ops.target.offset = dl->ops.target.addr -
+ map__rip_2objdump(map, sym->start);
+
+ /*
+ * kcore has no symbols, so add the call target name if it is on the
+ * same map.
+ */
+ if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
+ struct symbol *s;
+ u64 ip = dl->ops.target.addr;
+
+ if (ip >= map->start && ip <= map->end) {
+ ip = map->map_ip(map, ip);
+ s = map__find_symbol(map, ip, NULL);
+ if (s && s->start == ip)
+ dl->ops.target.name = strdup(s->name);
+ }
+ }
+
disasm__add(&notes->src->source, dl);
return 0;
}
+static void delete_last_nop(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct list_head *list = &notes->src->source;
+ struct disasm_line *dl;
+
+ while (!list_empty(list)) {
+ dl = list_entry(list->prev, struct disasm_line, node);
+
+ if (dl->ins && dl->ins->ops) {
+ if (dl->ins->ops != &nop_ops)
+ return;
+ } else {
+ if (!strstr(dl->line, " nop ") &&
+ !strstr(dl->line, " nopl ") &&
+ !strstr(dl->line, " nopw "))
+ return;
+ }
+
+ list_del(&dl->node);
+ disasm_line__free(dl);
+ }
+}
+
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
{
struct dso *dso = map->dso;
@@ -864,7 +908,8 @@ fallback:
free_filename = false;
}
- if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
+ if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(dso)) {
char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
char *build_id_msg = NULL;
@@ -898,7 +943,7 @@ fallback:
snprintf(command, sizeof(command),
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -d %s %s -C %s|grep -v %s|expand",
+ " -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
objdump_path ? objdump_path : "objdump",
disassembler_style ? "-M " : "",
disassembler_style ? disassembler_style : "",
@@ -918,6 +963,13 @@ fallback:
if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
break;
+ /*
+ * kallsyms does not have symbol sizes so there may be a nop at the end.
+ * Remove it.
+ */
+ if (dso__is_kcore(dso))
+ delete_last_nop(sym);
+
pclose(file);
out_free_filename:
if (free_filename)
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 5295625c0c00..fb584092eb88 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -18,13 +18,14 @@
int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __maybe_unused,
+ struct perf_sample *sample,
struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->pid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
@@ -33,7 +34,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
}
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- event->ip.ip, &al);
+ sample->ip, &al);
if (al.map != NULL)
al.map->dso->hit = 1;
@@ -47,7 +48,9 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
__maybe_unused,
struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+ struct thread *thread = machine__findnew_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 42b6a632fe7b..482f68081cd8 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -15,19 +15,12 @@
#include <errno.h>
#include <math.h>
+#include "hist.h"
#include "util.h"
#include "callchain.h"
__thread struct callchain_cursor callchain_cursor;
-bool ip_callchain__valid(struct ip_callchain *chain,
- const union perf_event *event)
-{
- unsigned int chain_size = event->header.size;
- chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
- return chain->nr * sizeof(u64) <= chain_size;
-}
-
#define chain_for_each_child(child, parent) \
list_for_each_entry(child, &parent->children, siblings)
@@ -327,7 +320,8 @@ append_chain(struct callchain_node *root,
/*
* Lookup in the current node
* If we have a symbol, then compare the start to match
- * anywhere inside a function.
+ * anywhere inside a function, unless function
+ * mode is disabled.
*/
list_for_each_entry(cnode, &root->val, list) {
struct callchain_cursor_node *node;
@@ -339,7 +333,8 @@ append_chain(struct callchain_node *root,
sym = node->sym;
- if (cnode->ms.sym && sym) {
+ if (cnode->ms.sym && sym &&
+ callchain_param.key == CCKEY_FUNCTION) {
if (cnode->ms.sym->start != sym->start)
break;
} else if (cnode->ip != node->ip)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 3ee9f67d5af0..2b585bc308cf 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -41,12 +41,18 @@ struct callchain_param;
typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
u64, struct callchain_param *);
+enum chain_key {
+ CCKEY_FUNCTION,
+ CCKEY_ADDRESS
+};
+
struct callchain_param {
enum chain_mode mode;
u32 print_limit;
double min_percent;
sort_chain_func_t sort;
enum chain_order order;
+ enum chain_key key;
};
struct callchain_list {
@@ -103,11 +109,6 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
-struct ip_callchain;
-union perf_event;
-
-bool ip_callchain__valid(struct ip_callchain *chain,
- const union perf_event *event);
/*
* Initialize a cursor before adding entries inside, but keep
* the previously allocated entries as a cache.
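
For reference, the intent of the CCKEY_FUNCTION/CCKEY_ADDRESS split added above can be sketched as a standalone predicate; the helper name below is illustrative and not part of the patch:

/*
 * Sketch only: the match rule append_chain() now applies.  With
 * CCKEY_FUNCTION two entries match when they fall inside the same
 * function; with CCKEY_ADDRESS (or when symbols are missing) they
 * must share the exact address.
 */
static bool chain_entry__match(struct callchain_list *cnode,
			       struct callchain_cursor_node *node)
{
	if (cnode->ms.sym && node->sym &&
	    callchain_param.key == CCKEY_FUNCTION)
		return cnode->ms.sym->start == node->sym->start;

	return cnode->ip == node->ip;
}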
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 9bed02e5fb3d..b123bb9d6f55 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -41,7 +41,7 @@ static inline int cpu_map__nr(const struct cpu_map *map)
return map ? map->nr : 1;
}
-static inline bool cpu_map__all(const struct cpu_map *map)
+static inline bool cpu_map__empty(const struct cpu_map *map)
{
return map ? map->map[0] == -1 : true;
}
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index c4374f07603c..e3c1ff8512c8 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -78,6 +78,8 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
symbol_conf.symfs, build_id_hex, build_id_hex + 2);
break;
+ case DSO_BINARY_TYPE__VMLINUX:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
snprintf(file, size, "%s%s",
symbol_conf.symfs, dso->long_name);
@@ -93,11 +95,14 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
dso->long_name);
break;
+ case DSO_BINARY_TYPE__KCORE:
+ case DSO_BINARY_TYPE__GUEST_KCORE:
+ snprintf(file, size, "%s", dso->long_name);
+ break;
+
default:
case DSO_BINARY_TYPE__KALLSYMS:
- case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
- case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__JAVA_JIT:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
@@ -419,6 +424,7 @@ struct dso *dso__new(const char *name)
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->data_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->loaded = 0;
+ dso->rel = 0;
dso->sorted_by_name = 0;
dso->has_build_id = 0;
dso->kernel = DSO_TYPE_USER;
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index d51aaf272c68..b793053335d6 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/rbtree.h>
+#include <stdbool.h>
#include "types.h"
#include "map.h"
@@ -20,6 +21,8 @@ enum dso_binary_type {
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__KCORE,
+ DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -84,6 +87,7 @@ struct dso {
u8 lname_alloc:1;
u8 sorted_by_name;
u8 loaded;
+ u8 rel;
u8 build_id[BUILD_ID_SIZE];
const char *short_name;
char *long_name;
@@ -146,4 +150,17 @@ size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
size_t dso__fprintf_symbols_by_name(struct dso *dso,
enum map_type type, FILE *fp);
size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+
+static inline bool dso__is_vmlinux(struct dso *dso)
+{
+ return dso->data_type == DSO_BINARY_TYPE__VMLINUX ||
+ dso->data_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+}
+
+static inline bool dso__is_kcore(struct dso *dso)
+{
+ return dso->data_type == DSO_BINARY_TYPE__KCORE ||
+ dso->data_type == DSO_BINARY_TYPE__GUEST_KCORE;
+}
+
#endif /* __PERF_DSO */
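
As a hedged usage sketch of the two inline helpers added above (the caller below is hypothetical, not part of this patch), code that needs real instruction bytes can now distinguish kcore- and vmlinux-backed kernel DSOs from plain kallsyms:

/* Sketch only: pick a disassembly source for a kernel DSO. */
static const char *kernel_dso__disasm_path(struct dso *dso)
{
	/* kcore and vmlinux both contain code bytes objdump can read */
	if (dso__is_kcore(dso) || dso__is_vmlinux(dso))
		return dso->long_name;

	/* kallsyms alone has addresses but no bytes, so nothing to return */
	return NULL;
}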
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 5cd13d768cec..8d51f21107aa 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -595,6 +595,7 @@ void thread__find_addr_map(struct thread *self,
struct addr_location *al)
{
struct map_groups *mg = &self->mg;
+ bool load_map = false;
al->thread = self;
al->addr = addr;
@@ -609,11 +610,13 @@ void thread__find_addr_map(struct thread *self,
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
mg = &machine->kmaps;
+ load_map = true;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
mg = &machine->kmaps;
+ load_map = true;
} else {
/*
* 'u' means guest os user space.
@@ -654,18 +657,25 @@ try_again:
mg = &machine->kmaps;
goto try_again;
}
- } else
+ } else {
+ /*
+ * Kernel maps might be changed when loading symbols so loading
+ * must be done prior to using kernel maps.
+ */
+ if (load_map)
+ map__load(al->map, machine->symbol_filter);
al->addr = al->map->map_ip(al->map, al->addr);
+ }
}
void thread__find_addr_location(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter)
+ struct addr_location *al)
{
thread__find_addr_map(thread, machine, cpumode, type, addr, al);
if (al->map != NULL)
- al->sym = map__find_symbol(al->map, al->addr, filter);
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
else
al->sym = NULL;
}
@@ -673,11 +683,11 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine,
int perf_event__preprocess_sample(const union perf_event *event,
struct machine *machine,
struct addr_location *al,
- struct perf_sample *sample,
- symbol_filter_t filter)
+ struct perf_sample *sample)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->pid);
if (thread == NULL)
return -1;
@@ -686,7 +696,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
!strlist__has_entry(symbol_conf.comm_list, thread->comm))
goto out_filtered;
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->tid);
/*
* Have we already created the kernel maps for this machine?
*
@@ -699,7 +709,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
machine__create_kernel_maps(machine);
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- event->ip.ip, al);
+ sample->ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -717,7 +727,8 @@ int perf_event__preprocess_sample(const union perf_event *event,
dso->long_name)))))
goto out_filtered;
- al->sym = map__find_symbol(al->map, al->addr, filter);
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
}
if (symbol_conf.sym_list &&
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 181389535c0c..93130d856bf0 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -8,16 +8,6 @@
#include "map.h"
#include "build-id.h"
-/*
- * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
- */
-struct ip_event {
- struct perf_event_header header;
- u64 ip;
- u32 pid, tid;
- unsigned char __more_data[];
-};
-
struct mmap_event {
struct perf_event_header header;
u32 pid, tid;
@@ -63,7 +53,8 @@ struct read_event {
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
- PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
+ PERF_SAMPLE_IDENTIFIER)
struct sample_event {
struct perf_event_header header;
@@ -71,6 +62,7 @@ struct sample_event {
};
struct regs_dump {
+ u64 abi;
u64 *regs;
};
@@ -80,6 +72,23 @@ struct stack_dump {
char *data;
};
+struct sample_read_value {
+ u64 value;
+ u64 id;
+};
+
+struct sample_read {
+ u64 time_enabled;
+ u64 time_running;
+ union {
+ struct {
+ u64 nr;
+ struct sample_read_value *values;
+ } group;
+ struct sample_read_value one;
+ };
+};
+
struct perf_sample {
u64 ip;
u32 pid, tid;
@@ -97,6 +106,7 @@ struct perf_sample {
struct branch_stack *branch_stack;
struct regs_dump user_regs;
struct stack_dump user_stack;
+ struct sample_read read;
};
#define PERF_MEM_DATA_SRC_NONE \
@@ -116,7 +126,7 @@ struct build_id_event {
enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_USER_TYPE_START = 64,
PERF_RECORD_HEADER_ATTR = 64,
- PERF_RECORD_HEADER_EVENT_TYPE = 65,
+ PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */
PERF_RECORD_HEADER_TRACING_DATA = 66,
PERF_RECORD_HEADER_BUILD_ID = 67,
PERF_RECORD_FINISHED_ROUND = 68,
@@ -148,7 +158,6 @@ struct tracing_data_event {
union perf_event {
struct perf_event_header header;
- struct ip_event ip;
struct mmap_event mmap;
struct comm_event comm;
struct fork_event fork;
@@ -216,12 +225,14 @@ struct addr_location;
int perf_event__preprocess_sample(const union perf_event *self,
struct machine *machine,
struct addr_location *al,
- struct perf_sample *sample,
- symbol_filter_t filter);
+ struct perf_sample *sample);
const char *perf_event__name(unsigned int id);
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 sample_regs_user, u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 sample_regs_user, u64 read_format,
const struct perf_sample *sample,
bool swapped);
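
For orientation, the new struct sample_read above is filled either with a single value/id pair or, under PERF_FORMAT_GROUP, with an array of them. A minimal dump sketch (hypothetical helper, assumes <stdio.h> and <inttypes.h> are available as elsewhere in perf):

/*
 * Sketch only: walk a parsed PERF_SAMPLE_READ payload, whose layout
 * depends on whether PERF_FORMAT_GROUP was requested.
 */
static void sample_read__dump(const struct perf_sample *sample, u64 read_format)
{
	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		for (i = 0; i < sample->read.group.nr; i++)
			printf("  id %" PRIu64 " value %" PRIu64 "\n",
			       sample->read.group.values[i].id,
			       sample->read.group.values[i].value);
	} else {
		printf("  id %" PRIu64 " value %" PRIu64 "\n",
		       sample->read.one.id, sample->read.one.value);
	}
}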
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8065ce8fa9a5..b8727ae45e3b 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -14,6 +14,7 @@
#include "target.h"
#include "evlist.h"
#include "evsel.h"
+#include "debug.h"
#include <unistd.h>
#include "parse-events.h"
@@ -48,26 +49,19 @@ struct perf_evlist *perf_evlist__new(void)
return evlist;
}
-void perf_evlist__config(struct perf_evlist *evlist,
- struct perf_record_opts *opts)
+/**
+ * perf_evlist__set_id_pos - set the positions of event ids.
+ * @evlist: selected event list
+ *
+ * Events with compatible sample types all have the same id_pos
+ * and is_pos. For convenience, put a copy on evlist.
+ */
+void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel;
- /*
- * Set the evsel leader links before we configure attributes,
- * since some might depend on this info.
- */
- if (opts->group)
- perf_evlist__set_leader(evlist);
-
- if (evlist->cpus->map[0] < 0)
- opts->no_inherit = true;
-
- list_for_each_entry(evsel, &evlist->entries, node) {
- perf_evsel__config(evsel, opts);
+ struct perf_evsel *first = perf_evlist__first(evlist);
- if (evlist->nr_entries > 1)
- perf_evsel__set_sample_id(evsel);
- }
+ evlist->id_pos = first->id_pos;
+ evlist->is_pos = first->is_pos;
}
static void perf_evlist__purge(struct perf_evlist *evlist)
@@ -100,15 +94,20 @@ void perf_evlist__delete(struct perf_evlist *evlist)
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
list_add_tail(&entry->node, &evlist->entries);
- ++evlist->nr_entries;
+ if (!evlist->nr_entries++)
+ perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
struct list_head *list,
int nr_entries)
{
+ bool set_id_pos = !evlist->nr_entries;
+
list_splice_tail(list, &evlist->entries);
evlist->nr_entries += nr_entries;
+ if (set_id_pos)
+ perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
@@ -209,6 +208,21 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
return NULL;
}
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+ (strcmp(evsel->name, name) == 0))
+ return evsel;
+ }
+
+ return NULL;
+}
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler)
{
@@ -232,7 +246,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
- if (!perf_evsel__is_group_leader(pos))
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
@@ -250,7 +264,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
- if (!perf_evsel__is_group_leader(pos))
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
@@ -259,6 +273,44 @@ void perf_evlist__enable(struct perf_evlist *evlist)
}
}
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return 0;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_DISABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return -EINVAL;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -302,6 +354,24 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+
+ if (errno != ENOTTY)
+ return -1;
+
+ /* Legacy way to get event id.. All hail to old kernels! */
+
+ /*
+ * This way does not work with group format read, so bail
+ * out in that case.
+ */
+ if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+ return -1;
if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
read(fd, &read_data, sizeof(read_data)) == -1)
@@ -312,25 +382,39 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++id_idx;
- perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
+ id = read_data[id_idx];
+
+ add:
+ perf_evlist__id_add(evlist, evsel, cpu, thread, id);
return 0;
}
-struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
struct hlist_head *head;
struct perf_sample_id *sid;
int hash;
- if (evlist->nr_entries == 1)
- return perf_evlist__first(evlist);
-
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->heads[hash];
hlist_for_each_entry(sid, head, node)
if (sid->id == id)
- return sid->evsel;
+ return sid;
+
+ return NULL;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+ struct perf_sample_id *sid;
+
+ if (evlist->nr_entries == 1)
+ return perf_evlist__first(evlist);
+
+ sid = perf_evlist__id2sid(evlist, id);
+ if (sid)
+ return sid->evsel;
if (!perf_evlist__sample_id_all(evlist))
return perf_evlist__first(evlist);
@@ -338,6 +422,55 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
return NULL;
}
+static int perf_evlist__event2id(struct perf_evlist *evlist,
+ union perf_event *event, u64 *id)
+{
+ const u64 *array = event->sample.array;
+ ssize_t n;
+
+ n = (event->header.size - sizeof(event->header)) >> 3;
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ if (evlist->id_pos >= n)
+ return -1;
+ *id = array[evlist->id_pos];
+ } else {
+ if (evlist->is_pos > n)
+ return -1;
+ n -= evlist->is_pos;
+ *id = array[n];
+ }
+ return 0;
+}
+
+static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+ union perf_event *event)
+{
+ struct hlist_head *head;
+ struct perf_sample_id *sid;
+ int hash;
+ u64 id;
+
+ if (evlist->nr_entries == 1)
+ return perf_evlist__first(evlist);
+
+ if (perf_evlist__event2id(evlist, event, &id))
+ return NULL;
+
+ /* Synthesized events have an id of zero */
+ if (!id)
+ return perf_evlist__first(evlist);
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, head, node) {
+ if (sid->id == id)
+ return sid->evsel;
+ }
+ return NULL;
+}
+
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
struct perf_mmap *md = &evlist->mmap[idx];
@@ -403,16 +536,20 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return event;
}
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+ if (evlist->mmap[idx].base != NULL) {
+ munmap(evlist->mmap[idx].base, evlist->mmap_len);
+ evlist->mmap[idx].base = NULL;
+ }
+}
+
void perf_evlist__munmap(struct perf_evlist *evlist)
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- if (evlist->mmap[i].base != NULL) {
- munmap(evlist->mmap[i].base, evlist->mmap_len);
- evlist->mmap[i].base = NULL;
- }
- }
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ __perf_evlist__munmap(evlist, i);
free(evlist->mmap);
evlist->mmap = NULL;
@@ -421,7 +558,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
- if (cpu_map__all(evlist->cpus))
+ if (cpu_map__empty(evlist->cpus))
evlist->nr_mmaps = thread_map__nr(evlist->threads);
evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
return evlist->mmap != NULL ? 0 : -ENOMEM;
@@ -450,6 +587,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
+ pr_debug2("perf event ring buffer mmapped per cpu\n");
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
@@ -477,12 +615,8 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
return 0;
out_unmap:
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (evlist->mmap[cpu].base != NULL) {
- munmap(evlist->mmap[cpu].base, evlist->mmap_len);
- evlist->mmap[cpu].base = NULL;
- }
- }
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ __perf_evlist__munmap(evlist, cpu);
return -1;
}
@@ -492,6 +626,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
int thread;
int nr_threads = thread_map__nr(evlist->threads);
+ pr_debug2("perf event ring buffer mmapped per thread\n");
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
@@ -517,12 +652,8 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
return 0;
out_unmap:
- for (thread = 0; thread < nr_threads; thread++) {
- if (evlist->mmap[thread].base != NULL) {
- munmap(evlist->mmap[thread].base, evlist->mmap_len);
- evlist->mmap[thread].base = NULL;
- }
- }
+ for (thread = 0; thread < nr_threads; thread++)
+ __perf_evlist__munmap(evlist, thread);
return -1;
}
@@ -573,7 +704,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return -ENOMEM;
}
- if (cpu_map__all(cpus))
+ if (cpu_map__empty(cpus))
return perf_evlist__mmap_per_thread(evlist, prot, mask);
return perf_evlist__mmap_per_cpu(evlist, prot, mask);
@@ -650,20 +781,66 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
+ struct perf_evsel *pos;
+
+ if (evlist->nr_entries == 1)
+ return true;
+
+ if (evlist->id_pos < 0 || evlist->is_pos < 0)
+ return false;
+
+ list_for_each_entry(pos, &evlist->entries, node) {
+ if (pos->id_pos != evlist->id_pos ||
+ pos->is_pos != evlist->is_pos)
+ return false;
+ }
+
+ return true;
+}
+
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ if (evlist->combined_sample_type)
+ return evlist->combined_sample_type;
+
+ list_for_each_entry(evsel, &evlist->entries, node)
+ evlist->combined_sample_type |= evsel->attr.sample_type;
+
+ return evlist->combined_sample_type;
+}
+
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ evlist->combined_sample_type = 0;
+ return __perf_evlist__combined_sample_type(evlist);
+}
+
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+ u64 read_format = first->attr.read_format;
+ u64 sample_type = first->attr.sample_type;
list_for_each_entry_continue(pos, &evlist->entries, node) {
- if (first->attr.sample_type != pos->attr.sample_type)
+ if (read_format != pos->attr.read_format)
return false;
}
+ /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
+ if ((sample_type & PERF_SAMPLE_READ) &&
+ !(read_format & PERF_FORMAT_ID)) {
+ return false;
+ }
+
return true;
}
-u64 perf_evlist__sample_type(struct perf_evlist *evlist)
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
struct perf_evsel *first = perf_evlist__first(evlist);
- return first->attr.sample_type;
+ return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
@@ -692,6 +869,9 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu) * 2;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(data->id);
out:
return size;
}
@@ -783,13 +963,6 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
- * Do a dummy execvp to get the PLT entry resolved,
- * so we avoid the resolver overhead on the real
- * execvp call.
- */
- execvp("", (char **)argv);
-
- /*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
@@ -838,7 +1011,7 @@ out_close_ready_pipe:
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
if (evlist->workload.cork_fd > 0) {
- char bf;
+ char bf = 0;
int ret;
/*
* Remove the cork, let it rip!
@@ -857,7 +1030,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
struct perf_sample *sample)
{
- struct perf_evsel *evsel = perf_evlist__first(evlist);
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
return perf_evsel__parse_sample(evsel, event, sample);
}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0583d36252be..880d7139d2fb 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -32,6 +32,9 @@ struct perf_evlist {
int nr_fds;
int nr_mmaps;
int mmap_len;
+ int id_pos;
+ int is_pos;
+ u64 combined_sample_type;
struct {
int cork_fd;
pid_t pid;
@@ -71,6 +74,10 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name);
+
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
int cpu, int thread, u64 id);
@@ -78,11 +85,15 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
+
union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
+void perf_evlist__set_id_pos(struct perf_evlist *evlist);
+bool perf_can_sample_identifier(void);
void perf_evlist__config(struct perf_evlist *evlist,
struct perf_record_opts *opts);
@@ -99,6 +110,11 @@ void perf_evlist__munmap(struct perf_evlist *evlist);
void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
void perf_evlist__set_selected(struct perf_evlist *evlist,
struct perf_evsel *evsel);
@@ -118,7 +134,9 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist);
void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);
-u64 perf_evlist__sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
@@ -127,6 +145,7 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
struct list_head *list,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c9c7494506a1..3612183e2cc5 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -9,18 +9,20 @@
#include <byteswap.h>
#include <linux/bitops.h>
-#include "asm/bug.h"
#include <lk/debugfs.h>
-#include "event-parse.h"
+#include <traceevent/event-parse.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <sys/resource.h>
+#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
-#include <linux/hw_breakpoint.h>
-#include <linux/perf_event.h>
#include "perf_regs.h"
+#include "debug.h"
static struct {
bool sample_id_all;
@@ -29,7 +31,7 @@ static struct {
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-static int __perf_evsel__sample_size(u64 sample_type)
+int __perf_evsel__sample_size(u64 sample_type)
{
u64 mask = sample_type & PERF_SAMPLE_MASK;
int size = 0;
@@ -45,6 +47,72 @@ static int __perf_evsel__sample_size(u64 sample_type)
return size;
}
+/**
+ * __perf_evsel__calc_id_pos - calculate id_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
+ * sample_event.
+ */
+static int __perf_evsel__calc_id_pos(u64 sample_type)
+{
+ int idx = 0;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 0;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_IP)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_ADDR)
+ idx += 1;
+
+ return idx;
+}
+
+/**
+ * __perf_evsel__calc_is_pos - calculate is_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position (counting backwards) of the event id
+ * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
+ * sample_id_all is used there is an id sample appended to non-sample events.
+ */
+static int __perf_evsel__calc_is_pos(u64 sample_type)
+{
+ int idx = 1;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 1;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ idx += 1;
+
+ return idx;
+}
+
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
+{
+ evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
+ evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
+}
+
void hists__init(struct hists *hists)
{
memset(hists, 0, sizeof(*hists));
@@ -61,6 +129,7 @@ void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
if (!(evsel->attr.sample_type & bit)) {
evsel->attr.sample_type |= bit;
evsel->sample_size += sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
}
}
@@ -70,12 +139,19 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
if (evsel->attr.sample_type & bit) {
evsel->attr.sample_type &= ~bit;
evsel->sample_size -= sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
}
}
-void perf_evsel__set_sample_id(struct perf_evsel *evsel)
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool can_sample_identifier)
{
- perf_evsel__set_sample_bit(evsel, ID);
+ if (can_sample_identifier) {
+ perf_evsel__reset_sample_bit(evsel, ID);
+ perf_evsel__set_sample_bit(evsel, IDENTIFIER);
+ } else {
+ perf_evsel__set_sample_bit(evsel, ID);
+ }
evsel->attr.read_format |= PERF_FORMAT_ID;
}
@@ -88,6 +164,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
+ perf_evsel__calc_id_pos(evsel);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
@@ -246,6 +323,7 @@ const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
"major-faults",
"alignment-faults",
"emulation-faults",
+ "dummy",
};
static const char *__perf_evsel__sw_name(u64 config)
@@ -490,6 +568,7 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
void perf_evsel__config(struct perf_evsel *evsel,
struct perf_record_opts *opts)
{
+ struct perf_evsel *leader = evsel->leader;
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
@@ -499,6 +578,25 @@ void perf_evsel__config(struct perf_evsel *evsel,
perf_evsel__set_sample_bit(evsel, IP);
perf_evsel__set_sample_bit(evsel, TID);
+ if (evsel->sample_read) {
+ perf_evsel__set_sample_bit(evsel, READ);
+
+ /*
+ * We need ID even in the case of a single event, because
+ * PERF_SAMPLE_READ processes ID-specific data.
+ */
+ perf_evsel__set_sample_id(evsel, false);
+
+ /*
+ * Apply group format only if we belong to group
+ * with more than one members.
+ */
+ if (leader->nr_members > 1) {
+ attr->read_format |= PERF_FORMAT_GROUP;
+ attr->inherit = 0;
+ }
+ }
+
/*
* We default some events to a 1 default interval. But keep
* it a weak assumption overridable by the user.
@@ -514,6 +612,15 @@ void perf_evsel__config(struct perf_evsel *evsel,
}
}
+ /*
+ * Disable sampling for all group members other
+ * than leader in case leader 'leads' the sampling.
+ */
+ if ((leader != evsel) && leader->sample_read) {
+ attr->sample_freq = 0;
+ attr->sample_period = 0;
+ }
+
if (opts->no_samples)
attr->sample_freq = 0;
@@ -605,15 +712,15 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM;
}
-int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
- const char *filter)
+static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
+ int ioc, void *arg)
{
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
int fd = FD(evsel, cpu, thread),
- err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
+ err = ioctl(fd, ioc, arg);
if (err)
return err;
@@ -623,6 +730,21 @@ int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
return 0;
}
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_SET_FILTER,
+ (void *)filter);
+}
+
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_ENABLE,
+ 0);
+}
+
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
@@ -817,12 +939,72 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
return fd;
}
+#define __PRINT_ATTR(fmt, cast, field) \
+ fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field)
+
+#define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field)
+#define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field)
+#define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field)
+#define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field)
+
+#define PRINT_ATTR2N(name1, field1, name2, field2) \
+ fprintf(fp, " %-19s %u %-19s %u\n", \
+ name1, attr->field1, name2, attr->field2)
+
+#define PRINT_ATTR2(field1, field2) \
+ PRINT_ATTR2N(#field1, field1, #field2, field2)
+
+static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
+{
+ size_t ret = 0;
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+ ret += fprintf(fp, "perf_event_attr:\n");
+
+ ret += PRINT_ATTR_U32(type);
+ ret += PRINT_ATTR_U32(size);
+ ret += PRINT_ATTR_X64(config);
+ ret += PRINT_ATTR_U64(sample_period);
+ ret += PRINT_ATTR_U64(sample_freq);
+ ret += PRINT_ATTR_X64(sample_type);
+ ret += PRINT_ATTR_X64(read_format);
+
+ ret += PRINT_ATTR2(disabled, inherit);
+ ret += PRINT_ATTR2(pinned, exclusive);
+ ret += PRINT_ATTR2(exclude_user, exclude_kernel);
+ ret += PRINT_ATTR2(exclude_hv, exclude_idle);
+ ret += PRINT_ATTR2(mmap, comm);
+ ret += PRINT_ATTR2(freq, inherit_stat);
+ ret += PRINT_ATTR2(enable_on_exec, task);
+ ret += PRINT_ATTR2(watermark, precise_ip);
+ ret += PRINT_ATTR2(mmap_data, sample_id_all);
+ ret += PRINT_ATTR2(exclude_host, exclude_guest);
+ ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
+ "excl.callchain_user", exclude_callchain_user);
+
+ ret += PRINT_ATTR_U32(wakeup_events);
+ ret += PRINT_ATTR_U32(wakeup_watermark);
+ ret += PRINT_ATTR_X32(bp_type);
+ ret += PRINT_ATTR_X64(bp_addr);
+ ret += PRINT_ATTR_X64(config1);
+ ret += PRINT_ATTR_U64(bp_len);
+ ret += PRINT_ATTR_X64(config2);
+ ret += PRINT_ATTR_X64(branch_sample_type);
+ ret += PRINT_ATTR_X64(sample_regs_user);
+ ret += PRINT_ATTR_U32(sample_stack_user);
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+
+ return ret;
+}
+
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads)
{
int cpu, thread;
unsigned long flags = 0;
int pid = -1, err;
+ enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
if (evsel->fd == NULL &&
perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
@@ -840,6 +1022,9 @@ retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
+ if (verbose >= 2)
+ perf_event_attr__fprintf(&evsel->attr, stderr);
+
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
@@ -849,6 +1034,9 @@ retry_sample_id:
pid = threads->map[thread];
group_fd = get_group_fd(evsel, cpu, thread);
+retry_open:
+ pr_debug2("perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
+ pid, cpus->map[cpu], group_fd, flags);
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
pid,
@@ -858,12 +1046,37 @@ retry_sample_id:
err = -errno;
goto try_fallback;
}
+ set_rlimit = NO_CHANGE;
}
}
return 0;
try_fallback:
+ /*
+ * perf stat needs between 5 and 22 fds per CPU. When we run out
+ * of them try to increase the limits.
+ */
+ if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
+ struct rlimit l;
+ int old_errno = errno;
+
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (set_rlimit == NO_CHANGE)
+ l.rlim_cur = l.rlim_max;
+ else {
+ l.rlim_cur = l.rlim_max + 1000;
+ l.rlim_max = l.rlim_cur;
+ }
+ if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
+ set_rlimit++;
+ errno = old_errno;
+ goto retry_open;
+ }
+ }
+ errno = old_errno;
+ }
+
if (err != -EINVAL || cpu > 0 || thread > 0)
goto out_close;
@@ -951,6 +1164,11 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
array += ((event->header.size -
sizeof(event->header)) / sizeof(u64)) - 1;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ sample->id = *array;
+ array--;
+ }
+
if (type & PERF_SAMPLE_CPU) {
u.val64 = *array;
if (swapped) {
@@ -994,24 +1212,30 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
return 0;
}
-static bool sample_overlap(const union perf_event *event,
- const void *offset, u64 size)
+static inline bool overflow(const void *endp, u16 max_size, const void *offset,
+ u64 size)
{
- const void *base = event;
+ return size > max_size || offset + size > endp;
+}
- if (offset + size > base + event->header.size)
- return true;
+#define OVERFLOW_CHECK(offset, size, max_size) \
+ do { \
+ if (overflow(endp, (max_size), (offset), (size))) \
+ return -EFAULT; \
+ } while (0)
- return false;
-}
+#define OVERFLOW_CHECK_u64(offset) \
+ OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
u64 type = evsel->attr.sample_type;
- u64 regs_user = evsel->attr.sample_regs_user;
bool swapped = evsel->needs_swap;
const u64 *array;
+ u16 max_size = event->header.size;
+ const void *endp = (void *)event + max_size;
+ u64 sz;
/*
* used for cross-endian analysis. See git commit 65014ab3
@@ -1033,11 +1257,22 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array = event->sample.array;
+ /*
+ * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+ * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+ * check the format does not go past the end of the event.
+ */
if (evsel->sample_size + sizeof(event->header) > event->header.size)
return -EFAULT;
+ data->id = -1ULL;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ data->id = *array;
+ array++;
+ }
+
if (type & PERF_SAMPLE_IP) {
- data->ip = event->ip.ip;
+ data->ip = *array;
array++;
}
@@ -1066,7 +1301,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->id = -1ULL;
if (type & PERF_SAMPLE_ID) {
data->id = *array;
array++;
@@ -1096,25 +1330,62 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
}
if (type & PERF_SAMPLE_READ) {
- fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
- return -1;
+ u64 read_format = evsel->attr.read_format;
+
+ OVERFLOW_CHECK_u64(array);
+ if (read_format & PERF_FORMAT_GROUP)
+ data->read.group.nr = *array;
+ else
+ data->read.one.value = *array;
+
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_enabled = *array;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_running = *array;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ const u64 max_group_nr = UINT64_MAX /
+ sizeof(struct sample_read_value);
+
+ if (data->read.group.nr > max_group_nr)
+ return -EFAULT;
+ sz = data->read.group.nr *
+ sizeof(struct sample_read_value);
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->read.group.values =
+ (struct sample_read_value *)array;
+ array = (void *)array + sz;
+ } else {
+ OVERFLOW_CHECK_u64(array);
+ data->read.one.id = *array;
+ array++;
+ }
}
if (type & PERF_SAMPLE_CALLCHAIN) {
- if (sample_overlap(event, array, sizeof(data->callchain->nr)))
- return -EFAULT;
-
- data->callchain = (struct ip_callchain *)array;
+ const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
- if (sample_overlap(event, array, data->callchain->nr))
+ OVERFLOW_CHECK_u64(array);
+ data->callchain = (struct ip_callchain *)array++;
+ if (data->callchain->nr > max_callchain_nr)
return -EFAULT;
-
- array += 1 + data->callchain->nr;
+ sz = data->callchain->nr * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
}
if (type & PERF_SAMPLE_RAW) {
- const u64 *pdata;
-
+ OVERFLOW_CHECK_u64(array);
u.val64 = *array;
if (WARN_ONCE(swapped,
"Endianness of raw data not corrected!\n")) {
@@ -1123,65 +1394,71 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
-
- if (sample_overlap(event, array, sizeof(u32)))
- return -EFAULT;
-
data->raw_size = u.val32[0];
- pdata = (void *) array + sizeof(u32);
+ array = (void *)array + sizeof(u32);
- if (sample_overlap(event, pdata, data->raw_size))
- return -EFAULT;
-
- data->raw_data = (void *) pdata;
-
- array = (void *)array + data->raw_size + sizeof(u32);
+ OVERFLOW_CHECK(array, data->raw_size, max_size);
+ data->raw_data = (void *)array;
+ array = (void *)array + data->raw_size;
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
- u64 sz;
+ const u64 max_branch_nr = UINT64_MAX /
+ sizeof(struct branch_entry);
- data->branch_stack = (struct branch_stack *)array;
- array++; /* nr */
+ OVERFLOW_CHECK_u64(array);
+ data->branch_stack = (struct branch_stack *)array++;
+ if (data->branch_stack->nr > max_branch_nr)
+ return -EFAULT;
sz = data->branch_stack->nr * sizeof(struct branch_entry);
- sz /= sizeof(u64);
- array += sz;
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
}
if (type & PERF_SAMPLE_REGS_USER) {
- /* First u64 tells us if we have any regs in sample. */
- u64 avail = *array++;
+ OVERFLOW_CHECK_u64(array);
+ data->user_regs.abi = *array;
+ array++;
+
+ if (data->user_regs.abi) {
+ u64 regs_user = evsel->attr.sample_regs_user;
- if (avail) {
+ sz = hweight_long(regs_user) * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
data->user_regs.regs = (u64 *)array;
- array += hweight_long(regs_user);
+ array = (void *)array + sz;
}
}
if (type & PERF_SAMPLE_STACK_USER) {
- u64 size = *array++;
+ OVERFLOW_CHECK_u64(array);
+ sz = *array++;
data->user_stack.offset = ((char *)(array - 1)
- (char *) event);
- if (!size) {
+ if (!sz) {
data->user_stack.size = 0;
} else {
+ OVERFLOW_CHECK(array, sz, max_size);
data->user_stack.data = (char *)array;
- array += size / sizeof(*array);
+ array = (void *)array + sz;
+ OVERFLOW_CHECK_u64(array);
data->user_stack.size = *array++;
}
}
data->weight = 0;
if (type & PERF_SAMPLE_WEIGHT) {
+ OVERFLOW_CHECK_u64(array);
data->weight = *array;
array++;
}
data->data_src = PERF_MEM_DATA_SRC_NONE;
if (type & PERF_SAMPLE_DATA_SRC) {
+ OVERFLOW_CHECK_u64(array);
data->data_src = *array;
array++;
}
@@ -1189,12 +1466,105 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
return 0;
}
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 sample_regs_user, u64 read_format)
+{
+ size_t sz, result = sizeof(struct sample_event);
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_IP)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TIME)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ADDR)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_CPU)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_READ) {
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ result += sizeof(u64);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ result += sizeof(u32);
+ result += sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight_long(sample_regs_user) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ result += sizeof(u64);
+ if (sz) {
+ result += sz;
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ result += sizeof(u64);
+
+ return result;
+}
+
int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 sample_regs_user, u64 read_format,
const struct perf_sample *sample,
bool swapped)
{
u64 *array;
-
+ size_t sz;
/*
* used for cross-endian analysis. See git commit 65014ab3
* for why this goofiness is needed.
@@ -1203,8 +1573,13 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
array = event->sample.array;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
if (type & PERF_SAMPLE_IP) {
- event->ip.ip = sample->ip;
+ *array = sample->ip;
array++;
}
@@ -1262,6 +1637,97 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
array++;
}
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ *array = sample->read.group.nr;
+ else
+ *array = sample->read.one.value;
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ *array = sample->read.time_enabled;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ *array = sample->read.time_running;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ memcpy(array, sample->read.group.values, sz);
+ array = (void *)array + sz;
+ } else {
+ *array = sample->read.one.id;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ memcpy(array, sample->callchain, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u.val32[0] = sample->raw_size;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array = (void *)array + sizeof(u32);
+
+ memcpy(array, sample->raw_data, sample->raw_size);
+ array = (void *)array + sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ memcpy(array, sample->branch_stack, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ *array++ = sample->user_regs.abi;
+ sz = hweight_long(sample_regs_user) * sizeof(u64);
+ memcpy(array, sample->user_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ *array++ = sz;
+ if (sz) {
+ memcpy(array, sample->user_stack.data, sz);
+ array = (void *)array + sz;
+ *array++ = sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT) {
+ *array = sample->weight;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ *array = sample->data_src;
+ array++;
+ }
+
return 0;
}
@@ -1391,6 +1857,7 @@ static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+ bit_name(IDENTIFIER),
{ .name = NULL, }
};
#undef bit_name
@@ -1482,7 +1949,7 @@ out:
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
char *msg, size_t msgsize)
{
- if ((err == ENOENT || err == ENXIO) &&
+ if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
evsel->attr.type == PERF_TYPE_HARDWARE &&
evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
/*
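
To make the id_pos bookkeeping added to evsel.c above concrete: for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ID, __perf_evsel__calc_id_pos() yields 2 (IP and TID each occupy one u64 before the id), whereas PERF_SAMPLE_IDENTIFIER always pins the id to slot 0. A hedged sketch of reading the id back out of a raw sample event with such a position (helper name is illustrative, not part of the patch):

/* Sketch only: fetch the event id from a PERF_RECORD_SAMPLE given id_pos. */
static u64 sample_event__read_id(const union perf_event *event, int id_pos)
{
	const u64 *array = event->sample.array;

	if (id_pos < 0)
		return 0;	/* sample_type carries neither ID nor IDENTIFIER */

	return array[id_pos];
}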
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3f156ccc1acb..4a7bdc713bab 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -38,6 +38,9 @@ struct perf_sample_id {
struct hlist_node node;
u64 id;
struct perf_evsel *evsel;
+
+ /* Holds total ID period value for PERF_SAMPLE_READ processing. */
+ u64 period;
};
/** struct perf_evsel - event selector
@@ -45,6 +48,12 @@ struct perf_sample_id {
* @name - Can be set to retain the original event name passed by the user,
* so that when showing results in tools such as 'perf stat', we
* show the name used, not some alias.
+ * @id_pos: the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
+ * struct sample_event
+ * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
+ * is used there is an id sample appended to non-sample events
*/
struct perf_evsel {
struct list_head node;
@@ -71,11 +80,14 @@ struct perf_evsel {
} handler;
struct cpu_map *cpus;
unsigned int sample_size;
+ int id_pos;
+ int is_pos;
bool supported;
bool needs_swap;
/* parse modifier helper */
int exclude_GH;
int nr_members;
+ int sample_read;
struct perf_evsel *leader;
char *group_name;
};
@@ -100,6 +112,9 @@ void perf_evsel__delete(struct perf_evsel *evsel);
void perf_evsel__config(struct perf_evsel *evsel,
struct perf_record_opts *opts);
+int __perf_evsel__sample_size(u64 sample_type);
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
+
bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
#define PERF_EVSEL__MAX_ALIASES 8
@@ -138,10 +153,12 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
#define perf_evsel__reset_sample_bit(evsel, bit) \
__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
-void perf_evsel__set_sample_id(struct perf_evsel *evsel);
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool use_sample_identifier);
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
const char *filter);
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
struct cpu_map *cpus);
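
The new id_pos/is_pos fields cache where the event id sits in a record so it does not have to be recomputed per event. As a rough illustration of what id_pos means (assumption: this mirrors the helper this series adds to evsel.c, counting the u64 slots that precede PERF_SAMPLE_ID in a sample):

/* Sketch only: index, in u64 words, of the id within a sample. */
static int calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;		/* identifier is always first */

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;		/* samples carry no id at all */

	/* sample fields laid out before PERF_SAMPLE_ID */
	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;
	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;
	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;
	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

is_pos is the analogous position counted backwards from the end of a non-sample event, per the kernel-doc comment above.
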
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a4dafbee2511..a33197a4fd21 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -25,41 +25,9 @@
static bool no_buildid_cache = false;
-static int trace_event_count;
-static struct perf_trace_event_type *trace_events;
-
static u32 header_argc;
static const char **header_argv;
-int perf_header__push_event(u64 id, const char *name)
-{
- struct perf_trace_event_type *nevents;
-
- if (strlen(name) > MAX_EVENT_NAME)
- pr_warning("Event %s will be truncated\n", name);
-
- nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events));
- if (nevents == NULL)
- return -ENOMEM;
- trace_events = nevents;
-
- memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type));
- trace_events[trace_event_count].event_id = id;
- strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1);
- trace_event_count++;
- return 0;
-}
-
-char *perf_header__find_event(u64 id)
-{
- int i;
- for (i = 0 ; i < trace_event_count; i++) {
- if (trace_events[i].event_id == id)
- return trace_events[i].name;
- }
- return NULL;
-}
-
/*
* magic2 = "PERFILE2"
* must be a numerical value to let the endianness
@@ -748,18 +716,19 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu)
char filename[MAXPATHLEN];
char *buf = NULL, *p;
size_t len = 0;
+ ssize_t sret;
u32 i = 0;
int ret = -1;
sprintf(filename, CORE_SIB_FMT, cpu);
fp = fopen(filename, "r");
if (!fp)
- return -1;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
+ goto try_threads;
+ sret = getline(&buf, &len, fp);
fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
p = strchr(buf, '\n');
if (p)
@@ -775,7 +744,9 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu)
buf = NULL;
len = 0;
}
+ ret = 0;
+try_threads:
sprintf(filename, THRD_SIB_FMT, cpu);
fp = fopen(filename, "r");
if (!fp)
@@ -2257,7 +2228,7 @@ static int perf_header__adds_write(struct perf_header *header,
sec_size = sizeof(*feat_sec) * nr_sections;
- sec_start = header->data_offset + header->data_size;
+ sec_start = header->feat_offset;
lseek(fd, sec_start + sec_size, SEEK_SET);
for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
@@ -2304,6 +2275,7 @@ int perf_session__write_header(struct perf_session *session,
struct perf_file_attr f_attr;
struct perf_header *header = &session->header;
struct perf_evsel *evsel;
+ u64 attr_offset;
int err;
lseek(fd, sizeof(f_header), SEEK_SET);
@@ -2317,7 +2289,7 @@ int perf_session__write_header(struct perf_session *session,
}
}
- header->attr_offset = lseek(fd, 0, SEEK_CUR);
+ attr_offset = lseek(fd, 0, SEEK_CUR);
list_for_each_entry(evsel, &evlist->entries, node) {
f_attr = (struct perf_file_attr){
@@ -2334,17 +2306,8 @@ int perf_session__write_header(struct perf_session *session,
}
}
- header->event_offset = lseek(fd, 0, SEEK_CUR);
- header->event_size = trace_event_count * sizeof(struct perf_trace_event_type);
- if (trace_events) {
- err = do_write(fd, trace_events, header->event_size);
- if (err < 0) {
- pr_debug("failed to write perf header events\n");
- return err;
- }
- }
-
header->data_offset = lseek(fd, 0, SEEK_CUR);
+ header->feat_offset = header->data_offset + header->data_size;
if (at_exit) {
err = perf_header__adds_write(header, evlist, fd);
@@ -2357,17 +2320,14 @@ int perf_session__write_header(struct perf_session *session,
.size = sizeof(f_header),
.attr_size = sizeof(f_attr),
.attrs = {
- .offset = header->attr_offset,
+ .offset = attr_offset,
.size = evlist->nr_entries * sizeof(f_attr),
},
.data = {
.offset = header->data_offset,
.size = header->data_size,
},
- .event_types = {
- .offset = header->event_offset,
- .size = header->event_size,
- },
+ /* event_types is ignored, store zeros */
};
memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
@@ -2417,7 +2377,7 @@ int perf_header__process_sections(struct perf_header *header, int fd,
sec_size = sizeof(*feat_sec) * nr_sections;
- lseek(fd, header->data_offset + header->data_size, SEEK_SET);
+ lseek(fd, header->feat_offset, SEEK_SET);
err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
if (err < 0)
@@ -2523,6 +2483,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
/* check for legacy format */
ret = memcmp(&magic, __perf_magic1, sizeof(magic));
if (ret == 0) {
+ ph->version = PERF_HEADER_VERSION_1;
pr_debug("legacy perf.data format\n");
if (is_pipe)
return try_all_pipe_abis(hdr_sz, ph);
@@ -2544,6 +2505,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
return -1;
ph->needs_swap = true;
+ ph->version = PERF_HEADER_VERSION_2;
return 0;
}
@@ -2614,10 +2576,9 @@ int perf_file_header__read(struct perf_file_header *header,
memcpy(&ph->adds_features, &header->adds_features,
sizeof(ph->adds_features));
- ph->event_offset = header->event_types.offset;
- ph->event_size = header->event_types.size;
ph->data_offset = header->data.offset;
ph->data_size = header->data.size;
+ ph->feat_offset = header->data.offset + header->data.size;
return 0;
}
@@ -2666,19 +2627,17 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
return 0;
}
-static int perf_header__read_pipe(struct perf_session *session, int fd)
+static int perf_header__read_pipe(struct perf_session *session)
{
struct perf_header *header = &session->header;
struct perf_pipe_file_header f_header;
- if (perf_file_header__read_pipe(&f_header, header, fd,
+ if (perf_file_header__read_pipe(&f_header, header, session->fd,
session->repipe) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
}
- session->fd = fd;
-
return 0;
}
@@ -2772,20 +2731,21 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
return 0;
}
-int perf_session__read_header(struct perf_session *session, int fd)
+int perf_session__read_header(struct perf_session *session)
{
struct perf_header *header = &session->header;
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
+ int fd = session->fd;
session->evlist = perf_evlist__new();
if (session->evlist == NULL)
return -ENOMEM;
if (session->fd_pipe)
- return perf_header__read_pipe(session, fd);
+ return perf_header__read_pipe(session);
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
@@ -2839,22 +2799,9 @@ int perf_session__read_header(struct perf_session *session, int fd)
symbol_conf.nr_events = nr_attrs;
- if (f_header.event_types.size) {
- lseek(fd, f_header.event_types.offset, SEEK_SET);
- trace_events = malloc(f_header.event_types.size);
- if (trace_events == NULL)
- return -ENOMEM;
- if (perf_header__getbuffer64(header, fd, trace_events,
- f_header.event_types.size))
- goto out_errno;
- trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
- }
-
perf_header__process_sections(header, fd, &session->pevent,
perf_file_section__process);
- lseek(fd, header->data_offset, SEEK_SET);
-
if (perf_evlist__prepare_tracepoint_events(session->evlist,
session->pevent))
goto out_delete_evlist;
@@ -2922,7 +2869,8 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
return err;
}
-int perf_event__process_attr(union perf_event *event,
+int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
struct perf_evlist **pevlist)
{
u32 i, ids, n_ids;
@@ -2961,64 +2909,6 @@ int perf_event__process_attr(union perf_event *event,
return 0;
}
-int perf_event__synthesize_event_type(struct perf_tool *tool,
- u64 event_id, char *name,
- perf_event__handler_t process,
- struct machine *machine)
-{
- union perf_event ev;
- size_t size = 0;
- int err = 0;
-
- memset(&ev, 0, sizeof(ev));
-
- ev.event_type.event_type.event_id = event_id;
- memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
- strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
-
- ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
- size = strlen(ev.event_type.event_type.name);
- size = PERF_ALIGN(size, sizeof(u64));
- ev.event_type.header.size = sizeof(ev.event_type) -
- (sizeof(ev.event_type.event_type.name) - size);
-
- err = process(tool, &ev, NULL, machine);
-
- return err;
-}
-
-int perf_event__synthesize_event_types(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_trace_event_type *type;
- int i, err = 0;
-
- for (i = 0; i < trace_event_count; i++) {
- type = &trace_events[i];
-
- err = perf_event__synthesize_event_type(tool, type->event_id,
- type->name, process,
- machine);
- if (err) {
- pr_debug("failed to create perf header event type\n");
- return err;
- }
- }
-
- return err;
-}
-
-int perf_event__process_event_type(struct perf_tool *tool __maybe_unused,
- union perf_event *event)
-{
- if (perf_header__push_event(event->event_type.event_type.event_id,
- event->event_type.event_type.name) < 0)
- return -ENOMEM;
-
- return 0;
-}
-
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
struct perf_evlist *evlist,
perf_event__handler_t process)
@@ -3065,7 +2955,8 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
return aligned_size;
}
-int perf_event__process_tracing_data(union perf_event *event,
+int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
struct perf_session *session)
{
ssize_t size_read, padding, size = event->tracing_data.size;
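
With the event_types section dropped (its slot in the file header is written as zeros and ignored on read), the resulting perf.data layout that the write/read paths above agree on is, schematically:

/*
 *   offset 0                struct perf_file_header
 *   f_header.attrs.offset   perf_file_attr entries (with their ids)
 *   header->data_offset     event data, header->data_size bytes
 *   header->feat_offset     feature sections
 *                           (= data_offset + data_size)
 */
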
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 16a3e83c584e..307c9aed972e 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -34,6 +34,11 @@ enum {
HEADER_FEAT_BITS = 256,
};
+enum perf_header_version {
+ PERF_HEADER_VERSION_1,
+ PERF_HEADER_VERSION_2,
+};
+
struct perf_file_section {
u64 offset;
u64 size;
@@ -45,6 +50,7 @@ struct perf_file_header {
u64 attr_size;
struct perf_file_section attrs;
struct perf_file_section data;
+ /* event_types is ignored */
struct perf_file_section event_types;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
@@ -84,28 +90,24 @@ struct perf_session_env {
};
struct perf_header {
- bool needs_swap;
- s64 attr_offset;
- u64 data_offset;
- u64 data_size;
- u64 event_offset;
- u64 event_size;
+ enum perf_header_version version;
+ bool needs_swap;
+ u64 data_offset;
+ u64 data_size;
+ u64 feat_offset;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
- struct perf_session_env env;
+ struct perf_session_env env;
};
struct perf_evlist;
struct perf_session;
-int perf_session__read_header(struct perf_session *session, int fd);
+int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
struct perf_evlist *evlist,
int fd, bool at_exit);
int perf_header__write_pipe(int fd);
-int perf_header__push_event(u64 id, const char *name);
-char *perf_header__find_event(u64 id);
-
void perf_header__set_feat(struct perf_header *header, int feat);
void perf_header__clear_feat(struct perf_header *header, int feat);
bool perf_header__has_feat(const struct perf_header *header, int feat);
@@ -130,22 +132,14 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
int perf_event__synthesize_attrs(struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process);
-int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist);
-
-int perf_event__synthesize_event_type(struct perf_tool *tool,
- u64 event_id, char *name,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_event_types(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__process_event_type(struct perf_tool *tool,
- union perf_event *event);
+int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
+ struct perf_evlist **pevlist);
int perf_event__synthesize_tracing_data(struct perf_tool *tool,
int fd, struct perf_evlist *evlist,
perf_event__handler_t process);
-int perf_event__process_tracing_data(union perf_event *event,
+int perf_event__process_tracing_data(struct perf_tool *tool,
+ union perf_event *event,
struct perf_session *session);
int perf_event__synthesize_build_id(struct perf_tool *tool,
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index b11a6cfdb414..46a0d35a05e1 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -24,7 +24,8 @@ enum hist_filter {
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
.min_percent = 0.5,
- .order = ORDER_CALLEE
+ .order = ORDER_CALLEE,
+ .key = CCKEY_FUNCTION
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
@@ -912,6 +913,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
hists__inc_nr_entries(hists, he);
+ he->dummy = true;
}
out:
return he;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 2d3790fd99bb..1329b6b6ffe6 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -141,10 +141,12 @@ struct perf_hpp {
};
struct perf_hpp_fmt {
- int (*header)(struct perf_hpp *hpp);
- int (*width)(struct perf_hpp *hpp);
- int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
- int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
+ int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
+ int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
+ int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+ int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
struct list_head list;
};
@@ -157,7 +159,7 @@ extern struct list_head perf_hpp__list;
extern struct perf_hpp_fmt perf_hpp__format[];
enum {
- PERF_HPP__BASELINE,
+ /* Matches perf_hpp__format array. */
PERF_HPP__OVERHEAD,
PERF_HPP__OVERHEAD_SYS,
PERF_HPP__OVERHEAD_US,
@@ -165,11 +167,6 @@ enum {
PERF_HPP__OVERHEAD_GUEST_US,
PERF_HPP__SAMPLES,
PERF_HPP__PERIOD,
- PERF_HPP__PERIOD_BASELINE,
- PERF_HPP__DELTA,
- PERF_HPP__RATIO,
- PERF_HPP__WEIGHTED_DIFF,
- PERF_HPP__FORMULA,
PERF_HPP__MAX_INDEX
};
@@ -177,8 +174,6 @@ enum {
void perf_hpp__init(void);
void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_enable(unsigned col);
-int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
- bool color);
struct perf_evlist;
@@ -245,11 +240,4 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
#endif
unsigned int hists__sort_list_width(struct hists *self);
-
-double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair);
-double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair);
-s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair);
-int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
- char *buf, size_t size);
-double perf_diff__period_percent(struct hist_entry *he, u64 period);
#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
index 6f19c548ecc0..97a800738226 100644
--- a/tools/perf/util/include/linux/string.h
+++ b/tools/perf/util/include/linux/string.h
@@ -1,3 +1,4 @@
#include <string.h>
void *memdup(const void *src, size_t len);
+int str_append(char **s, int *len, const char *a);
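
str_append() is only declared here; its definition is added to util/string.c elsewhere in this series and is not shown in this diff. As a hedged sketch of the contract parse_events_fixup() relies on -- append 'a' to '*s', growing the buffer and tracking its allocated size in '*len' -- not the actual implementation:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

int str_append(char **s, int *len, const char *a)
{
	int olen = *s ? strlen(*s) : 0;
	int nlen = olen + strlen(a) + 1;

	if (nlen > *len) {
		*len = *len * 2;
		if (*len < nlen)
			*len = nlen;
		*s = realloc(*s, *len);
		if (!*s)
			return -ENOMEM;
		if (olen == 0)
			(*s)[0] = '\0';
	}
	strcat(*s, a);
	return 0;
}
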
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index b2ecad6ec46b..1dca61f0512d 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -25,12 +25,15 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->kmaps.machine = machine;
machine->pid = pid;
+ machine->symbol_filter = NULL;
+
machine->root_dir = strdup(root_dir);
if (machine->root_dir == NULL)
return -ENOMEM;
if (pid != HOST_KERNEL_ID) {
- struct thread *thread = machine__findnew_thread(machine, pid);
+ struct thread *thread = machine__findnew_thread(machine, 0,
+ pid);
char comm[64];
if (thread == NULL)
@@ -95,6 +98,7 @@ void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
machines->guests = RB_ROOT;
+ machines->symbol_filter = NULL;
}
void machines__exit(struct machines *machines)
@@ -118,6 +122,8 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
return NULL;
}
+ machine->symbol_filter = machines->symbol_filter;
+
while (*p != NULL) {
parent = *p;
pos = rb_entry(parent, struct machine, rb_node);
@@ -133,6 +139,21 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
return machine;
}
+void machines__set_symbol_filter(struct machines *machines,
+ symbol_filter_t symbol_filter)
+{
+ struct rb_node *nd;
+
+ machines->symbol_filter = symbol_filter;
+ machines->host.symbol_filter = symbol_filter;
+
+ for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ struct machine *machine = rb_entry(nd, struct machine, rb_node);
+
+ machine->symbol_filter = symbol_filter;
+ }
+}
+
struct machine *machines__find(struct machines *machines, pid_t pid)
{
struct rb_node **p = &machines->guests.rb_node;
@@ -233,7 +254,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
return;
}
-static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid,
+static struct thread *__machine__findnew_thread(struct machine *machine,
+ pid_t pid, pid_t tid,
bool create)
{
struct rb_node **p = &machine->threads.rb_node;
@@ -241,23 +263,28 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p
struct thread *th;
/*
- * Font-end cache - PID lookups come in blocks,
+ * Front-end cache - TID lookups come in blocks,
* so most of the time we dont have to look up
* the full rbtree:
*/
- if (machine->last_match && machine->last_match->pid == pid)
+ if (machine->last_match && machine->last_match->tid == tid) {
+ if (pid && pid != machine->last_match->pid_)
+ machine->last_match->pid_ = pid;
return machine->last_match;
+ }
while (*p != NULL) {
parent = *p;
th = rb_entry(parent, struct thread, rb_node);
- if (th->pid == pid) {
+ if (th->tid == tid) {
machine->last_match = th;
+ if (pid && pid != th->pid_)
+ th->pid_ = pid;
return th;
}
- if (pid < th->pid)
+ if (tid < th->tid)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -266,7 +293,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p
if (!create)
return NULL;
- th = thread__new(pid);
+ th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
rb_insert_color(&th->rb_node, &machine->threads);
@@ -276,19 +303,22 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p
return th;
}
-struct thread *machine__findnew_thread(struct machine *machine, pid_t pid)
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
+ pid_t tid)
{
- return __machine__findnew_thread(machine, pid, true);
+ return __machine__findnew_thread(machine, pid, tid, true);
}
-struct thread *machine__find_thread(struct machine *machine, pid_t pid)
+struct thread *machine__find_thread(struct machine *machine, pid_t tid)
{
- return __machine__findnew_thread(machine, pid, false);
+ return __machine__findnew_thread(machine, 0, tid, false);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event)
{
- struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
+ struct thread *thread = machine__findnew_thread(machine,
+ event->comm.pid,
+ event->comm.tid);
if (dump_trace)
perf_event__fprintf_comm(event, stdout);
@@ -628,10 +658,8 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
struct map *map = machine->vmlinux_maps[type];
int ret = dso__load_vmlinux_path(map->dso, map, filter);
- if (ret > 0) {
+ if (ret > 0)
dso__set_loaded(map->dso, type);
- map__reloc_vmlinux(map);
- }
return ret;
}
@@ -808,7 +836,10 @@ static int machine__create_modules(struct machine *machine)
free(line);
fclose(file);
- return machine__set_modules_path(machine);
+ if (machine__set_modules_path(machine) < 0) {
+ pr_debug("Problems setting modules path maps, continuing anyway...\n");
+ }
+ return 0;
out_delete_line:
free(line);
@@ -858,6 +889,18 @@ static void machine__set_kernel_mmap_len(struct machine *machine,
}
}
+static bool machine__uses_kcore(struct machine *machine)
+{
+ struct dso *dso;
+
+ list_for_each_entry(dso, &machine->kernel_dsos, node) {
+ if (dso__is_kcore(dso))
+ return true;
+ }
+
+ return false;
+}
+
static int machine__process_kernel_mmap_event(struct machine *machine,
union perf_event *event)
{
@@ -866,6 +909,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
enum dso_kernel_type kernel_type;
bool is_kernel_mmap;
+ /* If we have maps from kcore then we do not need or want any others */
+ if (machine__uses_kcore(machine))
+ return 0;
+
machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
if (machine__is_host(machine))
kernel_type = DSO_TYPE_KERNEL;
@@ -969,7 +1016,8 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
return 0;
}
- thread = machine__findnew_thread(machine, event->mmap.pid);
+ thread = machine__findnew_thread(machine, event->mmap.pid,
+ event->mmap.pid);
if (thread == NULL)
goto out_problem;
@@ -994,11 +1042,30 @@ out_problem:
return 0;
}
+static void machine__remove_thread(struct machine *machine, struct thread *th)
+{
+ machine->last_match = NULL;
+ rb_erase(&th->rb_node, &machine->threads);
+ /*
+ * We may have references to this thread, for instance in some hist_entry
+ * instances, so just move them to a separate list.
+ */
+ list_add_tail(&th->node, &machine->dead_threads);
+}
+
int machine__process_fork_event(struct machine *machine, union perf_event *event)
{
- struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
- struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
+ struct thread *thread = machine__find_thread(machine, event->fork.tid);
+ struct thread *parent = machine__findnew_thread(machine,
+ event->fork.ppid,
+ event->fork.ptid);
+ /* if a thread currently exists for the thread id remove it */
+ if (thread != NULL)
+ machine__remove_thread(machine, thread);
+
+ thread = machine__findnew_thread(machine, event->fork.pid,
+ event->fork.tid);
if (dump_trace)
perf_event__fprintf_task(event, stdout);
@@ -1011,18 +1078,8 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
return 0;
}
-static void machine__remove_thread(struct machine *machine, struct thread *th)
-{
- machine->last_match = NULL;
- rb_erase(&th->rb_node, &machine->threads);
- /*
- * We may have references to this thread, for instance in some hist_entry
- * instances, so just move them to a separate list.
- */
- list_add_tail(&th->node, &machine->dead_threads);
-}
-
-int machine__process_exit_event(struct machine *machine, union perf_event *event)
+int machine__process_exit_event(struct machine *machine __maybe_unused,
+ union perf_event *event)
{
struct thread *thread = machine__find_thread(machine, event->fork.tid);
@@ -1030,7 +1087,7 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
perf_event__fprintf_task(event, stdout);
if (thread != NULL)
- machine__remove_thread(machine, thread);
+ thread__exited(thread);
return 0;
}
@@ -1058,11 +1115,10 @@ int machine__process_event(struct machine *machine, union perf_event *event)
return ret;
}
-static bool symbol__match_parent_regex(struct symbol *sym)
+static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
- if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
+ if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
return 1;
-
return 0;
}
@@ -1094,7 +1150,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread,
* or else, the symbol is unknown
*/
thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
- ip, &al, NULL);
+ ip, &al);
if (al.sym)
goto found;
}
@@ -1112,8 +1168,8 @@ static void ip__resolve_data(struct machine *machine, struct thread *thread,
memset(&al, 0, sizeof(al));
- thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, &al,
- NULL);
+ thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
+ &al);
ams->addr = addr;
ams->al_addr = al.addr;
ams->sym = al.sym;
@@ -1159,8 +1215,8 @@ struct branch_info *machine__resolve_bstack(struct machine *machine,
static int machine__resolve_callchain_sample(struct machine *machine,
struct thread *thread,
struct ip_callchain *chain,
- struct symbol **parent)
-
+ struct symbol **parent,
+ struct addr_location *root_al)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
@@ -1208,11 +1264,18 @@ static int machine__resolve_callchain_sample(struct machine *machine,
al.filtered = false;
thread__find_addr_location(thread, machine, cpumode,
- MAP__FUNCTION, ip, &al, NULL);
+ MAP__FUNCTION, ip, &al);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
- symbol__match_parent_regex(al.sym))
+ symbol__match_regex(al.sym, &parent_regex))
*parent = al.sym;
+ else if (have_ignore_callees && root_al &&
+ symbol__match_regex(al.sym, &ignore_callees_regex)) {
+ /* Treat this symbol as the root,
+ forgetting its callees. */
+ *root_al = al;
+ callchain_cursor_reset(&callchain_cursor);
+ }
if (!symbol_conf.use_callchain)
break;
}
@@ -1237,15 +1300,13 @@ int machine__resolve_callchain(struct machine *machine,
struct perf_evsel *evsel,
struct thread *thread,
struct perf_sample *sample,
- struct symbol **parent)
-
+ struct symbol **parent,
+ struct addr_location *root_al)
{
int ret;
- callchain_cursor_reset(&callchain_cursor);
-
ret = machine__resolve_callchain_sample(machine, thread,
- sample->callchain, parent);
+ sample->callchain, parent, root_al);
if (ret)
return ret;
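
Thread lookup now keys on tid while carrying the pid alongside, so callers pass both where the event provides them. Restating the calling convention from the call sites changed above:

/* create-or-find, keyed by tid, pid carried alongside: */
struct thread *parent = machine__findnew_thread(machine,
						event->fork.ppid,
						event->fork.ptid);
struct thread *child  = machine__findnew_thread(machine,
						event->fork.pid,
						event->fork.tid);

/* lookups that must not create a thread take only the tid: */
struct thread *t = machine__find_thread(machine, event->fork.tid);
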
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 77940680f1fc..0df925ba6a44 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -5,6 +5,7 @@
#include <linux/rbtree.h>
#include "map.h"
+struct addr_location;
struct branch_stack;
struct perf_evsel;
struct perf_sample;
@@ -28,6 +29,7 @@ struct machine {
struct list_head kernel_dsos;
struct map_groups kmaps;
struct map *vmlinux_maps[MAP__NR_TYPES];
+ symbol_filter_t symbol_filter;
};
static inline
@@ -36,7 +38,7 @@ struct map *machine__kernel_map(struct machine *machine, enum map_type type)
return machine->vmlinux_maps[type];
}
-struct thread *machine__find_thread(struct machine *machine, pid_t pid);
+struct thread *machine__find_thread(struct machine *machine, pid_t tid);
int machine__process_comm_event(struct machine *machine, union perf_event *event);
int machine__process_exit_event(struct machine *machine, union perf_event *event);
@@ -50,6 +52,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data);
struct machines {
struct machine host;
struct rb_root guests;
+ symbol_filter_t symbol_filter;
};
void machines__init(struct machines *machines);
@@ -67,6 +70,9 @@ struct machine *machines__findnew(struct machines *machines, pid_t pid);
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
+void machines__set_symbol_filter(struct machines *machines,
+ symbol_filter_t symbol_filter);
+
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
void machine__exit(struct machine *machine);
void machine__delete_dead_threads(struct machine *machine);
@@ -83,7 +89,8 @@ int machine__resolve_callchain(struct machine *machine,
struct perf_evsel *evsel,
struct thread *thread,
struct perf_sample *sample,
- struct symbol **parent);
+ struct symbol **parent,
+ struct addr_location *root_al);
/*
* Default guest kernel is defined by parameter --guestkallsyms
@@ -99,7 +106,8 @@ static inline bool machine__is_host(struct machine *machine)
return machine ? machine->pid == HOST_KERNEL_ID : false;
}
-struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
+ pid_t tid);
size_t machine__fprintf(struct machine *machine, FILE *fp);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 8bcdf9e54089..9e8304ca343e 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -182,12 +182,6 @@ int map__load(struct map *map, symbol_filter_t filter)
#endif
return -1;
}
- /*
- * Only applies to the kernel, as its symtabs aren't relative like the
- * module ones.
- */
- if (map->dso->kernel)
- map__reloc_vmlinux(map);
return 0;
}
@@ -254,14 +248,18 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
/*
* objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
- * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
+ * relative to section start.
*/
u64 map__rip_2objdump(struct map *map, u64 rip)
{
- u64 addr = map->dso->adjust_symbols ?
- map->unmap_ip(map, rip) : /* RIP -> IP */
- rip;
- return addr;
+ if (!map->dso->adjust_symbols)
+ return rip;
+
+ if (map->dso->rel)
+ return rip - map->pgoff;
+
+ return map->unmap_ip(map, rip);
}
void map_groups__init(struct map_groups *mg)
@@ -513,35 +511,6 @@ int map_groups__clone(struct map_groups *mg,
return 0;
}
-static u64 map__reloc_map_ip(struct map *map, u64 ip)
-{
- return ip + (s64)map->pgoff;
-}
-
-static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
-{
- return ip - (s64)map->pgoff;
-}
-
-void map__reloc_vmlinux(struct map *map)
-{
- struct kmap *kmap = map__kmap(map);
- s64 reloc;
-
- if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
- return;
-
- reloc = (kmap->ref_reloc_sym->unrelocated_addr -
- kmap->ref_reloc_sym->addr);
-
- if (!reloc)
- return;
-
- map->map_ip = map__reloc_map_ip;
- map->unmap_ip = map__reloc_unmap_ip;
- map->pgoff = reloc;
-}
-
void maps__insert(struct rb_root *maps, struct map *map)
{
struct rb_node **p = &maps->rb_node;
@@ -586,3 +555,21 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
return NULL;
}
+
+struct map *maps__first(struct rb_root *maps)
+{
+ struct rb_node *first = rb_first(maps);
+
+ if (first)
+ return rb_entry(first, struct map, rb_node);
+ return NULL;
+}
+
+struct map *maps__next(struct map *map)
+{
+ struct rb_node *next = rb_next(&map->rb_node);
+
+ if (next)
+ return rb_entry(next, struct map, rb_node);
+ return NULL;
+}
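
With the vmlinux relocation hooks removed, map__rip_2objdump() now distinguishes three cases on its own. Summarizing the rewritten function above:

/*
 * map__rip_2objdump(map, rip):
 *
 *   !map->dso->adjust_symbols -> rip                     (use as-is)
 *   map->dso->rel             -> rip - map->pgoff        (ET_REL: section-relative)
 *   otherwise                 -> map->unmap_ip(map, rip) (RIP -> absolute IP)
 */
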
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index a887f2c9dfbb..2cc93cbf0e17 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -112,6 +112,8 @@ size_t __map_groups__fprintf_maps(struct map_groups *mg,
void maps__insert(struct rb_root *maps, struct map *map);
void maps__remove(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
+struct map *maps__first(struct rb_root *maps);
+struct map *maps__next(struct map *map);
void map_groups__init(struct map_groups *mg);
void map_groups__exit(struct map_groups *mg);
int map_groups__clone(struct map_groups *mg,
@@ -139,6 +141,17 @@ static inline struct map *map_groups__find(struct map_groups *mg,
return maps__find(&mg->maps[type], addr);
}
+static inline struct map *map_groups__first(struct map_groups *mg,
+ enum map_type type)
+{
+ return maps__first(&mg->maps[type]);
+}
+
+static inline struct map *map_groups__next(struct map *map)
+{
+ return maps__next(map);
+}
+
struct symbol *map_groups__find_symbol(struct map_groups *mg,
enum map_type type, u64 addr,
struct map **mapp,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 995fc25db8c6..98125319b158 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -6,7 +6,7 @@
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
-#include "string.h"
+#include "linux/string.h"
#include "symbol.h"
#include "cache.h"
#include "header.h"
@@ -15,6 +15,7 @@
#define YY_EXTRA_TYPE int
#include "parse-events-flex.h"
#include "pmu.h"
+#include "thread_map.h"
#define MAX_NAME_LEN 100
@@ -108,6 +109,10 @@ static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
.symbol = "emulation-faults",
.alias = "",
},
+ [PERF_COUNT_SW_DUMMY] = {
+ .symbol = "dummy",
+ .alias = "",
+ },
};
#define __PERF_EVENT_FIELD(config, name) \
@@ -217,6 +222,29 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
return NULL;
}
+struct tracepoint_path *tracepoint_name_to_path(const char *name)
+{
+ struct tracepoint_path *path = zalloc(sizeof(*path));
+ char *str = strchr(name, ':');
+
+ if (path == NULL || str == NULL) {
+ free(path);
+ return NULL;
+ }
+
+ path->system = strndup(name, str - name);
+ path->name = strdup(str+1);
+
+ if (path->system == NULL || path->name == NULL) {
+ free(path->system);
+ free(path->name);
+ free(path);
+ path = NULL;
+ }
+
+ return path;
+}
+
const char *event_type(int type)
{
switch (type) {
@@ -241,40 +269,29 @@ const char *event_type(int type)
-static int __add_event(struct list_head **_list, int *idx,
+static int __add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
char *name, struct cpu_map *cpus)
{
struct perf_evsel *evsel;
- struct list_head *list = *_list;
-
- if (!list) {
- list = malloc(sizeof(*list));
- if (!list)
- return -ENOMEM;
- INIT_LIST_HEAD(list);
- }
event_attr_init(attr);
evsel = perf_evsel__new(attr, (*idx)++);
- if (!evsel) {
- free(list);
+ if (!evsel)
return -ENOMEM;
- }
evsel->cpus = cpus;
if (name)
evsel->name = strdup(name);
list_add_tail(&evsel->node, list);
- *_list = list;
return 0;
}
-static int add_event(struct list_head **_list, int *idx,
+static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, char *name)
{
- return __add_event(_list, idx, attr, name, NULL);
+ return __add_event(list, idx, attr, name, NULL);
}
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@@ -295,7 +312,7 @@ static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES]
return -1;
}
-int parse_events_add_cache(struct list_head **list, int *idx,
+int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2)
{
struct perf_event_attr attr;
@@ -356,31 +373,21 @@ int parse_events_add_cache(struct list_head **list, int *idx,
return add_event(list, idx, &attr, name);
}
-static int add_tracepoint(struct list_head **listp, int *idx,
+static int add_tracepoint(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
struct perf_evsel *evsel;
- struct list_head *list = *listp;
-
- if (!list) {
- list = malloc(sizeof(*list));
- if (!list)
- return -ENOMEM;
- INIT_LIST_HEAD(list);
- }
evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);
- if (!evsel) {
- free(list);
+ if (!evsel)
return -ENOMEM;
- }
list_add_tail(&evsel->node, list);
- *listp = list;
+
return 0;
}
-static int add_tracepoint_multi_event(struct list_head **list, int *idx,
+static int add_tracepoint_multi_event(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
char evt_path[MAXPATHLEN];
@@ -412,7 +419,7 @@ static int add_tracepoint_multi_event(struct list_head **list, int *idx,
return ret;
}
-static int add_tracepoint_event(struct list_head **list, int *idx,
+static int add_tracepoint_event(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
return strpbrk(evt_name, "*?") ?
@@ -420,7 +427,7 @@ static int add_tracepoint_event(struct list_head **list, int *idx,
add_tracepoint(list, idx, sys_name, evt_name);
}
-static int add_tracepoint_multi_sys(struct list_head **list, int *idx,
+static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
struct dirent *events_ent;
@@ -452,7 +459,7 @@ static int add_tracepoint_multi_sys(struct list_head **list, int *idx,
return ret;
}
-int parse_events_add_tracepoint(struct list_head **list, int *idx,
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event)
{
int ret;
@@ -507,7 +514,7 @@ do { \
return 0;
}
-int parse_events_add_breakpoint(struct list_head **list, int *idx,
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type)
{
struct perf_event_attr attr;
@@ -588,7 +595,7 @@ static int config_attr(struct perf_event_attr *attr,
return 0;
}
-int parse_events_add_numeric(struct list_head **list, int *idx,
+int parse_events_add_numeric(struct list_head *list, int *idx,
u32 type, u64 config,
struct list_head *head_config)
{
@@ -621,7 +628,7 @@ static char *pmu_event_name(struct list_head *head_terms)
return NULL;
}
-int parse_events_add_pmu(struct list_head **list, int *idx,
+int parse_events_add_pmu(struct list_head *list, int *idx,
char *name, struct list_head *head_config)
{
struct perf_event_attr attr;
@@ -664,6 +671,7 @@ void parse_events__set_leader(char *name, struct list_head *list)
leader->group_name = name ? strdup(name) : NULL;
}
+/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all)
{
@@ -684,6 +692,8 @@ struct event_modifier {
int eG;
int precise;
int exclude_GH;
+ int sample_read;
+ int pinned;
};
static int get_event_modifier(struct event_modifier *mod, char *str,
@@ -695,6 +705,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int eH = evsel ? evsel->attr.exclude_host : 0;
int eG = evsel ? evsel->attr.exclude_guest : 0;
int precise = evsel ? evsel->attr.precise_ip : 0;
+ int sample_read = 0;
+ int pinned = evsel ? evsel->attr.pinned : 0;
int exclude = eu | ek | eh;
int exclude_GH = evsel ? evsel->exclude_GH : 0;
@@ -727,6 +739,10 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
/* use of precise requires exclude_guest */
if (!exclude_GH)
eG = 1;
+ } else if (*str == 'S') {
+ sample_read = 1;
+ } else if (*str == 'D') {
+ pinned = 1;
} else
break;
@@ -753,6 +769,9 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->eG = eG;
mod->precise = precise;
mod->exclude_GH = exclude_GH;
+ mod->sample_read = sample_read;
+ mod->pinned = pinned;
+
return 0;
}
@@ -765,7 +784,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
- if (strlen(str) > (sizeof("ukhGHppp") - 1))
+ if (strlen(str) > (sizeof("ukhGHpppSD") - 1))
return -1;
while (*p) {
@@ -803,6 +822,10 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->attr.exclude_host = mod.eH;
evsel->attr.exclude_guest = mod.eG;
evsel->exclude_GH = mod.exclude_GH;
+ evsel->sample_read = mod.sample_read;
+
+ if (perf_evsel__is_group_leader(evsel))
+ evsel->attr.pinned = mod.pinned;
}
return 0;
@@ -820,6 +843,32 @@ int parse_events_name(struct list_head *list, char *name)
return 0;
}
+static int parse_events__scanner(const char *str, void *data, int start_token);
+
+static int parse_events_fixup(int ret, const char *str, void *data,
+ int start_token)
+{
+ char *o = strdup(str);
+ char *s = NULL;
+ char *t = o;
+ char *p;
+ int len = 0;
+
+ if (!o)
+ return ret;
+ while ((p = strsep(&t, ",")) != NULL) {
+ if (s)
+ str_append(&s, &len, ",");
+ str_append(&s, &len, "cpu/");
+ str_append(&s, &len, p);
+ str_append(&s, &len, "/");
+ }
+ free(o);
+ if (!s)
+ return -ENOMEM;
+ return parse_events__scanner(s, data, start_token);
+}
+
static int parse_events__scanner(const char *str, void *data, int start_token)
{
YY_BUFFER_STATE buffer;
@@ -840,6 +889,8 @@ static int parse_events__scanner(const char *str, void *data, int start_token)
parse_events__flush_buffer(buffer, scanner);
parse_events__delete_buffer(buffer, scanner);
parse_events_lex_destroy(scanner);
+ if (ret && !strchr(str, '/'))
+ ret = parse_events_fixup(ret, str, data, start_token);
return ret;
}
@@ -1026,6 +1077,33 @@ int is_valid_tracepoint(const char *event_string)
return 0;
}
+static bool is_event_supported(u8 type, unsigned config)
+{
+ bool ret = true;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .disabled = 1,
+ .exclude_kernel = 1,
+ };
+ struct {
+ struct thread_map map;
+ int threads[1];
+ } tmap = {
+ .map.nr = 1,
+ .threads = { 0 },
+ };
+
+ evsel = perf_evsel__new(&attr, 0);
+ if (evsel) {
+ ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+ perf_evsel__delete(evsel);
+ }
+
+ return ret;
+}
+
static void __print_events_type(u8 type, struct event_symbol *syms,
unsigned max)
{
@@ -1033,14 +1111,16 @@ static void __print_events_type(u8 type, struct event_symbol *syms,
unsigned i;
for (i = 0; i < max ; i++, syms++) {
+ if (!is_event_supported(type, i))
+ continue;
+
if (strlen(syms->alias))
snprintf(name, sizeof(name), "%s OR %s",
syms->symbol, syms->alias);
else
snprintf(name, sizeof(name), "%s", syms->symbol);
- printf(" %-50s [%s]\n", name,
- event_type_descriptors[type]);
+ printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
}
}
@@ -1069,6 +1149,10 @@ int print_hwcache_events(const char *event_glob, bool name_only)
if (event_glob != NULL && !strglobmatch(name, event_glob))
continue;
+ if (!is_event_supported(PERF_TYPE_HW_CACHE,
+ type | (op << 8) | (i << 16)))
+ continue;
+
if (name_only)
printf("%s ", name);
else
@@ -1079,6 +1163,8 @@ int print_hwcache_events(const char *event_glob, bool name_only)
}
}
+ if (printed)
+ printf("\n");
return printed;
}
@@ -1096,6 +1182,9 @@ static void print_symbol_events(const char *event_glob, unsigned type,
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
+ if (!is_event_supported(type, i))
+ continue;
+
if (name_only) {
printf("%s ", syms->symbol);
continue;
@@ -1133,11 +1222,12 @@ void print_events(const char *event_glob, bool name_only)
print_hwcache_events(event_glob, name_only);
+ print_pmu_events(event_glob, name_only);
+
if (event_glob != NULL)
return;
if (!name_only) {
- printf("\n");
printf(" %-50s [%s]\n",
"rNNN",
event_type_descriptors[PERF_TYPE_RAW]);
@@ -1237,6 +1327,4 @@ void parse_events__free_terms(struct list_head *terms)
list_for_each_entry_safe(term, h, terms, list)
free(term);
-
- free(terms);
}
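
Two user-visible effects of the parse-events changes above: the new 'S' modifier sets evsel->sample_read and 'D' pins the group leader (evsel->attr.pinned), and parse_events_fixup() gives a failed parse a second chance by wrapping each comma-separated term in a cpu// PMU expression before rescanning. For example (the alias names are hypothetical, shown only to illustrate the rewrite):

/*
 *     "mem-loads,mem-stores"
 *  -> "cpu/mem-loads/,cpu/mem-stores/"
 *
 * The rewritten string is handed back to parse_events__scanner(),
 * so bare PMU alias names can still resolve as cpu// terms.
 */
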
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 8a4859315fd9..f1cb4c4b3c70 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,6 +23,7 @@ struct tracepoint_path {
};
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
+extern struct tracepoint_path *tracepoint_name_to_path(const char *name);
extern bool have_tracepoints(struct list_head *evlist);
const char *event_type(int type);
@@ -84,16 +85,16 @@ void parse_events__free_terms(struct list_head *terms);
int parse_events__modifier_event(struct list_head *list, char *str, bool add);
int parse_events__modifier_group(struct list_head *list, char *event_mod);
int parse_events_name(struct list_head *list, char *name);
-int parse_events_add_tracepoint(struct list_head **list, int *idx,
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event);
-int parse_events_add_numeric(struct list_head **list, int *idx,
+int parse_events_add_numeric(struct list_head *list, int *idx,
u32 type, u64 config,
struct list_head *head_config);
-int parse_events_add_cache(struct list_head **list, int *idx,
+int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2);
-int parse_events_add_breakpoint(struct list_head **list, int *idx,
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type);
-int parse_events_add_pmu(struct list_head **list, int *idx,
+int parse_events_add_pmu(struct list_head *list, int *idx,
char *pmu , struct list_head *head_config);
void parse_events__set_leader(char *name, struct list_head *list);
void parse_events_update_lists(struct list_head *list_event,
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index e9d1134c2c68..91346b753960 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -82,7 +82,8 @@ num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
-modifier_event [ukhpGH]+
+/* If you add a modifier you need to update check_modifier() */
+modifier_event [ukhpGHSD]+
modifier_bp [rwx]{1,3}
%%
@@ -144,6 +145,7 @@ context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW
cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
L1-dcache|l1-d|l1d|L1-data |
L1-icache|l1-i|l1i|L1-instruction |
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index afc44c18dfe1..4eb67ec333f1 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -22,6 +22,13 @@ do { \
YYABORT; \
} while (0)
+#define ALLOC_LIST(list) \
+do { \
+ list = malloc(sizeof(*list)); \
+ ABORT_ON(!list); \
+ INIT_LIST_HEAD(list); \
+} while (0)
+
static inc_group_count(struct list_head *list,
struct parse_events_evlist *data)
{
@@ -196,9 +203,10 @@ event_pmu:
PE_NAME '/' event_config '/'
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3));
parse_events__free_terms($3);
$$ = list;
}
@@ -212,11 +220,12 @@ event_legacy_symbol:
value_sym '/' event_config '/'
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
int type = $1 >> 16;
int config = $1 & 255;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
type, config, $3));
parse_events__free_terms($3);
$$ = list;
@@ -225,11 +234,12 @@ value_sym '/' event_config '/'
value_sym sep_slash_dc
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
int type = $1 >> 16;
int config = $1 & 255;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
type, config, NULL));
$$ = list;
}
@@ -238,27 +248,30 @@ event_legacy_cache:
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5));
$$ = list;
}
|
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL));
$$ = list;
}
|
PE_NAME_CACHE_TYPE
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL));
$$ = list;
}
@@ -266,9 +279,10 @@ event_legacy_mem:
PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, $4));
$$ = list;
}
@@ -276,9 +290,10 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
PE_PREFIX_MEM PE_VALUE sep_dc
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, NULL));
$$ = list;
}
@@ -287,9 +302,10 @@ event_legacy_tracepoint:
PE_NAME ':' PE_NAME
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3));
$$ = list;
}
@@ -297,9 +313,10 @@ event_legacy_numeric:
PE_VALUE ':' PE_VALUE
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL));
$$ = list;
}
@@ -307,9 +324,10 @@ event_legacy_raw:
PE_RAW
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
PERF_TYPE_RAW, $1, NULL));
$$ = list;
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 4c6f9c490a8d..bc9d8069d376 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -73,7 +73,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head)
* located at:
* /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
*/
-static int pmu_format(char *name, struct list_head *format)
+static int pmu_format(const char *name, struct list_head *format)
{
struct stat st;
char path[PATH_MAX];
@@ -162,7 +162,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
* Reading the pmu event aliases definition, which should be located at:
* /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
*/
-static int pmu_aliases(char *name, struct list_head *head)
+static int pmu_aliases(const char *name, struct list_head *head)
{
struct stat st;
char path[PATH_MAX];
@@ -208,7 +208,7 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
* located at:
* /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
*/
-static int pmu_type(char *name, __u32 *type)
+static int pmu_type(const char *name, __u32 *type)
{
struct stat st;
char path[PATH_MAX];
@@ -266,7 +266,7 @@ static void pmu_read_sysfs(void)
closedir(dir);
}
-static struct cpu_map *pmu_cpumask(char *name)
+static struct cpu_map *pmu_cpumask(const char *name)
{
struct stat st;
char path[PATH_MAX];
@@ -293,7 +293,7 @@ static struct cpu_map *pmu_cpumask(char *name)
return cpus;
}
-static struct perf_pmu *pmu_lookup(char *name)
+static struct perf_pmu *pmu_lookup(const char *name)
{
struct perf_pmu *pmu;
LIST_HEAD(format);
@@ -330,7 +330,7 @@ static struct perf_pmu *pmu_lookup(char *name)
return pmu;
}
-static struct perf_pmu *pmu_find(char *name)
+static struct perf_pmu *pmu_find(const char *name)
{
struct perf_pmu *pmu;
@@ -356,7 +356,7 @@ struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu)
return NULL;
}
-struct perf_pmu *perf_pmu__find(char *name)
+struct perf_pmu *perf_pmu__find(const char *name)
{
struct perf_pmu *pmu;
@@ -564,3 +564,76 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
for (b = from; b <= to; b++)
set_bit(b, bits);
}
+
+static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
+ struct perf_pmu_alias *alias)
+{
+ snprintf(buf, len, "%s/%s/", pmu->name, alias->name);
+ return buf;
+}
+
+static char *format_alias_or(char *buf, int len, struct perf_pmu *pmu,
+ struct perf_pmu_alias *alias)
+{
+ snprintf(buf, len, "%s OR %s/%s/", alias->name, pmu->name, alias->name);
+ return buf;
+}
+
+static int cmp_string(const void *a, const void *b)
+{
+ const char * const *as = a;
+ const char * const *bs = b;
+ return strcmp(*as, *bs);
+}
+
+void print_pmu_events(const char *event_glob, bool name_only)
+{
+ struct perf_pmu *pmu;
+ struct perf_pmu_alias *alias;
+ char buf[1024];
+ int printed = 0;
+ int len, j;
+ char **aliases;
+
+ pmu = NULL;
+ len = 0;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL)
+ list_for_each_entry(alias, &pmu->aliases, list)
+ len++;
+ aliases = malloc(sizeof(char *) * len);
+ if (!aliases)
+ return;
+ pmu = NULL;
+ j = 0;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL)
+ list_for_each_entry(alias, &pmu->aliases, list) {
+ char *name = format_alias(buf, sizeof(buf), pmu, alias);
+ bool is_cpu = !strcmp(pmu->name, "cpu");
+
+ if (event_glob != NULL &&
+ !(strglobmatch(name, event_glob) ||
+ (!is_cpu && strglobmatch(alias->name,
+ event_glob))))
+ continue;
+ aliases[j] = name;
+ if (is_cpu && !name_only)
+ aliases[j] = format_alias_or(buf, sizeof(buf),
+ pmu, alias);
+ aliases[j] = strdup(aliases[j]);
+ j++;
+ }
+ len = j;
+ qsort(aliases, len, sizeof(char *), cmp_string);
+ for (j = 0; j < len; j++) {
+ if (name_only) {
+ printf("%s ", aliases[j]);
+ continue;
+ }
+ printf(" %-50s [Kernel PMU event]\n", aliases[j]);
+ free(aliases[j]);
+ printed++;
+ }
+ if (printed)
+ printf("\n");
+ free(aliases);
+}
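
print_pmu_events() collects every PMU alias, sorts them, and prints core-PMU entries both as the bare alias and as the cpu// form. A minimal usage sketch, mirroring how print_events() calls it in parse-events.c:

/* long listing of every kernel PMU event alias: */
print_pmu_events(NULL, false);

/* names only, restricted to aliases matching a glob: */
print_pmu_events("cpu/*", true);
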
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 32fe55b659fa..6b2cbe2d4cc3 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/perf_event.h>
+#include <stdbool.h>
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
@@ -21,7 +22,7 @@ struct perf_pmu {
struct list_head list;
};
-struct perf_pmu *perf_pmu__find(char *name);
+struct perf_pmu *perf_pmu__find(const char *name);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms);
int perf_pmu__config_terms(struct list_head *formats,
@@ -40,5 +41,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head);
struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
+void print_pmu_events(const char *event_glob, bool name_only);
+
int perf_pmu__test(void);
#endif /* __PMU_H */
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 925e0c3e6d91..71b5412bbbb9 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -8,6 +8,26 @@
#include "cpumap.h"
#include "thread_map.h"
+/*
+ * Support debug printing even though util/debug.c is not linked. That means
+ * implementing 'verbose' and 'eprintf'.
+ */
+int verbose;
+
+int eprintf(int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (verbose >= level) {
+ va_start(args, fmt);
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
@@ -967,6 +987,7 @@ static struct {
{ "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ },
{ "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
{ "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
+ { "COUNT_SW_DUMMY", PERF_COUNT_SW_DUMMY },
{ "SAMPLE_IP", PERF_SAMPLE_IP },
{ "SAMPLE_TID", PERF_SAMPLE_TID },
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
new file mode 100644
index 000000000000..18d73aa2f0f8
--- /dev/null
+++ b/tools/perf/util/record.c
@@ -0,0 +1,108 @@
+#include "evlist.h"
+#include "evsel.h"
+#include "cpumap.h"
+#include "parse-events.h"
+
+typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
+
+static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ int err = -EAGAIN, fd;
+
+ evlist = perf_evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ if (parse_events(evlist, str))
+ goto out_delete;
+
+ evsel = perf_evlist__first(evlist);
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd < 0)
+ goto out_delete;
+ close(fd);
+
+ fn(evsel);
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd < 0) {
+ if (errno == EINVAL)
+ err = -EINVAL;
+ goto out_delete;
+ }
+ close(fd);
+ err = 0;
+
+out_delete:
+ perf_evlist__delete(evlist);
+ return err;
+}
+
+static bool perf_probe_api(setup_probe_fn_t fn)
+{
+ const char *try[] = {"cycles:u", "instructions:u", "cpu-clock", NULL};
+ struct cpu_map *cpus;
+ int cpu, ret, i = 0;
+
+ cpus = cpu_map__new(NULL);
+ if (!cpus)
+ return false;
+ cpu = cpus->map[0];
+ cpu_map__delete(cpus);
+
+ do {
+ ret = perf_do_probe_api(fn, cpu, try[i++]);
+ if (!ret)
+ return true;
+ } while (ret == -EAGAIN && try[i]);
+
+ return false;
+}
+
+static void perf_probe_sample_identifier(struct perf_evsel *evsel)
+{
+ evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
+}
+
+bool perf_can_sample_identifier(void)
+{
+ return perf_probe_api(perf_probe_sample_identifier);
+}
+
+void perf_evlist__config(struct perf_evlist *evlist,
+ struct perf_record_opts *opts)
+{
+ struct perf_evsel *evsel;
+ bool use_sample_identifier = false;
+
+ /*
+ * Set the evsel leader links before we configure attributes,
+ * since some might depend on this info.
+ */
+ if (opts->group)
+ perf_evlist__set_leader(evlist);
+
+ if (evlist->cpus->map[0] < 0)
+ opts->no_inherit = true;
+
+ list_for_each_entry(evsel, &evlist->entries, node)
+ perf_evsel__config(evsel, opts);
+
+ if (evlist->nr_entries > 1) {
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (evsel->attr.sample_type == first->attr.sample_type)
+ continue;
+ use_sample_identifier = perf_can_sample_identifier();
+ break;
+ }
+ list_for_each_entry(evsel, &evlist->entries, node)
+ perf_evsel__set_sample_id(evsel, use_sample_identifier);
+ }
+
+ perf_evlist__set_id_pos(evlist);
+}
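
A hedged sketch of how a record-style tool would drive the new helpers (the caller and its event string are illustrative, not from this patch): parse the requested events into an evlist, then let perf_evlist__config() run perf_evsel__config() on each evsel and, when sample types differ across events, enable PERF_SAMPLE_IDENTIFIER if the probe above reports kernel support.

	#include "evlist.h"
	#include "parse-events.h"

	/* Illustrative caller only; real tools fill perf_record_opts from the CLI. */
	static int setup_recording(struct perf_evlist *evlist,
				   struct perf_record_opts *opts)
	{
		if (parse_events(evlist, "cycles,instructions"))
			return -1;

		/* Configures every evsel and sorts out the sample id position. */
		perf_evlist__config(evlist, opts);
		return 0;
	}
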
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index eacec859f299..a85e4ae5f3ac 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -261,7 +261,8 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
- struct addr_location *al)
+ struct thread *thread,
+ struct addr_location *al)
{
struct format_field *field;
static char handler[256];
@@ -272,7 +273,6 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
- struct thread *thread = al->thread;
char *comm = thread->comm;
dSP;
@@ -351,7 +351,8 @@ static void perl_process_event_generic(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct thread *thread __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
dSP;
@@ -377,10 +378,11 @@ static void perl_process_event(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
- struct addr_location *al)
+ struct thread *thread,
+ struct addr_location *al)
{
- perl_process_tracepoint(event, sample, evsel, machine, al);
- perl_process_event_generic(event, sample, evsel, machine, al);
+ perl_process_tracepoint(event, sample, evsel, machine, thread, al);
+ perl_process_event_generic(event, sample, evsel, machine, thread, al);
}
static void run_start_sub(void)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index e87aa5d9696b..cc75a3cef388 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -225,6 +225,7 @@ static void python_process_tracepoint(union perf_event *perf_event
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
+ struct thread *thread,
struct addr_location *al)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
@@ -238,7 +239,6 @@ static void python_process_tracepoint(union perf_event *perf_event
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
- struct thread *thread = al->thread;
char *comm = thread->comm;
t = PyTuple_New(MAX_FIELDS);
@@ -345,12 +345,12 @@ static void python_process_general_event(union perf_event *perf_event
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
+ struct thread *thread,
struct addr_location *al)
{
PyObject *handler, *retval, *t, *dict;
static char handler_name[64];
unsigned n = 0;
- struct thread *thread = al->thread;
/*
* Use the MAX_FIELDS to make the function expandable, though
@@ -404,17 +404,18 @@ static void python_process_event(union perf_event *perf_event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
+ struct thread *thread,
struct addr_location *al)
{
switch (evsel->attr.type) {
case PERF_TYPE_TRACEPOINT:
python_process_tracepoint(perf_event, sample, evsel,
- machine, al);
+ machine, thread, al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
python_process_general_event(perf_event, sample, evsel,
- machine, al);
+ machine, thread, al);
}
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index cf1fe01b7e89..1fc0c628683e 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <traceevent/event-parse.h>
#include <byteswap.h>
#include <unistd.h>
@@ -12,7 +13,6 @@
#include "sort.h"
#include "util.h"
#include "cpumap.h"
-#include "event-parse.h"
#include "perf_regs.h"
#include "vdso.h"
@@ -24,7 +24,7 @@ static int perf_session__open(struct perf_session *self, bool force)
self->fd_pipe = true;
self->fd = STDIN_FILENO;
- if (perf_session__read_header(self, self->fd) < 0)
+ if (perf_session__read_header(self) < 0)
pr_err("incompatible file format (rerun with -v to learn more)");
return 0;
@@ -56,7 +56,7 @@ static int perf_session__open(struct perf_session *self, bool force)
goto out_close;
}
- if (perf_session__read_header(self, self->fd) < 0) {
+ if (perf_session__read_header(self) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)");
goto out_close;
}
@@ -71,6 +71,11 @@ static int perf_session__open(struct perf_session *self, bool force)
goto out_close;
}
+ if (!perf_evlist__valid_read_format(self->evlist)) {
+ pr_err("non matching read_format");
+ goto out_close;
+ }
+
self->size = input_stat.st_size;
return 0;
@@ -193,7 +198,9 @@ void perf_session__delete(struct perf_session *self)
vdso__exit();
}
-static int process_event_synth_tracing_data_stub(union perf_event *event
+static int process_event_synth_tracing_data_stub(struct perf_tool *tool
+ __maybe_unused,
+ union perf_event *event
__maybe_unused,
struct perf_session *session
__maybe_unused)
@@ -202,7 +209,8 @@ static int process_event_synth_tracing_data_stub(union perf_event *event
return 0;
}
-static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
+static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
struct perf_evlist **pevlist
__maybe_unused)
{
@@ -238,18 +246,11 @@ static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
-static void perf_tool__fill_defaults(struct perf_tool *tool)
+void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
tool->sample = process_event_sample_stub;
@@ -271,8 +272,6 @@ static void perf_tool__fill_defaults(struct perf_tool *tool)
tool->unthrottle = process_event_stub;
if (tool->attr == NULL)
tool->attr = process_event_synth_attr_stub;
- if (tool->event_type == NULL)
- tool->event_type = process_event_type_stub;
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
@@ -496,7 +495,7 @@ static int perf_session_deliver_event(struct perf_session *session,
u64 file_offset);
static int flush_sample_queue(struct perf_session *s,
- struct perf_tool *tool)
+ struct perf_tool *tool)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
@@ -644,7 +643,7 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
-static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+int perf_session_queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset)
{
struct ordered_samples *os = &s->ordered_samples;
@@ -740,7 +739,7 @@ static void perf_session__print_tstamp(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
- u64 sample_type = perf_evlist__sample_type(session->evlist);
+ u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);
if (event->header.type != PERF_RECORD_SAMPLE &&
!perf_evlist__sample_id_all(session->evlist)) {
@@ -755,6 +754,36 @@ static void perf_session__print_tstamp(struct perf_session *session,
printf("%" PRIu64 " ", sample->time);
}
+static void sample_read__printf(struct perf_sample *sample, u64 read_format)
+{
+ printf("... sample_read:\n");
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ printf("...... time enabled %016" PRIx64 "\n",
+ sample->read.time_enabled);
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ printf("...... time running %016" PRIx64 "\n",
+ sample->read.time_running);
+
+ if (read_format & PERF_FORMAT_GROUP) {
+ u64 i;
+
+ printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
+
+ for (i = 0; i < sample->read.group.nr; i++) {
+ struct sample_read_value *value;
+
+ value = &sample->read.group.values[i];
+ printf("..... id %016" PRIx64
+ ", value %016" PRIx64 "\n",
+ value->id, value->value);
+ }
+ } else
+ printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
+ sample->read.one.id, sample->read.one.value);
+}
+
static void dump_event(struct perf_session *session, union perf_event *event,
u64 file_offset, struct perf_sample *sample)
{
@@ -804,11 +833,15 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_DATA_SRC)
printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
+
+ if (sample_type & PERF_SAMPLE_READ)
+ sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
- union perf_event *event)
+ union perf_event *event,
+ struct perf_sample *sample)
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
@@ -820,7 +853,7 @@ static struct machine *
if (event->header.type == PERF_RECORD_MMAP)
pid = event->mmap.pid;
else
- pid = event->ip.pid;
+ pid = sample->pid;
return perf_session__findnew_machine(session, pid);
}
@@ -828,6 +861,75 @@ static struct machine *
return &session->machines.host;
}
+static int deliver_sample_value(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct sample_read_value *v,
+ struct machine *machine)
+{
+ struct perf_sample_id *sid;
+
+ sid = perf_evlist__id2sid(session->evlist, v->id);
+ if (sid) {
+ sample->id = v->id;
+ sample->period = v->value - sid->period;
+ sid->period = v->value;
+ }
+
+ if (!sid || sid->evsel == NULL) {
+ ++session->stats.nr_unknown_id;
+ return 0;
+ }
+
+ return tool->sample(tool, event, sample, sid->evsel, machine);
+}
+
+static int deliver_sample_group(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ int ret = -EINVAL;
+ u64 i;
+
+ for (i = 0; i < sample->read.group.nr; i++) {
+ ret = deliver_sample_value(session, tool, event, sample,
+ &sample->read.group.values[i],
+ machine);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+perf_session__deliver_sample(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ /* We know evsel != NULL. */
+ u64 sample_type = evsel->attr.sample_type;
+ u64 read_format = evsel->attr.read_format;
+
+ /* Standard sample delivery. */
+ if (!(sample_type & PERF_SAMPLE_READ))
+ return tool->sample(tool, event, sample, evsel, machine);
+
+ /* For PERF_SAMPLE_READ we have either single or group mode. */
+ if (read_format & PERF_FORMAT_GROUP)
+ return deliver_sample_group(session, tool, event, sample,
+ machine);
+ else
+ return deliver_sample_value(session, tool, event, sample,
+ &sample->read.one, machine);
+}
+
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
@@ -857,7 +959,8 @@ static int perf_session_deliver_event(struct perf_session *session,
hists__inc_nr_events(&evsel->hists, event->header.type);
}
- machine = perf_session__find_machine_for_cpumode(session, event);
+ machine = perf_session__find_machine_for_cpumode(session, event,
+ sample);
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
@@ -870,7 +973,8 @@ static int perf_session_deliver_event(struct perf_session *session,
++session->stats.nr_unprocessable_samples;
return 0;
}
- return tool->sample(tool, event, sample, evsel, machine);
+ return perf_session__deliver_sample(session, tool, event,
+ sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_COMM:
@@ -895,22 +999,6 @@ static int perf_session_deliver_event(struct perf_session *session,
}
}
-static int perf_session__preprocess_sample(struct perf_session *session,
- union perf_event *event, struct perf_sample *sample)
-{
- if (event->header.type != PERF_RECORD_SAMPLE ||
- !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
- return 0;
-
- if (!ip_callchain__valid(sample->callchain, event)) {
- pr_debug("call-chain problem with event, skipping it.\n");
- ++session->stats.nr_invalid_chains;
- session->stats.total_invalid_chains += sample->period;
- return -EINVAL;
- }
- return 0;
-}
-
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
struct perf_tool *tool, u64 file_offset)
{
@@ -921,16 +1009,14 @@ static int perf_session__process_user_event(struct perf_session *session, union
/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
- err = tool->attr(event, &session->evlist);
+ err = tool->attr(tool, event, &session->evlist);
if (err == 0)
perf_session__set_id_hdr_size(session);
return err;
- case PERF_RECORD_HEADER_EVENT_TYPE:
- return tool->event_type(tool, event);
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(session->fd, file_offset, SEEK_SET);
- return tool->tracing_data(event, session);
+ return tool->tracing_data(tool, event, session);
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
@@ -975,10 +1061,6 @@ static int perf_session__process_event(struct perf_session *session,
if (ret)
return ret;
- /* Preprocess sample records - precheck callchains */
- if (perf_session__preprocess_sample(session, event, &sample))
- return 0;
-
if (tool->ordered_samples) {
ret = perf_session_queue_event(session, event, &sample,
file_offset);
@@ -999,7 +1081,7 @@ void perf_event_header__bswap(struct perf_event_header *self)
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
- return machine__findnew_thread(&session->machines.host, pid);
+ return machine__findnew_thread(&session->machines.host, 0, pid);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
@@ -1091,8 +1173,10 @@ more:
perf_event_header__bswap(&event->header);
size = event->header.size;
- if (size == 0)
- size = 8;
+ if (size < sizeof(struct perf_event_header)) {
+ pr_err("bad event header size\n");
+ goto out_err;
+ }
if (size > cur_size) {
void *new = realloc(buf, size);
@@ -1161,8 +1245,12 @@ fetch_mmaped_event(struct perf_session *session,
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
- if (head + event->header.size > mmap_size)
+ if (head + event->header.size > mmap_size) {
+ /* We're not fetching the event so swap back again */
+ if (session->header.needs_swap)
+ perf_event_header__bswap(&event->header);
return NULL;
+ }
return event;
}
@@ -1242,7 +1330,7 @@ more:
size = event->header.size;
- if (size == 0 ||
+ if (size < sizeof(struct perf_event_header) ||
perf_session__process_event(session, event, tool, file_pos) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
file_offset + head, event->header.size,
@@ -1295,12 +1383,15 @@ int perf_session__process_events(struct perf_session *self,
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
- if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
- pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
- return false;
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &session->evlist->entries, node) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
+ return true;
}
- return true;
+ pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
+ return false;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
@@ -1383,13 +1474,18 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample, struct machine *machine,
- int print_sym, int print_dso, int print_symoffset)
+ unsigned int print_opts, unsigned int stack_depth)
{
struct addr_location al;
struct callchain_cursor_node *node;
-
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- NULL) < 0) {
+ int print_ip = print_opts & PRINT_IP_OPT_IP;
+ int print_sym = print_opts & PRINT_IP_OPT_SYM;
+ int print_dso = print_opts & PRINT_IP_OPT_DSO;
+ int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
+ int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
+ char s = print_oneline ? ' ' : '\t';
+
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
error("problem processing %d event, skipping it.\n",
event->header.type);
return;
@@ -1397,37 +1493,50 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
if (symbol_conf.use_callchain && sample->callchain) {
-
if (machine__resolve_callchain(machine, evsel, al.thread,
- sample, NULL) != 0) {
+ sample, NULL, NULL) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
return;
}
callchain_cursor_commit(&callchain_cursor);
- while (1) {
+ while (stack_depth) {
node = callchain_cursor_current(&callchain_cursor);
if (!node)
break;
- printf("\t%16" PRIx64, node->ip);
+ if (print_ip)
+ printf("%c%16" PRIx64, s, node->ip);
+
if (print_sym) {
printf(" ");
- symbol__fprintf_symname(node->sym, stdout);
+ if (print_symoffset) {
+ al.addr = node->ip;
+ al.map = node->map;
+ symbol__fprintf_symname_offs(node->sym, &al, stdout);
+ } else
+ symbol__fprintf_symname(node->sym, stdout);
}
+
if (print_dso) {
printf(" (");
map__fprintf_dsoname(node->map, stdout);
printf(")");
}
- printf("\n");
+
+ if (!print_oneline)
+ printf("\n");
callchain_cursor_advance(&callchain_cursor);
+
+ stack_depth--;
}
} else {
- printf("%16" PRIx64, sample->ip);
+ if (print_ip)
+ printf("%16" PRIx64, sample->ip);
+
if (print_sym) {
printf(" ");
if (print_symoffset)
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index f3b235ec7bf4..3aa75fb2225f 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -37,11 +37,16 @@ struct perf_session {
int fd;
bool fd_pipe;
bool repipe;
- char *cwd;
struct ordered_samples ordered_samples;
char filename[1];
};
+#define PRINT_IP_OPT_IP (1<<0)
+#define PRINT_IP_OPT_SYM (1<<1)
+#define PRINT_IP_OPT_DSO (1<<2)
+#define PRINT_IP_OPT_SYMOFFSET (1<<3)
+#define PRINT_IP_OPT_ONELINE (1<<4)
+
struct perf_tool;
struct perf_session *perf_session__new(const char *filename, int mode,
@@ -57,6 +62,11 @@ int __perf_session__process_events(struct perf_session *self,
int perf_session__process_events(struct perf_session *self,
struct perf_tool *tool);
+int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset);
+
+void perf_tool__fill_defaults(struct perf_tool *tool);
+
int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel,
struct thread *thread,
struct ip_callchain *chain,
@@ -99,7 +109,7 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample, struct machine *machine,
- int print_sym, int print_dso, int print_symoffset);
+ unsigned int print_opts, unsigned int stack_depth);
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap);
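
For illustration (the helper below is hypothetical), callers now build a PRINT_IP_OPT_* mask and pass a maximum callchain depth instead of the old trio of int flags:

	#include "session.h"

	/* Hypothetical helper showing the new perf_evsel__print_ip() calling style. */
	static void print_sample_ip(struct perf_evsel *evsel, union perf_event *event,
				    struct perf_sample *sample, struct machine *machine)
	{
		unsigned int opts = PRINT_IP_OPT_IP | PRINT_IP_OPT_SYM | PRINT_IP_OPT_DSO;

		/* The last argument caps how many callchain entries are printed. */
		perf_evsel__print_ip(evsel, event, sample, machine, opts, 127);
	}
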
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 313a5a730112..5f118a089519 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -7,6 +7,8 @@ const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char *sort_order = default_sort_order;
+regex_t ignore_callees_regex;
+int have_ignore_callees = 0;
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__has_sym = 0;
@@ -55,14 +57,14 @@ static int64_t cmp_null(void *l, void *r)
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return right->thread->pid - left->thread->pid;
+ return right->thread->tid - left->thread->tid;
}
static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
- self->thread->comm ?: "", self->thread->pid);
+ self->thread->comm ?: "", self->thread->tid);
}
struct sort_entry sort_thread = {
@@ -77,7 +79,7 @@ struct sort_entry sort_thread = {
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return right->thread->pid - left->thread->pid;
+ return right->thread->tid - left->thread->tid;
}
static int64_t
@@ -872,6 +874,8 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PARENT, "parent", sort_parent),
DIM(SORT_CPU, "cpu", sort_cpu),
DIM(SORT_SRCLINE, "srcline", sort_srcline),
+ DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
+ DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
};
#undef DIM
@@ -891,8 +895,6 @@ static struct sort_dimension bstack_sort_dimensions[] = {
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
static struct sort_dimension memory_sort_dimensions[] = {
- DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
- DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 45ac84c1e037..4e80dbd271e7 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -29,6 +29,8 @@ extern const char *sort_order;
extern const char default_parent_pattern[];
extern const char *parent_pattern;
extern const char default_sort_order[];
+extern regex_t ignore_callees_regex;
+extern int have_ignore_callees;
extern int sort__need_collapse;
extern int sort__has_parent;
extern int sort__has_sym;
@@ -87,6 +89,9 @@ struct hist_entry {
struct hist_entry_diff diff;
+ /* Set when this entry was added by hists__add_dummy_entry. */
+ bool dummy;
+
/* XXX These two should move to some tree widget lib */
u16 row_offset;
u16 nr_rows;
@@ -138,6 +143,8 @@ enum sort_type {
SORT_PARENT,
SORT_CPU,
SORT_SRCLINE,
+ SORT_LOCAL_WEIGHT,
+ SORT_GLOBAL_WEIGHT,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -149,9 +156,7 @@ enum sort_type {
/* memory mode specific sort keys */
__SORT_MEMORY_MODE,
- SORT_LOCAL_WEIGHT = __SORT_MEMORY_MODE,
- SORT_GLOBAL_WEIGHT,
- SORT_MEM_DADDR_SYMBOL,
+ SORT_MEM_DADDR_SYMBOL = __SORT_MEMORY_MODE,
SORT_MEM_DADDR_DSO,
SORT_MEM_LOCKED,
SORT_MEM_TLB,
@@ -183,4 +188,6 @@ int setup_sorting(void);
extern int sort_dimension__add(const char *);
void sort__setup_elide(FILE *fp);
+int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
+
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 7c59c28afcc5..6506b3dfb605 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -10,6 +10,12 @@ void update_stats(struct stats *stats, u64 val)
delta = val - stats->mean;
stats->mean += delta / stats->n;
stats->M2 += delta*(val - stats->mean);
+
+ if (val > stats->max)
+ stats->max = val;
+
+ if (val < stats->min)
+ stats->min = val;
}
double avg_stats(struct stats *stats)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 588367c3c767..ae8ccd7227cf 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -6,6 +6,7 @@
struct stats
{
double n, mean, M2;
+ u64 max, min;
};
void update_stats(struct stats *stats, u64 val);
@@ -13,4 +14,12 @@ double avg_stats(struct stats *stats);
double stddev_stats(struct stats *stats);
double rel_stddev_stats(double stddev, double avg);
+static inline void init_stats(struct stats *stats)
+{
+ stats->n = 0.0;
+ stats->mean = 0.0;
+ stats->M2 = 0.0;
+ stats->min = (u64) -1;
+ stats->max = 0;
+}
#endif
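
update_stats() is Welford's online algorithm with the new min/max tracking bolted on; stddev is derived from M2/(n-1). A standalone illustration of the same arithmetic (not perf code, values made up):

	/* Standalone illustration of the running-stats arithmetic. */
	#include <stdio.h>

	struct running { double n, mean, M2, min, max; };

	static void push(struct running *s, double val)
	{
		double delta;

		s->n += 1.0;
		delta = val - s->mean;
		s->mean += delta / s->n;
		s->M2 += delta * (val - s->mean);	/* Welford update */
		if (s->n == 1.0 || val < s->min)
			s->min = val;
		if (s->n == 1.0 || val > s->max)
			s->max = val;
	}

	int main(void)
	{
		struct running s = { 0 };
		double v[] = { 10, 12, 11 };
		unsigned i;

		for (i = 0; i < 3; i++)
			push(&s, v[i]);
		/* sample variance = M2 / (n - 1): prints mean=11.00 var=1.00 */
		printf("mean=%.2f var=%.2f min=%.0f max=%.0f\n",
		       s.mean, s.M2 / (s.n - 1), s.min, s.max);
		return 0;
	}
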
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 29c7b2cb2521..f0b0c008c507 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -387,3 +387,27 @@ void *memdup(const void *src, size_t len)
return p;
}
+
+/**
+ * str_append - reallocate string and append another
+ * @s: pointer to string pointer
+ * @len: pointer to the tracked allocation size (must be initialized)
+ * @a: string to append.
+ */
+int str_append(char **s, int *len, const char *a)
+{
+ int olen = *s ? strlen(*s) : 0;
+ int nlen = olen + strlen(a) + 1;
+ if (*len < nlen) {
+ *len = *len * 2;
+ if (*len < nlen)
+ *len = nlen;
+ *s = realloc(*s, *len);
+ if (!*s)
+ return -ENOMEM;
+ if (olen == 0)
+ **s = 0;
+ }
+ strcat(*s, a);
+ return 0;
+}
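
A hedged usage sketch for the new helper (the join() wrapper is made up): the caller owns both the string pointer and the tracked allocation size, and str_append() grows the buffer geometrically as pieces are added.

	#include <stdlib.h>

	int str_append(char **s, int *len, const char *a);	/* util/string.c */

	/* Hypothetical: build a comma separated list with str_append(). */
	static char *join(const char * const *items, int n)
	{
		char *s = NULL;
		int len = 0, i;

		for (i = 0; i < n; i++) {
			if (i && str_append(&s, &len, ", "))
				goto err;
			if (str_append(&s, &len, items[i]))
				goto err;
		}
		return s;
	err:
		free(s);
		return NULL;
	}
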
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 4b12bf850325..a7b9ab557380 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -599,11 +599,13 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
if (dso->kernel == DSO_TYPE_USER) {
GElf_Shdr shdr;
ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL ||
elf_section_by_name(elf, &ehdr, &shdr,
".gnu.prelink_undo",
NULL) != NULL);
} else {
- ss->adjust_symbols = 0;
+ ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL;
}
ss->name = strdup(name);
@@ -624,6 +626,37 @@ out_close:
return err;
}
+/**
+ * ref_reloc_sym_not_found - check whether the kernel relocation symbol is still unresolved.
+ * @kmap: kernel maps and relocation reference symbol
+ *
+ * This function returns %true if we are dealing with the kernel maps and the
+ * relocation reference symbol has not yet been found. Otherwise %false is
+ * returned.
+ */
+static bool ref_reloc_sym_not_found(struct kmap *kmap)
+{
+ return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
+ !kmap->ref_reloc_sym->unrelocated_addr;
+}
+
+/**
+ * ref_reloc - kernel relocation offset.
+ * @kmap: kernel maps and relocation reference symbol
+ *
+ * This function returns the offset of kernel addresses as determined by using
+ * the relocation reference symbol i.e. if the kernel has not been relocated
+ * then the return value is zero.
+ */
+static u64 ref_reloc(struct kmap *kmap)
+{
+ if (kmap && kmap->ref_reloc_sym &&
+ kmap->ref_reloc_sym->unrelocated_addr)
+ return kmap->ref_reloc_sym->addr -
+ kmap->ref_reloc_sym->unrelocated_addr;
+ return 0;
+}
+
int dso__load_sym(struct dso *dso, struct map *map,
struct symsrc *syms_ss, struct symsrc *runtime_ss,
symbol_filter_t filter, int kmodule)
@@ -642,8 +675,17 @@ int dso__load_sym(struct dso *dso, struct map *map,
Elf_Scn *sec, *sec_strndx;
Elf *elf;
int nr = 0;
+ bool remap_kernel = false, adjust_kernel_syms = false;
dso->symtab_type = syms_ss->type;
+ dso->rel = syms_ss->ehdr.e_type == ET_REL;
+
+ /*
+ * Modules may already have symbols from kallsyms, but those symbols
+ * have the wrong values for the dso maps, so remove them.
+ */
+ if (kmodule && syms_ss->symtab)
+ symbols__delete(&dso->symbols[map->type]);
if (!syms_ss->symtab) {
syms_ss->symtab = syms_ss->dynsym;
@@ -681,7 +723,31 @@ int dso__load_sym(struct dso *dso, struct map *map,
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
- dso->adjust_symbols = runtime_ss->adjust_symbols;
+
+ /*
+ * The kernel relocation symbol is needed in advance in order to adjust
+ * kernel maps correctly.
+ */
+ if (ref_reloc_sym_not_found(kmap)) {
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
+ const char *elf_name = elf_sym__name(&sym, symstrs);
+
+ if (strcmp(elf_name, kmap->ref_reloc_sym->name))
+ continue;
+ kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+ break;
+ }
+ }
+
+ dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
+ /*
+ * Initial kernel and module mappings do not map to the dso. For
+ * function mappings, flag the fixups.
+ */
+ if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
+ remap_kernel = true;
+ adjust_kernel_syms = dso->adjust_symbols;
+ }
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
const char *elf_name = elf_sym__name(&sym, symstrs);
@@ -690,10 +756,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
const char *section_name;
bool used_opd = false;
- if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
- strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
- kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
-
if (!is_label && !elf_sym__is_a(&sym, map->type))
continue;
@@ -745,20 +807,55 @@ int dso__load_sym(struct dso *dso, struct map *map,
(sym.st_value & 1))
--sym.st_value;
- if (dso->kernel != DSO_TYPE_USER || kmodule) {
+ if (dso->kernel || kmodule) {
char dso_name[PATH_MAX];
+ /* Adjust symbol to map to file offset */
+ if (adjust_kernel_syms)
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+
if (strcmp(section_name,
(curr_dso->short_name +
dso->short_name_len)) == 0)
goto new_symbol;
if (strcmp(section_name, ".text") == 0) {
+ /*
+ * The initial kernel mapping is based on
+ * kallsyms and identity maps. Overwrite it to
+ * map to the kernel dso.
+ */
+ if (remap_kernel && dso->kernel) {
+ remap_kernel = false;
+ map->start = shdr.sh_addr +
+ ref_reloc(kmap);
+ map->end = map->start + shdr.sh_size;
+ map->pgoff = shdr.sh_offset;
+ map->map_ip = map__map_ip;
+ map->unmap_ip = map__unmap_ip;
+ /* Ensure maps are correctly ordered */
+ map_groups__remove(kmap->kmaps, map);
+ map_groups__insert(kmap->kmaps, map);
+ }
+
+ /*
+ * The initial module mapping is based on
+ * /proc/modules mapped to offset zero.
+ * Overwrite it to map to the module dso.
+ */
+ if (remap_kernel && kmodule) {
+ remap_kernel = false;
+ map->pgoff = shdr.sh_offset;
+ }
+
curr_map = map;
curr_dso = dso;
goto new_symbol;
}
+ if (!kmap)
+ goto new_symbol;
+
snprintf(dso_name, sizeof(dso_name),
"%s%s", dso->short_name, section_name);
@@ -781,8 +878,16 @@ int dso__load_sym(struct dso *dso, struct map *map,
dso__delete(curr_dso);
goto out_elf_end;
}
- curr_map->map_ip = identity__map_ip;
- curr_map->unmap_ip = identity__map_ip;
+ if (adjust_kernel_syms) {
+ curr_map->start = shdr.sh_addr +
+ ref_reloc(kmap);
+ curr_map->end = curr_map->start +
+ shdr.sh_size;
+ curr_map->pgoff = shdr.sh_offset;
+ } else {
+ curr_map->map_ip = identity__map_ip;
+ curr_map->unmap_ip = identity__map_ip;
+ }
curr_dso->symtab_type = dso->symtab_type;
map_groups__insert(kmap->kmaps, curr_map);
dsos__add(&dso->node, curr_dso);
@@ -846,6 +951,57 @@ out_elf_end:
return err;
}
+static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
+{
+ GElf_Phdr phdr;
+ size_t i, phdrnum;
+ int err;
+ u64 sz;
+
+ if (elf_getphdrnum(elf, &phdrnum))
+ return -1;
+
+ for (i = 0; i < phdrnum; i++) {
+ if (gelf_getphdr(elf, i, &phdr) == NULL)
+ return -1;
+ if (phdr.p_type != PT_LOAD)
+ continue;
+ if (exe) {
+ if (!(phdr.p_flags & PF_X))
+ continue;
+ } else {
+ if (!(phdr.p_flags & PF_R))
+ continue;
+ }
+ sz = min(phdr.p_memsz, phdr.p_filesz);
+ if (!sz)
+ continue;
+ err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+ bool *is_64_bit)
+{
+ int err;
+ Elf *elf;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return -1;
+
+ if (is_64_bit)
+ *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
+
+ err = elf_read_maps(elf, exe, mapfn, data);
+
+ elf_end(elf);
+ return err;
+}
+
void symbol__elf_init(void)
{
elf_version(EV_CURRENT);
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index a7390cde63bc..3a802c300fc5 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -301,6 +301,13 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
return 0;
}
+int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused,
+ mapfn_t mapfn __maybe_unused, void *data __maybe_unused,
+ bool *is_64_bit __maybe_unused)
+{
+ return -1;
+}
+
void symbol__elf_init(void)
{
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index d5528e1cc03a..7eb0362f4ffd 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -87,6 +87,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
s64 a;
s64 b;
+ size_t na, nb;
/* Prefer a symbol with non zero length */
a = syma->end - syma->start;
@@ -120,11 +121,21 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
else if (a > b)
return SYMBOL_B;
- /* If all else fails, choose the symbol with the longest name */
- if (strlen(syma->name) >= strlen(symb->name))
+ /* Choose the symbol with the longest name */
+ na = strlen(syma->name);
+ nb = strlen(symb->name);
+ if (na > nb)
return SYMBOL_A;
- else
+ else if (na < nb)
+ return SYMBOL_B;
+
+ /* Avoid "SyS" kernel syscall aliases */
+ if (na >= 3 && !strncmp(syma->name, "SyS", 3))
+ return SYMBOL_B;
+ if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
return SYMBOL_B;
+
+ return SYMBOL_A;
}
void symbols__fixup_duplicate(struct rb_root *symbols)
@@ -248,7 +259,10 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym,
if (sym && sym->name) {
length = fprintf(fp, "%s", sym->name);
if (al) {
- offset = al->addr - sym->start;
+ if (al->addr < sym->end)
+ offset = al->addr - sym->start;
+ else
+ offset = al->addr - al->map->start - sym->start;
length += fprintf(fp, "+0x%lx", offset);
}
return length;
@@ -316,6 +330,16 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
+static struct symbol *symbols__first(struct rb_root *symbols)
+{
+ struct rb_node *n = rb_first(symbols);
+
+ if (n)
+ return rb_entry(n, struct symbol, rb_node);
+
+ return NULL;
+}
+
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@@ -386,6 +410,11 @@ struct symbol *dso__find_symbol(struct dso *dso,
return symbols__find(&dso->symbols[type], addr);
}
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
+{
+ return symbols__first(&dso->symbols[type]);
+}
+
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name)
{
@@ -522,6 +551,53 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
+static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct map *curr_map;
+ struct symbol *pos;
+ int count = 0, moved = 0;
+ struct rb_root *root = &dso->symbols[map->type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ char *module;
+
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ module = strchr(pos->name, '\t');
+ if (module)
+ *module = '\0';
+
+ curr_map = map_groups__find(kmaps, map->type, pos->start);
+
+ if (!curr_map || (filter && filter(curr_map, pos))) {
+ rb_erase(&pos->rb_node, root);
+ symbol__delete(pos);
+ } else {
+ pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end)
+ pos->end -= curr_map->start - curr_map->pgoff;
+ if (curr_map != map) {
+ rb_erase(&pos->rb_node, root);
+ symbols__insert(
+ &curr_map->dso->symbols[curr_map->type],
+ pos);
+ ++moved;
+ } else {
+ ++count;
+ }
+ }
+ }
+
+ /* Symbols have been adjusted */
+ dso->adjust_symbols = 1;
+
+ return count + moved;
+}
+
/*
* Split the symbols into maps, making sure there are no overlaps, i.e. the
* kernel range is broken in several maps, named [kernel].N, as we don't have
@@ -663,6 +739,161 @@ bool symbol__restricted_filename(const char *filename,
return restricted;
}
+struct kcore_mapfn_data {
+ struct dso *dso;
+ enum map_type type;
+ struct list_head maps;
+};
+
+static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
+{
+ struct kcore_mapfn_data *md = data;
+ struct map *map;
+
+ map = map__new2(start, md->dso, md->type);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->end = map->start + len;
+ map->pgoff = pgoff;
+
+ list_add(&map->node, &md->maps);
+
+ return 0;
+}
+
+/*
+ * If kallsyms is referenced by name then we look for kcore in the same
+ * directory.
+ */
+static bool kcore_filename_from_kallsyms_filename(char *kcore_filename,
+ const char *kallsyms_filename)
+{
+ char *name;
+
+ strcpy(kcore_filename, kallsyms_filename);
+ name = strrchr(kcore_filename, '/');
+ if (!name)
+ return false;
+
+ if (!strcmp(name, "/kallsyms")) {
+ strcpy(name, "/kcore");
+ return true;
+ }
+
+ return false;
+}
+
+static int dso__load_kcore(struct dso *dso, struct map *map,
+ const char *kallsyms_filename)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct machine *machine = kmaps->machine;
+ struct kcore_mapfn_data md;
+ struct map *old_map, *new_map, *replacement_map = NULL;
+ bool is_64_bit;
+ int err, fd;
+ char kcore_filename[PATH_MAX];
+ struct symbol *sym;
+
+ /* This function requires that the map is the kernel map */
+ if (map != machine->vmlinux_maps[map->type])
+ return -EINVAL;
+
+ if (!kcore_filename_from_kallsyms_filename(kcore_filename,
+ kallsyms_filename))
+ return -EINVAL;
+
+ md.dso = dso;
+ md.type = map->type;
+ INIT_LIST_HEAD(&md.maps);
+
+ fd = open(kcore_filename, O_RDONLY);
+ if (fd < 0)
+ return -EINVAL;
+
+ /* Read new maps into temporary lists */
+ err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
+ &is_64_bit);
+ if (err)
+ goto out_err;
+
+ if (list_empty(&md.maps)) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ /* Remove old maps */
+ old_map = map_groups__first(kmaps, map->type);
+ while (old_map) {
+ struct map *next = map_groups__next(old_map);
+
+ if (old_map != map)
+ map_groups__remove(kmaps, old_map);
+ old_map = next;
+ }
+
+ /* Find the kernel map using the first symbol */
+ sym = dso__first_symbol(dso, map->type);
+ list_for_each_entry(new_map, &md.maps, node) {
+ if (sym && sym->start >= new_map->start &&
+ sym->start < new_map->end) {
+ replacement_map = new_map;
+ break;
+ }
+ }
+
+ if (!replacement_map)
+ replacement_map = list_entry(md.maps.next, struct map, node);
+
+ /* Add new maps */
+ while (!list_empty(&md.maps)) {
+ new_map = list_entry(md.maps.next, struct map, node);
+ list_del(&new_map->node);
+ if (new_map == replacement_map) {
+ map->start = new_map->start;
+ map->end = new_map->end;
+ map->pgoff = new_map->pgoff;
+ map->map_ip = new_map->map_ip;
+ map->unmap_ip = new_map->unmap_ip;
+ map__delete(new_map);
+ /* Ensure maps are correctly ordered */
+ map_groups__remove(kmaps, map);
+ map_groups__insert(kmaps, map);
+ } else {
+ map_groups__insert(kmaps, new_map);
+ }
+ }
+
+ /*
+ * Set the data type and long name so that kcore can be read via
+ * dso__data_read_addr().
+ */
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->data_type = DSO_BINARY_TYPE__GUEST_KCORE;
+ else
+ dso->data_type = DSO_BINARY_TYPE__KCORE;
+ dso__set_long_name(dso, strdup(kcore_filename));
+
+ close(fd);
+
+ if (map->type == MAP__FUNCTION)
+ pr_debug("Using %s for kernel object code\n", kcore_filename);
+ else
+ pr_debug("Using %s for kernel data\n", kcore_filename);
+
+ return 0;
+
+out_err:
+ while (!list_empty(&md.maps)) {
+ map = list_entry(md.maps.next, struct map, node);
+ list_del(&map->node);
+ map__delete(map);
+ }
+ close(fd);
+ return -EINVAL;
+}
+
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, symbol_filter_t filter)
{
@@ -680,7 +911,10 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
else
dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
- return dso__split_kallsyms(dso, map, filter);
+ if (!dso__load_kcore(dso, map, filename))
+ return dso__split_kallsyms_for_kcore(dso, map, filter);
+ else
+ return dso__split_kallsyms(dso, map, filter);
}
static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -843,10 +1077,15 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
if (!runtime_ss && syms_ss)
runtime_ss = syms_ss;
- if (syms_ss)
- ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0);
- else
+ if (syms_ss) {
+ int km;
+
+ km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+ dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+ ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km);
+ } else {
ret = -1;
+ }
if (ret > 0) {
int nr_plt;
@@ -888,8 +1127,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
char symfs_vmlinux[PATH_MAX];
enum dso_binary_type symtab_type;
- snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
- symbol_conf.symfs, vmlinux);
+ if (vmlinux[0] == '/')
+ snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
+ else
+ snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
+ symbol_conf.symfs, vmlinux);
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
@@ -903,6 +1145,10 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
symsrc__destroy(&ss);
if (err > 0) {
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->data_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ else
+ dso->data_type = DSO_BINARY_TYPE__VMLINUX;
dso__set_long_name(dso, (char *)vmlinux);
dso__set_loaded(dso, map->type);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
@@ -975,7 +1221,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
dso__set_long_name(dso,
strdup(symbol_conf.vmlinux_name));
dso->lname_alloc = 1;
- goto out_fixup;
+ return err;
}
return err;
}
@@ -983,7 +1229,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
if (vmlinux_path != NULL) {
err = dso__load_vmlinux_path(dso, map, filter);
if (err > 0)
- goto out_fixup;
+ return err;
}
/* do not try local files if a symfs was given */
@@ -1042,9 +1288,8 @@ do_kallsyms:
pr_debug("Using %s for symbols\n", kallsyms_filename);
free(kallsyms_allocated_filename);
- if (err > 0) {
+ if (err > 0 && !dso__is_kcore(dso)) {
dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
-out_fixup:
map__fixup_start(map);
map__fixup_end(map);
}
@@ -1075,7 +1320,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
if (symbol_conf.default_guest_vmlinux_name != NULL) {
err = dso__load_vmlinux(dso, map,
symbol_conf.default_guest_vmlinux_name, filter);
- goto out_try_fixup;
+ return err;
}
kallsyms_filename = symbol_conf.default_guest_kallsyms;
@@ -1089,13 +1334,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
-
-out_try_fixup:
- if (err > 0) {
- if (kallsyms_filename != NULL) {
- machine__mmap_name(machine, path, sizeof(path));
- dso__set_long_name(dso, strdup(path));
- }
+ if (err > 0 && !dso__is_kcore(dso)) {
+ machine__mmap_name(machine, path, sizeof(path));
+ dso__set_long_name(dso, strdup(path));
map__fixup_start(map);
map__fixup_end(map);
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 5f720dc076da..fd5b70ea2981 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -215,6 +215,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name);
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
@@ -247,4 +248,8 @@ void symbols__fixup_duplicate(struct rb_root *symbols);
void symbols__fixup_end(struct rb_root *symbols);
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type);
+typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
+int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+ bool *is_64_bit);
+
#endif /* __PERF_SYMBOL */
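
A hedged example of the new file__read_maps()/mapfn_t hook (the dump function is hypothetical): the callback is invoked once per loadable ELF segment with its start address, length and file offset, the same pattern kcore_mapfn() uses in symbol.c.

	#include <fcntl.h>
	#include <inttypes.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>
	#include "symbol.h"

	/* Hypothetical callback: print each loadable segment of an ELF file. */
	static int print_seg(u64 start, u64 len, u64 pgoff, void *data)
	{
		(void)data;
		printf("%#" PRIx64 "-%#" PRIx64 " @ %#" PRIx64 "\n",
		       start, start + len, pgoff);
		return 0;
	}

	static int dump_exec_segments(const char *path)
	{
		bool is_64_bit;
		int err, fd = open(path, O_RDONLY);

		if (fd < 0)
			return -1;
		/* exe=true keeps only PF_X segments, matching MAP__FUNCTION use. */
		err = file__read_maps(fd, true, print_seg, NULL, &is_64_bit);
		close(fd);
		return err;
	}
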
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 40399cbcca77..e3d4a550a703 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -7,17 +7,18 @@
#include "util.h"
#include "debug.h"
-struct thread *thread__new(pid_t pid)
+struct thread *thread__new(pid_t pid, pid_t tid)
{
struct thread *self = zalloc(sizeof(*self));
if (self != NULL) {
map_groups__init(&self->mg);
- self->pid = pid;
+ self->pid_ = pid;
+ self->tid = tid;
self->ppid = -1;
self->comm = malloc(32);
if (self->comm)
- snprintf(self->comm, 32, ":%d", self->pid);
+ snprintf(self->comm, 32, ":%d", self->tid);
}
return self;
@@ -57,7 +58,7 @@ int thread__comm_len(struct thread *self)
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
- return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) +
+ return fprintf(fp, "Thread %d %s\n", thread->tid, thread->comm) +
map_groups__fprintf(&thread->mg, verbose, fp);
}
@@ -84,7 +85,7 @@ int thread__fork(struct thread *self, struct thread *parent)
if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
return -ENOMEM;
- self->ppid = parent->pid;
+ self->ppid = parent->tid;
return 0;
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index eeb7ac62b9e3..4ebbb40d46d4 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -12,10 +12,12 @@ struct thread {
struct list_head node;
};
struct map_groups mg;
- pid_t pid;
+ pid_t pid_; /* Not all tools update this */
+ pid_t tid;
pid_t ppid;
char shortname[3];
bool comm_set;
+ bool dead; /* if set thread has exited */
char *comm;
int comm_len;
@@ -24,8 +26,12 @@ struct thread {
struct machine;
-struct thread *thread__new(pid_t pid);
+struct thread *thread__new(pid_t pid, pid_t tid);
void thread__delete(struct thread *self);
+static inline void thread__exited(struct thread *thread)
+{
+ thread->dead = true;
+}
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
@@ -45,6 +51,15 @@ void thread__find_addr_map(struct thread *thread, struct machine *machine,
void thread__find_addr_location(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter);
+ struct addr_location *al);
+
+static inline void *thread__priv(struct thread *thread)
+{
+ return thread->priv;
+}
+
+static inline void thread__set_priv(struct thread *thread, void *p)
+{
+ thread->priv = p;
+}
#endif /* __PERF_THREAD_H */
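
A small hedged sketch of the new thread__priv()/thread__set_priv() accessors (the state struct is made up, and the u64 typedef is assumed to come from the usual perf headers): tools can stash per-thread private data and fetch it back on later events.

	#include <stdlib.h>
	#include "thread.h"

	/* Hypothetical per-thread state a tool might attach. */
	struct tool_thread_state {
		u64 last_sample_time;
	};

	static struct tool_thread_state *tool_state(struct thread *thread)
	{
		struct tool_thread_state *state = thread__priv(thread);

		if (!state) {
			state = calloc(1, sizeof(*state));
			thread__set_priv(thread, state);
		}
		return state;
	}
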
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index b0e1aadba8d5..62b16b6165ba 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -18,12 +18,9 @@ typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine);
-typedef int (*event_attr_op)(union perf_event *event,
+typedef int (*event_attr_op)(struct perf_tool *tool,
+ union perf_event *event,
struct perf_evlist **pevlist);
-typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event);
-
-typedef int (*event_synth_op)(union perf_event *event,
- struct perf_session *session);
typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
struct perf_session *session);
@@ -39,8 +36,7 @@ struct perf_tool {
throttle,
unthrottle;
event_attr_op attr;
- event_synth_op tracing_data;
- event_simple_op event_type;
+ event_op2 tracing_data;
event_op2 finished_round,
build_id;
bool ordered_samples;
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index df46be93d902..b554ffc462b6 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -39,6 +39,8 @@ struct perf_top {
float min_percent;
};
+#define CONSOLE_CLEAR ""
+
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
void perf_top__reset_sample_counters(struct perf_top *top);
#endif /* __PERF_TOP_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 3917eb9a8479..f3c9e551bd35 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -46,65 +46,6 @@
static int output_fd;
-static const char *find_debugfs(void)
-{
- const char *path = perf_debugfs_mount(NULL);
-
- if (!path)
- pr_debug("Your kernel does not support the debugfs filesystem");
-
- return path;
-}
-
-/*
- * Finds the path to the debugfs/tracing
- * Allocates the string and stores it.
- */
-static const char *find_tracing_dir(void)
-{
- static char *tracing;
- static int tracing_found;
- const char *debugfs;
-
- if (tracing_found)
- return tracing;
-
- debugfs = find_debugfs();
- if (!debugfs)
- return NULL;
-
- tracing = malloc(strlen(debugfs) + 9);
- if (!tracing)
- return NULL;
-
- sprintf(tracing, "%s/tracing", debugfs);
-
- tracing_found = 1;
- return tracing;
-}
-
-static char *get_tracing_file(const char *name)
-{
- const char *tracing;
- char *file;
-
- tracing = find_tracing_dir();
- if (!tracing)
- return NULL;
-
- file = malloc(strlen(tracing) + strlen(name) + 2);
- if (!file)
- return NULL;
-
- sprintf(file, "%s/%s", tracing, name);
- return file;
-}
-
-static void put_tracing_file(char *file)
-{
- free(file);
-}
-
int bigendian(void)
{
unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0};
@@ -160,7 +101,7 @@ out:
return err;
}
-static int read_header_files(void)
+static int record_header_files(void)
{
char *path;
struct stat st;
@@ -299,7 +240,7 @@ out:
return err;
}
-static int read_ftrace_files(struct tracepoint_path *tps)
+static int record_ftrace_files(struct tracepoint_path *tps)
{
char *path;
int ret;
@@ -328,7 +269,7 @@ static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
return false;
}
-static int read_event_files(struct tracepoint_path *tps)
+static int record_event_files(struct tracepoint_path *tps)
{
struct dirent *dent;
struct stat st;
@@ -403,7 +344,7 @@ out:
return err;
}
-static int read_proc_kallsyms(void)
+static int record_proc_kallsyms(void)
{
unsigned int size;
const char *path = "/proc/kallsyms";
@@ -421,7 +362,7 @@ static int read_proc_kallsyms(void)
return record_file(path, 4);
}
-static int read_ftrace_printk(void)
+static int record_ftrace_printk(void)
{
unsigned int size;
char *path;
@@ -473,12 +414,27 @@ get_tracepoints_path(struct list_head *pattrs)
if (pos->attr.type != PERF_TYPE_TRACEPOINT)
continue;
++nr_tracepoints;
+
+ if (pos->name) {
+ ppath->next = tracepoint_name_to_path(pos->name);
+ if (ppath->next)
+ goto next;
+
+ if (strchr(pos->name, ':') == NULL)
+ goto try_id;
+
+ goto error;
+ }
+
+try_id:
ppath->next = tracepoint_id_to_path(pos->attr.config);
if (!ppath->next) {
+error:
pr_debug("No memory to alloc tracepoints list\n");
put_tracepoints_path(&path);
return NULL;
}
+next:
ppath = ppath->next;
}
@@ -520,8 +476,6 @@ static int tracing_data_header(void)
else
buf[0] = 0;
- read_trace_init(buf[0], buf[0]);
-
if (write(output_fd, buf, 1) != 1)
return -1;
@@ -583,19 +537,19 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
err = tracing_data_header();
if (err)
goto out;
- err = read_header_files();
+ err = record_header_files();
if (err)
goto out;
- err = read_ftrace_files(tps);
+ err = record_ftrace_files(tps);
if (err)
goto out;
- err = read_event_files(tps);
+ err = record_event_files(tps);
if (err)
goto out;
- err = read_proc_kallsyms();
+ err = record_proc_kallsyms();
if (err)
goto out;
- err = read_ftrace_printk();
+ err = record_ftrace_printk();
out:
/*
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 4454835a9ebc..fe7a27d67d2b 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -28,12 +28,6 @@
#include "util.h"
#include "trace-event.h"
-int header_page_size_size;
-int header_page_ts_size;
-int header_page_data_offset;
-
-bool latency_format;
-
struct pevent *read_trace_init(int file_bigendian, int host_bigendian)
{
struct pevent *pevent = pevent_alloc();
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index af215c0d2379..f2112270c663 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -39,10 +39,6 @@
static int input_fd;
-int file_bigendian;
-int host_bigendian;
-static int long_size;
-
static ssize_t trace_data_size;
static bool repipe;
@@ -216,7 +212,7 @@ static int read_ftrace_printk(struct pevent *pevent)
static int read_header_files(struct pevent *pevent)
{
unsigned long long size;
- char *header_event;
+ char *header_page;
char buf[BUFSIZ];
int ret = 0;
@@ -229,13 +225,26 @@ static int read_header_files(struct pevent *pevent)
}
size = read8(pevent);
- skip(size);
- /*
- * The size field in the page is of type long,
- * use that instead, since it represents the kernel.
- */
- long_size = header_page_size_size;
+ header_page = malloc(size);
+ if (header_page == NULL)
+ return -1;
+
+ if (do_read(header_page, size) < 0) {
+ pr_debug("did not read header page");
+ free(header_page);
+ return -1;
+ }
+
+ if (!pevent_parse_header_page(pevent, header_page, size,
+ pevent_get_long_size(pevent))) {
+ /*
+ * The commit field in the page is of type long,
+ * use that instead, since it represents the kernel.
+ */
+ pevent_set_long_size(pevent, pevent->header_page_size_size);
+ }
+ free(header_page);
if (do_read(buf, 13) < 0)
return -1;
@@ -246,14 +255,8 @@ static int read_header_files(struct pevent *pevent)
}
size = read8(pevent);
- header_event = malloc(size);
- if (header_event == NULL)
- return -1;
-
- if (do_read(header_event, size) < 0)
- ret = -1;
+ skip(size);
- free(header_event);
return ret;
}
@@ -349,6 +352,10 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
int show_funcs = 0;
int show_printk = 0;
ssize_t size = -1;
+ int file_bigendian;
+ int host_bigendian;
+ int file_long_size;
+ int file_page_size;
struct pevent *pevent;
int err;
@@ -391,12 +398,15 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
if (do_read(buf, 1) < 0)
goto out;
- long_size = buf[0];
+ file_long_size = buf[0];
- page_size = read4(pevent);
- if (!page_size)
+ file_page_size = read4(pevent);
+ if (!file_page_size)
goto out;
+ pevent_set_long_size(pevent, file_long_size);
+ pevent_set_page_size(pevent, file_page_size);
+
err = read_header_files(pevent);
if (err)
goto out;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 8715a1006d00..95199e4eea97 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -39,7 +39,8 @@ static void process_event_unsupported(union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
struct machine *machine __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct thread *thread __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 1978c398ad87..fafe1a40444a 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,32 +1,18 @@
#ifndef _PERF_UTIL_TRACE_EVENT_H
#define _PERF_UTIL_TRACE_EVENT_H
+#include <traceevent/event-parse.h>
#include "parse-events.h"
-#include "event-parse.h"
#include "session.h"
struct machine;
struct perf_sample;
union perf_event;
struct perf_tool;
+struct thread;
-extern int header_page_size_size;
-extern int header_page_ts_size;
-extern int header_page_data_offset;
-
-extern bool latency_format;
extern struct pevent *perf_pevent;
-enum {
- RINGBUF_TYPE_PADDING = 29,
- RINGBUF_TYPE_TIME_EXTEND = 30,
- RINGBUF_TYPE_TIME_STAMP = 31,
-};
-
-#ifndef TS_SHIFT
-#define TS_SHIFT 27
-#endif
-
int bigendian(void);
struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
@@ -83,7 +69,8 @@ struct scripting_ops {
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
- struct addr_location *al);
+ struct thread *thread,
+ struct addr_location *al);
int (*generate_script) (struct pevent *pevent, const char *outfile);
};
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c
index 958723ba3d2e..2f891f7e70bf 100644
--- a/tools/perf/util/unwind.c
+++ b/tools/perf/util/unwind.c
@@ -473,7 +473,7 @@ static int entry(u64 ip, struct thread *thread, struct machine *machine,
thread__find_addr_location(thread, machine,
PERF_RECORD_MISC_USER,
- MAP__FUNCTION, ip, &al, NULL);
+ MAP__FUNCTION, ip, &al);
e.ip = ip;
e.map = al.map;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 59d868add275..6d17b18e915d 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -269,3 +269,95 @@ void perf_debugfs_set_path(const char *mntpt)
snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt);
set_tracing_events_path(mntpt);
}
+
+static const char *find_debugfs(void)
+{
+ const char *path = perf_debugfs_mount(NULL);
+
+ if (!path)
+ fprintf(stderr, "Your kernel does not support the debugfs filesystem");
+
+ return path;
+}
+
+/*
+ * Finds the path to the debugfs/tracing
+ * Allocates the string and stores it.
+ */
+const char *find_tracing_dir(void)
+{
+ static char *tracing;
+ static int tracing_found;
+ const char *debugfs;
+
+ if (tracing_found)
+ return tracing;
+
+ debugfs = find_debugfs();
+ if (!debugfs)
+ return NULL;
+
+ tracing = malloc(strlen(debugfs) + 9);
+ if (!tracing)
+ return NULL;
+
+ sprintf(tracing, "%s/tracing", debugfs);
+
+ tracing_found = 1;
+ return tracing;
+}
+
+char *get_tracing_file(const char *name)
+{
+ const char *tracing;
+ char *file;
+
+ tracing = find_tracing_dir();
+ if (!tracing)
+ return NULL;
+
+ file = malloc(strlen(tracing) + strlen(name) + 2);
+ if (!file)
+ return NULL;
+
+ sprintf(file, "%s/%s", tracing, name);
+ return file;
+}
+
+void put_tracing_file(char *file)
+{
+ free(file);
+}
+
+int parse_nsec_time(const char *str, u64 *ptime)
+{
+ u64 time_sec, time_nsec;
+ char *end;
+
+ time_sec = strtoul(str, &end, 10);
+ if (*end != '.' && *end != '\0')
+ return -1;
+
+ if (*end == '.') {
+ int i;
+ char nsec_buf[10];
+
+ if (strlen(++end) > 9)
+ return -1;
+
+ strncpy(nsec_buf, end, 9);
+ nsec_buf[9] = '\0';
+
+ /* make it nsec precision */
+ for (i = strlen(nsec_buf); i < 9; i++)
+ nsec_buf[i] = '0';
+
+ time_nsec = strtoul(nsec_buf, &end, 10);
+ if (*end != '\0')
+ return -1;
+ } else
+ time_nsec = 0;
+
+ *ptime = time_sec * NSEC_PER_SEC + time_nsec;
+ return 0;
+}
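
For callers elsewhere in perf, the new helpers pair a malloc'd tracing path with an explicit release, and parse_nsec_time() turns a "sec.nsec" string into nanoseconds. A hypothetical caller-side sketch, assuming only the declarations added to util.h below (the tracepoint path and function name are examples, not part of the patch):

	#include <stdio.h>
	#include "util.h"	/* get_tracing_file, put_tracing_file, parse_nsec_time */

	static int tracing_example(void)
	{
		/* Resolve <debugfs>/tracing/<name>; the returned string is malloc'd. */
		char *path = get_tracing_file("events/sched/sched_switch/format");
		u64 ts;

		if (!path)
			return -1;
		printf("format file: %s\n", path);
		put_tracing_file(path);		/* frees the path */

		/* "123.456" is padded to nsec precision: 123456000000 ns */
		if (parse_nsec_time("123.456", &ts))
			return -1;
		printf("%llu ns\n", (unsigned long long)ts);
		return 0;
	}
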
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 2732fad03908..a53535949043 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -80,6 +80,9 @@ extern char buildid_dir[];
extern char tracing_events_path[];
extern void perf_debugfs_set_path(const char *mountpoint);
const char *perf_debugfs_mount(const char *mountpoint);
+const char *find_tracing_dir(void);
+char *get_tracing_file(const char *name);
+void put_tracing_file(char *file);
/* On most systems <limits.h> would have given us this, but
* not on some systems (e.g. GNU/Hurd).
@@ -205,6 +208,8 @@ static inline int has_extension(const char *filename, const char *ext)
#define NSEC_PER_MSEC 1000000L
#endif
+int parse_nsec_time(const char *str, u64 *ptime);
+
extern unsigned char sane_ctype[256];
#define GIT_SPACE 0x01
#define GIT_DIGIT 0x02