path: root/tools/perf/util/parse-events.l
author		Kan Liang <kan.liang@intel.com>		2014-10-07 19:08:51 +0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2014-10-15 23:05:45 +0400
commit		ba32a4511c65e41958384d2f7a046a6ec6e151e5 (patch)
tree		d3fb7e8d38f0aaba0cbd611215d1826f81800a4f /tools/perf/util/parse-events.l
parent		dcb4e1022b40d886027500821a592dd8f8ccde8f (diff)
download	linux-ba32a4511c65e41958384d2f7a046a6ec6e151e5.tar.xz
perf tools: Add support to new style format of kernel PMU event
Add new rules for kernel PMU events. For now the patch only handles PMU event
names of the form "a-b" and "a".

event_pmu:
PE_KERNEL_PMU_EVENT sep_dc |
PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc

The PE_KERNEL_PMU_EVENT token is for cycles-ct/cycles-t/mem-loads/mem-stores.
The prefix "cycles" is mixed up with cpu-cycles, and "loads"/"stores" are mixed
up with cache events, so these names have to be hardcoded in the lexer.

The PE_PMU_EVENT_PRE and PE_PMU_EVENT_SUF tokens are for other PMU events. The
lexer looks a generic identifier up in the table and returns the matched token.
If there is no match, the generic PE_NAME token is returned.

With these rules, kernel PMU events can use the new style format without //,
so you can use:

  perf record -e mem-loads ...

instead of:

  perf record -e cpu/mem-loads/ ...

Signed-off-by: Kan Liang <kan.liang@intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/1412694532-23391-4-git-send-email-kan.liang@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
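As an aside (not part of the commit), a minimal standalone C sketch of the kind
of table lookup the commit describes for pmu_str_check()/perf_pmu__parse_check():
classify an identifier as a whole event name, a prefix, a suffix, or a plain
name. The table contents, enum and function names below are toy stand-ins, not
the real perf code.

#include <stdio.h>
#include <string.h>

enum toy_sym { TOY_NONE, TOY_SYMBOL, TOY_PREFIX, TOY_SUFFIX };

static const struct {
	const char *name;
	enum toy_sym type;
} toy_table[] = {
	{ "mem-loads",  TOY_SYMBOL },	/* whole "a-b" name, like PE_KERNEL_PMU_EVENT */
	{ "mem-stores", TOY_SYMBOL },
	{ "foo",        TOY_PREFIX },	/* hypothetical "a" part of an "a-b" event */
	{ "bar",        TOY_SUFFIX },	/* hypothetical "b" part of an "a-b" event */
};

/* Classify an identifier the way pmu_str_check() picks which token to return. */
static enum toy_sym toy_classify(const char *text)
{
	size_t i;

	for (i = 0; i < sizeof(toy_table) / sizeof(toy_table[0]); i++)
		if (!strcmp(text, toy_table[i].name))
			return toy_table[i].type;
	return TOY_NONE;	/* no match: the lexer falls back to PE_NAME */
}

int main(void)
{
	printf("%d %d\n", toy_classify("mem-loads"), toy_classify("unknown"));
	return 0;
}

In the real lexer this lookup happens only for the generic {name} pattern; the
hardcoded cycles-ct/cycles-t/mem-loads/mem-stores rules in the diff below return
PE_KERNEL_PMU_EVENT directly, before {name} gets a chance to match.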
Diffstat (limited to 'tools/perf/util/parse-events.l')
-rw-r--r--	tools/perf/util/parse-events.l	30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 343299575b30..906630bbf8eb 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -51,6 +51,24 @@ static int str(yyscan_t scanner, int token)
return token;
}
+static int pmu_str_check(yyscan_t scanner)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ yylval->str = strdup(text);
+ switch (perf_pmu__parse_check(text)) {
+ case PMU_EVENT_SYMBOL_PREFIX:
+ return PE_PMU_EVENT_PRE;
+ case PMU_EVENT_SYMBOL_SUFFIX:
+ return PE_PMU_EVENT_SUF;
+ case PMU_EVENT_SYMBOL:
+ return PE_KERNEL_PMU_EVENT;
+ default:
+ return PE_NAME;
+ }
+}
+
static int sym(yyscan_t scanner, int type, int config)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -178,6 +196,16 @@ alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_AL
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+ /*
+  * We have to handle the kernel PMU events cycles-ct/cycles-t/mem-loads/mem-stores
+  * separately, because the prefix "cycles" is mixed up with cpu-cycles and
+  * "loads"/"stores" are mixed up with cache events.
+  */
+cycles-ct { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+cycles-t { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+mem-loads { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+mem-stores { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+
L1-dcache|l1-d|l1d|L1-data |
L1-icache|l1-i|l1i|L1-instruction |
LLC|L2 |
@@ -199,7 +227,7 @@ r{num_raw_hex} { return raw(yyscanner); }
{num_hex} { return value(yyscanner, 16); }
{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
-{name} { return str(yyscanner, PE_NAME); }
+{name} { return pmu_str_check(yyscanner); }
"/" { BEGIN(config); return '/'; }
- { return '-'; }
, { BEGIN(event); return ','; }
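For context on how the parser side could consume the PE_PMU_EVENT_PRE '-'
PE_PMU_EVENT_SUF shape from the event_pmu rule quoted in the commit message,
here is a minimal sketch of the string handling involved, assuming the action
simply rejoins the strdup'ed prefix and suffix into the full event name. The
helper name and error handling are illustrative only, not taken from
parse-events.y.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Rebuild the full "a-b" event name from the two pieces the lexer returned.
 * toy_join_event() is a made-up helper, not code from the perf parser.
 */
static char *toy_join_event(const char *pre, const char *suf)
{
	size_t len = strlen(pre) + 1 + strlen(suf) + 1;	/* '-' plus NUL */
	char *name = malloc(len);

	if (name)
		snprintf(name, len, "%s-%s", pre, suf);
	return name;
}

int main(void)
{
	char *name = toy_join_event("mem", "loads");	/* -> "mem-loads" */

	if (name)
		printf("%s\n", name);
	free(name);
	return 0;
}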