author    Steven Rostedt (VMware) <rostedt@goodmis.org>  2020-04-02 05:44:46 +0300
committer Steven Rostedt (VMware) <rostedt@goodmis.org>  2020-04-03 18:30:50 +0300
commit    8e99cf91b99bb30e16727f10ad6828741c0e992f (patch)
tree      ee26bf768874abdb9d63f706a8c14d136fd850a8
parent    2ab2a0924b9980551ebe1c47d2a402a94efc1835 (diff)
tracing: Do not allocate buffer in trace_find_next_entry() in atomic
When dumping out the trace data in latency format, a check is made that
peeks at the next event to compare its timestamp with the current one,
and if the delta is large enough, a marker is added to show it. But to
do this, the current event must be saved first, because peeking at the
next event would otherwise remove it. To save the event, a temp buffer
is used; if the event is bigger than the temp buffer, the temp buffer
is freed and a bigger one is allocated.

This allocation is a problem when called in atomic context, and the
only way this gets called in atomic context is via ftrace_dump().
Thus, use a static buffer of 128 bytes (which covers most events), and
if the event is bigger than that, simply return NULL. The callers of
trace_find_next_entry() must already handle a NULL case, as that is
what would happen if the allocation failed.

Link: https://lore.kernel.org/r/20200326091256.GR11705@shao2-debian

Fixes: ff895103a84ab ("tracing: Save off entry when peeking at next entry")
Reported-by: kernel test robot <rong.a.chen@intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
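As an illustration of the NULL contract described above, here is a
minimal, hypothetical caller-side sketch (not part of the patch;
loosely modeled on what trace_print_lat_context() does with the peeked
entry). A NULL return simply means there is no next entry to compare
against, whether because no event follows, kmalloc() failed, or the
event did not fit in the static buffer:

	/* Hypothetical caller: peek at the next event's timestamp. */
	static void example_check_delta(struct trace_iterator *iter)
	{
		u64 next_ts = 0;
		struct trace_entry *next;

		next = trace_find_next_entry(iter, NULL, &next_ts);
		if (!next)
			return;	/* nothing to peek at; emit no delta marker */

		/* ... compare next_ts with iter->ts and mark a large delta ... */
	}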
 kernel/trace/trace.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6519b7afc499..8d2b98812625 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3472,6 +3472,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 	return next;
 }
 
+#define STATIC_TEMP_BUF_SIZE	128
+static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
+
 /* Find the next real entry, without updating the iterator itself */
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts)
@@ -3481,12 +3484,25 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 	struct trace_entry *entry;
 
 	/*
+	 * If called from ftrace_dump(), then the iter->temp buffer
+	 * will be the static_temp_buf and not created from kmalloc.
+	 * If the entry size is greater than the buffer, we can
+	 * not save it. Just return NULL in that case. This is only
+	 * used to add markers when two consecutive events' time
+	 * stamps have a large delta. See trace_print_lat_context()
+	 */
+	if (iter->temp == static_temp_buf &&
+	    STATIC_TEMP_BUF_SIZE < ent_size)
+		return NULL;
+
+	/*
 	 * The __find_next_entry() may call peek_next_entry(), which may
 	 * call ring_buffer_peek() that may make the contents of iter->ent
 	 * undefined. Need to copy iter->ent now.
 	 */
 	if (iter->ent && iter->ent != iter->temp) {
-		if (!iter->temp || iter->temp_size < iter->ent_size) {
+		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
+		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
 			kfree(iter->temp);
 			iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
 			if (!iter->temp)
@@ -9203,6 +9219,9 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 
 	/* Simulate the iterator */
 	trace_init_global_iter(&iter);
+	/* Can not use kmalloc for iter.temp */
+	iter.temp = static_temp_buf;
+	iter.temp_size = STATIC_TEMP_BUF_SIZE;
 
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
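
The pattern the patch applies can be reduced to a self-contained
sketch (all names here are illustrative, not from the kernel):
allocate normally when sleeping is allowed, but in atomic context fall
back to a fixed static buffer and refuse requests that do not fit.
Note that, as with ftrace_dump()'s use of static_temp_buf, the static
buffer assumes a single user at a time:

	#include <linux/slab.h>

	#define SCRATCH_BUF_SIZE 128
	static char scratch_buf[SCRATCH_BUF_SIZE];

	/* Hypothetical helper: return a scratch buffer of at least @size bytes. */
	static void *get_scratch(size_t size, bool atomic)
	{
		if (atomic) {
			/* No allocation allowed: use the static buffer or give up. */
			return size <= SCRATCH_BUF_SIZE ? scratch_buf : NULL;
		}
		return kmalloc(size, GFP_KERNEL);
	}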