path: root/include
author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-27 00:02:43 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-27 00:02:43 +0300
commit     e382608254e06c8109f40044f5e693f2e04f3899 (patch)
tree       7c46c58a5a15d19a312c35a8e70e69d1cbd93236 /include
parent     fcbc1777ce8b5edf831c1eca16c1a63c1e4f39fb (diff)
parent     b44754d8262d3aab842998cf747f44fe6090be9f (diff)
download   linux-e382608254e06c8109f40044f5e693f2e04f3899.tar.xz
Merge tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "This patch series contains several clean ups and even a new trace
  clock "monotonic raw".  Also some enhancements to make the ring
  buffer even faster.  But the biggest and most noticeable change is
  the renaming of the ftrace* files, structures and variables that
  have to deal with trace events.

  Over the years I've had several developers tell me about their
  confusion with what ftrace is compared to events.  Technically,
  "ftrace" is the infrastructure to do the function hooks, which
  includes tracing and also helps with live kernel patching.  But the
  trace events are a separate entity altogether, and the files that
  affect the trace events should not be named "ftrace".  These include:

   include/trace/ftrace.h        ->  include/trace/trace_events.h
   include/linux/ftrace_event.h  ->  include/linux/trace_events.h

  Also, functions that are specific to trace events have been renamed:

   ftrace_print_*()                ->  trace_print_*()
   (un)register_ftrace_event()     ->  (un)register_trace_event()
   ftrace_event_name()             ->  trace_event_name()
   ftrace_trigger_soft_disabled()  ->  trace_trigger_soft_disabled()
   ftrace_define_fields_##call()   ->  trace_define_fields_##call()
   ftrace_get_offsets_##call()     ->  trace_get_offsets_##call()

  Structures have been renamed:

   ftrace_event_file                ->  trace_event_file
   ftrace_event_{call,class}        ->  trace_event_{call,class}
   ftrace_event_buffer              ->  trace_event_buffer
   ftrace_subsystem_dir             ->  trace_subsystem_dir
   ftrace_event_raw_##call          ->  trace_event_raw_##call
   ftrace_event_data_offset_##call  ->  trace_event_data_offset_##call
   ftrace_event_type_funcs_##call   ->  trace_event_type_funcs_##call

  And a few various variables and flags have also been updated.

  This has been sitting in linux-next for some time, and I have not
  heard a single complaint about this rename breaking anything.  Mostly
  because these functions, variables and structures are mostly internal
  to the tracing system and are seldom (if ever) used by anything
  external to that"

* tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (33 commits)
  ring_buffer: Allow to exit the ring buffer benchmark immediately
  ring-buffer-benchmark: Fix the wrong type
  ring-buffer-benchmark: Fix the wrong param in module_param
  ring-buffer: Add enum names for the context levels
  ring-buffer: Remove useless unused tracing_off_permanent()
  ring-buffer: Give NMIs a chance to lock the reader_lock
  ring-buffer: Add trace_recursive checks to ring_buffer_write()
  ring-buffer: Allways do the trace_recursive checks
  ring-buffer: Move recursive check to per_cpu descriptor
  ring-buffer: Add unlikelys to make fast path the default
  tracing: Rename ftrace_get_offsets_##call() to trace_event_get_offsets_##call()
  tracing: Rename ftrace_define_fields_##call() to trace_event_define_fields_##call()
  tracing: Rename ftrace_event_type_funcs_##call to trace_event_type_funcs_##call
  tracing: Rename ftrace_data_offset_##call to trace_event_data_offset_##call
  tracing: Rename ftrace_raw_##call event structures to trace_event_raw_##call
  tracing: Rename ftrace_trigger_soft_disabled() to trace_trigger_soft_disabled()
  tracing: Rename FTRACE_EVENT_FL_* flags to EVENT_FILE_FL_*
  tracing: Rename struct ftrace_subsystem_dir to trace_subsystem_dir
  tracing: Rename ftrace_event_name() to trace_event_name()
  tracing: Rename FTRACE_MAX_EVENT to TRACE_EVENT_TYPE_MAX
  ...
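For code outside the tracing core that used the old names, the conversion is purely mechanical. The following is a minimal, hypothetical sketch (the probe and its "value" argument are invented for illustration); the header path, struct names, trace_trigger_soft_disabled() and trace_event_name() are the post-rename interfaces from this merge:

  #include <linux/printk.h>
  #include <linux/trace_events.h>		/* was <linux/ftrace_event.h> */

  /* Hypothetical probe registered against a trace event file. */
  static void my_probe(void *__data, int value)
  {
  	struct trace_event_file *trace_file = __data;		/* was struct ftrace_event_file */
  	struct trace_event_call *call = trace_file->event_call;	/* was struct ftrace_event_call */

  	/* was ftrace_trigger_soft_disabled(), built on the FTRACE_EVENT_FL_* flags,
  	 * now the EVENT_FILE_FL_* flags */
  	if (trace_trigger_soft_disabled(trace_file))
  		return;

  	pr_debug("%s: value=%d\n", trace_event_name(call), value);	/* was ftrace_event_name() */
  }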
Diffstat (limited to 'include')
-rw-r--r--  include/linux/kernel.h                                                       6
-rw-r--r--  include/linux/module.h                                                       2
-rw-r--r--  include/linux/perf_event.h                                                   2
-rw-r--r--  include/linux/syscalls.h                                                    12
-rw-r--r--  include/linux/trace_events.h (renamed from include/linux/ftrace_event.h)  177
-rw-r--r--  include/trace/define_trace.h                                                 3
-rw-r--r--  include/trace/events/power.h                                                 2
-rw-r--r--  include/trace/perf.h                                                       350
-rw-r--r--  include/trace/syscall.h                                                      6
-rw-r--r--  include/trace/trace_events.h (renamed from include/trace/ftrace.h)         413
10 files changed, 478 insertions(+), 495 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 060dd7b61c6d..5acf5b70866d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -533,12 +533,6 @@ bool mac_pton(const char *s, u8 *mac);
*
* Most likely, you want to use tracing_on/tracing_off.
*/
-#ifdef CONFIG_RING_BUFFER
-/* trace_off_permanent stops recording with no way to bring it back */
-void tracing_off_permanent(void);
-#else
-static inline void tracing_off_permanent(void) { }
-#endif
enum ftrace_dump_mode {
DUMP_NONE,
diff --git a/include/linux/module.h b/include/linux/module.h
index 1e5436042eb0..255fca74de7d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -336,7 +336,7 @@ struct module {
const char **trace_bprintk_fmt_start;
#endif
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call **trace_events;
+ struct trace_event_call **trace_events;
unsigned int num_trace_events;
struct trace_enum_map **trace_enums;
unsigned int num_trace_enums;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3d80c432ede7..2027809433b3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -484,7 +484,7 @@ struct perf_event {
void *overflow_handler_context;
#ifdef CONFIG_EVENT_TRACING
- struct ftrace_event_call *tp_event;
+ struct trace_event_call *tp_event;
struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bb51becf23f8..b45c45b8c829 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -111,14 +111,14 @@ union bpf_attr;
#define __SC_STR_ADECL(t, a) #a
#define __SC_STR_TDECL(t, a) #t
-extern struct ftrace_event_class event_class_syscall_enter;
-extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_class event_class_syscall_enter;
+extern struct trace_event_class event_class_syscall_exit;
extern struct trace_event_functions enter_syscall_print_funcs;
extern struct trace_event_functions exit_syscall_print_funcs;
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_enter_##sname = { \
.class = &event_class_syscall_enter, \
{ \
@@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_enter_##sname = &event_enter_##sname;
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata __syscall_meta_##sname; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
event_exit_##sname = { \
.class = &event_class_syscall_exit, \
{ \
@@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.data = (void *)&__syscall_meta_##sname,\
.flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_exit_##sname = &event_exit_##sname;
diff --git a/include/linux/ftrace_event.h b/include/linux/trace_events.h
index f9ecf63d47f1..1063c850dbab 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/trace_events.h
@@ -1,6 +1,6 @@
-#ifndef _LINUX_FTRACE_EVENT_H
-#define _LINUX_FTRACE_EVENT_H
+#ifndef _LINUX_TRACE_EVENT_H
+#define _LINUX_TRACE_EVENT_H
#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
@@ -25,35 +25,35 @@ struct trace_print_flags_u64 {
const char *name;
};
-const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
- unsigned long flags,
- const struct trace_print_flags *flag_array);
+const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array);
-const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
- const struct trace_print_flags *symbol_array);
+const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array);
#if BITS_PER_LONG == 32
-const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
- unsigned long long val,
- const struct trace_print_flags_u64
+const char *trace_print_symbols_seq_u64(struct trace_seq *p,
+ unsigned long long val,
+ const struct trace_print_flags_u64
*symbol_array);
#endif
-const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
- unsigned int bitmask_size);
+const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+ unsigned int bitmask_size);
-const char *ftrace_print_hex_seq(struct trace_seq *p,
- const unsigned char *buf, int len);
+const char *trace_print_hex_seq(struct trace_seq *p,
+ const unsigned char *buf, int len);
-const char *ftrace_print_array_seq(struct trace_seq *p,
+const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
size_t el_size);
struct trace_iterator;
struct trace_event;
-int ftrace_raw_output_prep(struct trace_iterator *iter,
- struct trace_event *event);
+int trace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *event);
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -68,7 +68,7 @@ struct trace_entry {
int pid;
};
-#define FTRACE_MAX_EVENT \
+#define TRACE_EVENT_TYPE_MAX \
((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
/*
@@ -132,8 +132,8 @@ struct trace_event {
struct trace_event_functions *funcs;
};
-extern int register_ftrace_event(struct trace_event *event);
-extern int unregister_ftrace_event(struct trace_event *event);
+extern int register_trace_event(struct trace_event *event);
+extern int unregister_trace_event(struct trace_event *event);
/* Return values for print_line callback */
enum print_line_t {
@@ -157,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
-struct ftrace_event_file;
+struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
- struct ftrace_event_file *ftrace_file,
+ struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
struct ring_buffer_event *
@@ -183,7 +183,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
void tracing_record_cmdline(struct task_struct *tsk);
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
struct event_filter;
@@ -200,50 +200,39 @@ enum trace_reg {
#endif
};
-struct ftrace_event_call;
+struct trace_event_call;
-struct ftrace_event_class {
+struct trace_event_class {
const char *system;
void *probe;
#ifdef CONFIG_PERF_EVENTS
void *perf_probe;
#endif
- int (*reg)(struct ftrace_event_call *event,
+ int (*reg)(struct trace_event_call *event,
enum trace_reg type, void *data);
- int (*define_fields)(struct ftrace_event_call *);
- struct list_head *(*get_fields)(struct ftrace_event_call *);
+ int (*define_fields)(struct trace_event_call *);
+ struct list_head *(*get_fields)(struct trace_event_call *);
struct list_head fields;
- int (*raw_init)(struct ftrace_event_call *);
+ int (*raw_init)(struct trace_event_call *);
};
-extern int ftrace_event_reg(struct ftrace_event_call *event,
+extern int trace_event_reg(struct trace_event_call *event,
enum trace_reg type, void *data);
-int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
- char *fmt, ...);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
-
-struct ftrace_event_buffer {
+struct trace_event_buffer {
struct ring_buffer *buffer;
struct ring_buffer_event *event;
- struct ftrace_event_file *ftrace_file;
+ struct trace_event_file *trace_file;
void *entry;
unsigned long flags;
int pc;
};
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
- struct ftrace_event_file *ftrace_file,
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+ struct trace_event_file *trace_file,
unsigned long len);
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
- char *type, int len, char *item, int offset,
- int field_size, int sign, int filter);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
enum {
TRACE_EVENT_FL_FILTERED_BIT,
@@ -261,11 +250,11 @@ enum {
* FILTERED - The event has a filter attached
* CAP_ANY - Any user can enable for perf
* NO_SET_FILTER - Set when filter has error and is to be ignored
- * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
- * USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ * USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
*/
@@ -280,9 +269,9 @@ enum {
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
};
-struct ftrace_event_call {
+struct trace_event_call {
struct list_head list;
- struct ftrace_event_class *class;
+ struct trace_event_class *class;
union {
char *name;
/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
@@ -297,7 +286,7 @@ struct ftrace_event_call {
* bit 0: filter_active
* bit 1: allow trace by non root (cap any)
* bit 2: failed to apply filter
- * bit 3: ftrace internal event (do not enable)
+ * bit 3: trace internal event (do not enable)
* bit 4: Event was enabled by module
* bit 5: use call filter rather than file filter
* bit 6: Event is a tracepoint
@@ -309,13 +298,13 @@ struct ftrace_event_call {
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
- int (*perf_perm)(struct ftrace_event_call *,
+ int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
#endif
};
static inline const char *
-ftrace_event_name(struct ftrace_event_call *call)
+trace_event_name(struct trace_event_call *call)
{
if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
return call->tp ? call->tp->name : NULL;
@@ -324,21 +313,21 @@ ftrace_event_name(struct ftrace_event_call *call)
}
struct trace_array;
-struct ftrace_subsystem_dir;
+struct trace_subsystem_dir;
enum {
- FTRACE_EVENT_FL_ENABLED_BIT,
- FTRACE_EVENT_FL_RECORDED_CMD_BIT,
- FTRACE_EVENT_FL_FILTERED_BIT,
- FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
- FTRACE_EVENT_FL_SOFT_MODE_BIT,
- FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
- FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+ EVENT_FILE_FL_ENABLED_BIT,
+ EVENT_FILE_FL_RECORDED_CMD_BIT,
+ EVENT_FILE_FL_FILTERED_BIT,
+ EVENT_FILE_FL_NO_SET_FILTER_BIT,
+ EVENT_FILE_FL_SOFT_MODE_BIT,
+ EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ EVENT_FILE_FL_TRIGGER_MODE_BIT,
+ EVENT_FILE_FL_TRIGGER_COND_BIT,
};
/*
- * Ftrace event file flags:
+ * Event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
* FILTERED - The event has a filter attached
@@ -350,23 +339,23 @@ enum {
* TRIGGER_COND - When set, one or more triggers has an associated filter
*/
enum {
- FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
- FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
- FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
- FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
- FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
- FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
- FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
- FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+ EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+ EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+ EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
+ EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
+ EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
+ EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
+ EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+ EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
};
-struct ftrace_event_file {
+struct trace_event_file {
struct list_head list;
- struct ftrace_event_call *event_call;
+ struct trace_event_call *event_call;
struct event_filter *filter;
struct dentry *dir;
struct trace_array *tr;
- struct ftrace_subsystem_dir *system;
+ struct trace_subsystem_dir *system;
struct list_head triggers;
/*
@@ -399,7 +388,7 @@ struct ftrace_event_file {
early_initcall(trace_init_flags_##name);
#define __TRACE_EVENT_PERF_PERM(name, expr...) \
- static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+ static int perf_perm_##name(struct trace_event_call *tp_event, \
struct perf_event *p_event) \
{ \
return ({ expr; }); \
@@ -425,19 +414,19 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+extern int filter_check_discard(struct trace_event_file *file, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer,
struct ring_buffer_event *event);
-extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
-extern void event_triggers_post_call(struct ftrace_event_file *file,
+extern void event_triggers_post_call(struct trace_event_file *file,
enum event_trigger_type tt);
/**
- * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
*
* If any triggers without filters are attached to this event, they
@@ -446,14 +435,14 @@ extern void event_triggers_post_call(struct ftrace_event_file *file,
* otherwise false.
*/
static inline bool
-ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
event_triggers_call(file, NULL);
- if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
return true;
}
return false;
@@ -473,7 +462,7 @@ ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
* Returns true if the event is discarded, false otherwise.
*/
static inline bool
-__event_trigger_test_discard(struct ftrace_event_file *file,
+__event_trigger_test_discard(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry,
@@ -481,10 +470,10 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
{
unsigned long eflags = file->flags;
- if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ if (eflags & EVENT_FILE_FL_TRIGGER_COND)
*tt = event_triggers_call(file, entry);
- if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+ if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
ring_buffer_discard_commit(buffer, event);
else if (!filter_check_discard(file, entry, buffer, event))
return false;
@@ -506,7 +495,7 @@ __event_trigger_test_discard(struct ftrace_event_file *file,
* if the event is soft disabled and should be discarded.
*/
static inline void
-event_trigger_unlock_commit(struct ftrace_event_file *file,
+event_trigger_unlock_commit(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc)
@@ -537,7 +526,7 @@ event_trigger_unlock_commit(struct ftrace_event_file *file,
* trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
*/
static inline void
-event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc,
@@ -570,12 +559,12 @@ enum {
FILTER_TRACE_FN,
};
-extern int trace_event_raw_init(struct ftrace_event_call *call);
-extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+extern int trace_event_raw_init(struct trace_event_call *call);
+extern int trace_define_field(struct trace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
-extern int trace_add_event_call(struct ftrace_event_call *call);
-extern int trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_add_event_call(struct trace_event_call *call);
+extern int trace_remove_event_call(struct trace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@@ -624,4 +613,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
}
#endif
-#endif /* _LINUX_FTRACE_EVENT_H */
+#endif /* _LINUX_TRACE_EVENT_H */
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 02e1003568a4..09b3880105a9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -87,7 +87,8 @@
#define DECLARE_TRACE(name, proto, args)
#ifdef CONFIG_EVENT_TRACING
-#include <trace/ftrace.h>
+#include <trace/trace_events.h>
+#include <trace/perf.h>
#endif
#undef TRACE_EVENT
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 630d1e5e4de0..284244ebfe8d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -7,7 +7,7 @@
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#define TPS(x) tracepoint_string(x)
diff --git a/include/trace/perf.h b/include/trace/perf.h
new file mode 100644
index 000000000000..1b5443cebedc
--- /dev/null
+++ b/include/trace/perf.h
@@ -0,0 +1,350 @@
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ * struct trace_event_file *trace_file = __data;
+ * struct trace_event_call *event_call = trace_file->event_call;
+ * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ * unsigned long eflags = trace_file->flags;
+ * enum event_trigger_type __tt = ETT_NONE;
+ * struct ring_buffer_event *event;
+ * struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ * struct ring_buffer *buffer;
+ * unsigned long irq_flags;
+ * int __data_size;
+ * int pc;
+ *
+ * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ * if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ * event_triggers_call(trace_file, NULL);
+ * if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ * return;
+ * }
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ * event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ * event_<call>->event.type,
+ * sizeof(*entry) + __data_size,
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ * { <assign>; } <-- Here we assign the entries by the __field and
+ * __array macros.
+ *
+ * if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ * __tt = event_triggers_call(trace_file, entry);
+ *
+ * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ * &trace_file->flags))
+ * ring_buffer_discard_commit(buffer, event);
+ * else if (!filter_check_discard(trace_file, entry, buffer, event))
+ * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ * if (__tt)
+ * event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ * .system = "<system>",
+ * .define_fields = trace_event_define_fields_<call>,
+ * .fields = LIST_HEAD_INIT(event_class_##call.fields),
+ * .raw_init = trace_event_raw_init,
+ * .probe = trace_event_raw_event_##call,
+ * .reg = trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ * .class = event_class_<template>,
+ * {
+ * .tp = &__tracepoint_<call>,
+ * },
+ * .event = &ftrace_event_type_<call>,
+ * .print_fmt = print_fmt_<call>,
+ * .flags = TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // its only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+ static notrace void \
+ perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+ .perf_probe = perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_buffer fbuffer; \
+ struct trace_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static char print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata event_class_##call = { \
+ .system = TRACE_SYSTEM_STRING, \
+ .define_fields = trace_event_define_fields_##call, \
+ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
+ .raw_init = trace_event_raw_init, \
+ .probe = trace_event_raw_event_##call, \
+ .reg = trace_event_reg, \
+ _TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##template, \
+ .print_fmt = print_fmt_##template, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+ \
+static char print_fmt_##call[] = print; \
+ \
+static struct trace_event_call __used event_##call = { \
+ .class = &event_class_##template, \
+ { \
+ .tp = &__tracepoint_##call, \
+ }, \
+ .event.funcs = &trace_event_type_funcs_##call, \
+ .print_fmt = print_fmt_##call, \
+ .flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TRACE_SYSTEM_VAR
+
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+ ((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_addr
+#define __perf_addr(a) (__addr = (a))
+
+#undef __perf_count
+#define __perf_count(c) (__count = (c))
+
+#undef __perf_task
+#define __perf_task(t) (__task = (t))
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ struct trace_event_call *event_call = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_raw_##call *entry; \
+ struct pt_regs *__regs; \
+ u64 __addr = 0, __count = 1; \
+ struct task_struct *__task = NULL; \
+ struct hlist_head *head; \
+ int __entry_size; \
+ int __data_size; \
+ int rctx; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ head = this_cpu_ptr(event_call->perf_events); \
+ if (__builtin_constant_p(!__task) && !__task && \
+ hlist_empty(head)) \
+ return; \
+ \
+ __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+ sizeof(u64)); \
+ __entry_size -= sizeof(u32); \
+ \
+ entry = perf_trace_buf_prepare(__entry_size, \
+ event_call->event.type, &__regs, &rctx); \
+ if (!entry) \
+ return; \
+ \
+ perf_fetch_caller_regs(__regs); \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+ __count, __regs, head, __task); \
+}
+
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void perf_test_probe_##call(void) \
+{ \
+ check_trace_callback_type_##call(perf_trace_##template); \
+}
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 9674145e2f6a..7434f0f5d3f6 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -3,7 +3,7 @@
#include <linux/tracepoint.h>
#include <linux/unistd.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
@@ -29,8 +29,8 @@ struct syscall_metadata {
const char **args;
struct list_head enter_fields;
- struct ftrace_event_call *enter_event;
- struct ftrace_event_call *exit_event;
+ struct trace_event_call *enter_event;
+ struct trace_event_call *exit_event;
};
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
diff --git a/include/trace/ftrace.h b/include/trace/trace_events.h
index 37d4b10b111d..43be3b0e44d3 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/trace_events.h
@@ -3,7 +3,7 @@
*
* Override the macros in <trace/trace_events.h> to include the following:
*
- * struct ftrace_raw_<call> {
+ * struct trace_event_raw_<call> {
* struct trace_entry ent;
* <type> <item>;
* <type2> <item2>[<len>];
@@ -16,7 +16,7 @@
* in the structure.
*/
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
@@ -95,17 +95,17 @@ TRACE_MAKE_SYSTEM_STR();
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
- struct ftrace_raw_##name { \
+ struct trace_event_raw_##name { \
struct trace_entry ent; \
tstruct \
char __data[0]; \
}; \
\
- static struct ftrace_event_class event_class_##name;
+ static struct trace_event_class event_class_##name;
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
- static struct ftrace_event_call __used \
+ static struct trace_event_call __used \
__attribute__((__aligned__(4))) event_##name
#undef DEFINE_EVENT_FN
@@ -138,7 +138,7 @@ TRACE_MAKE_SYSTEM_STR();
*
* Include the following:
*
- * struct ftrace_data_offsets_<call> {
+ * struct trace_event_data_offsets_<call> {
* u32 <item1>;
* u32 <item2>;
* [...]
@@ -178,7 +178,7 @@ TRACE_MAKE_SYSTEM_STR();
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- struct ftrace_data_offsets_##call { \
+ struct trace_event_data_offsets_##call { \
tstruct; \
};
@@ -203,10 +203,10 @@ TRACE_MAKE_SYSTEM_STR();
* Override the macros in <trace/trace_events.h> to include the following:
*
* enum print_line_t
- * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
* {
* struct trace_seq *s = &iter->seq;
- * struct ftrace_raw_<call> *field; <-- defined in stage 1
+ * struct trace_event_raw_<call> *field; <-- defined in stage 1
* struct trace_entry *entry;
* struct trace_seq *p = &iter->tmp_seq;
* int ret;
@@ -258,7 +258,7 @@ TRACE_MAKE_SYSTEM_STR();
void *__bitmask = __get_dynamic_array(field); \
unsigned int __bitmask_size; \
__bitmask_size = __get_dynamic_array_len(field); \
- ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
})
#undef __print_flags
@@ -266,7 +266,7 @@ TRACE_MAKE_SYSTEM_STR();
({ \
static const struct trace_print_flags __flags[] = \
{ flag_array, { -1, NULL }}; \
- ftrace_print_flags_seq(p, delim, flag, __flags); \
+ trace_print_flags_seq(p, delim, flag, __flags); \
})
#undef __print_symbolic
@@ -274,7 +274,7 @@ TRACE_MAKE_SYSTEM_STR();
({ \
static const struct trace_print_flags symbols[] = \
{ symbol_array, { -1, NULL }}; \
- ftrace_print_symbols_seq(p, value, symbols); \
+ trace_print_symbols_seq(p, value, symbols); \
})
#undef __print_symbolic_u64
@@ -283,7 +283,7 @@ TRACE_MAKE_SYSTEM_STR();
({ \
static const struct trace_print_flags_u64 symbols[] = \
{ symbol_array, { -1, NULL } }; \
- ftrace_print_symbols_seq_u64(p, value, symbols); \
+ trace_print_symbols_seq_u64(p, value, symbols); \
})
#else
#define __print_symbolic_u64(value, symbol_array...) \
@@ -291,30 +291,30 @@ TRACE_MAKE_SYSTEM_STR();
#endif
#undef __print_hex
-#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
#undef __print_array
#define __print_array(array, count, el_size) \
({ \
BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
el_size != 4 && el_size != 8); \
- ftrace_print_array_seq(p, array, count, el_size); \
+ trace_print_array_seq(p, array, count, el_size); \
})
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
- struct trace_event *trace_event) \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
{ \
struct trace_seq *s = &iter->seq; \
struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
- struct ftrace_raw_##call *field; \
+ struct trace_event_raw_##call *field; \
int ret; \
\
field = (typeof(field))iter->ent; \
\
- ret = ftrace_raw_output_prep(iter, trace_event); \
+ ret = trace_raw_output_prep(iter, trace_event); \
if (ret != TRACE_TYPE_HANDLED) \
return ret; \
\
@@ -322,17 +322,17 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
\
return trace_handle_return(s); \
} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+ .trace = trace_raw_output_##call, \
};
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *event) \
{ \
- struct ftrace_raw_##template *field; \
+ struct trace_event_raw_##template *field; \
struct trace_entry *entry; \
struct trace_seq *p = &iter->tmp_seq; \
\
@@ -346,10 +346,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
field = (typeof(field))entry; \
\
trace_seq_init(p); \
- return ftrace_output_call(iter, #call, print); \
+ return trace_output_call(iter, #call, print); \
} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+ .trace = trace_raw_output_##call, \
};
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -407,9 +407,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace __init \
-ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+trace_event_define_fields_##call(struct trace_event_call *event_call) \
{ \
- struct ftrace_raw_##call field; \
+ struct trace_event_raw_##call field; \
int ret; \
\
tstruct; \
@@ -485,12 +485,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static inline notrace int ftrace_get_offsets_##call( \
- struct ftrace_data_offsets_##call *__data_offsets, proto) \
+static inline notrace int trace_event_get_offsets_##call( \
+ struct trace_event_data_offsets_##call *__data_offsets, proto) \
{ \
int __data_size = 0; \
int __maybe_unused __item_length; \
- struct ftrace_raw_##call __maybe_unused *entry; \
+ struct trace_event_raw_##call __maybe_unused *entry; \
\
tstruct; \
\
@@ -506,354 +506,3 @@ static inline notrace int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct ftrace_event_call event_<call>;
- *
- * static void ftrace_raw_event_<call>(void *__data, proto)
- * {
- * struct ftrace_event_file *ftrace_file = __data;
- * struct ftrace_event_call *event_call = ftrace_file->event_call;
- * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- * unsigned long eflags = ftrace_file->flags;
- * enum event_trigger_type __tt = ETT_NONE;
- * struct ring_buffer_event *event;
- * struct ftrace_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
- * unsigned long irq_flags;
- * int __data_size;
- * int pc;
- *
- * if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- * if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
- * event_triggers_call(ftrace_file, NULL);
- * if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
- * return;
- * }
- *
- * local_save_flags(irq_flags);
- * pc = preempt_count();
- *
- * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
- * event_<call>->event.type,
- * sizeof(*entry) + __data_size,
- * irq_flags, pc);
- * if (!event)
- * return;
- * entry = ring_buffer_event_data(event);
- *
- * { <assign>; } <-- Here we assign the entries by the __field and
- * __array macros.
- *
- * if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
- * __tt = event_triggers_call(ftrace_file, entry);
- *
- * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- * &ftrace_file->flags))
- * ring_buffer_discard_commit(buffer, event);
- * else if (!filter_check_discard(ftrace_file, entry, buffer, event))
- * trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
- *
- * if (__tt)
- * event_triggers_post_call(ftrace_file, __tt);
- * }
- *
- * static struct trace_event ftrace_event_type_<call> = {
- * .trace = ftrace_raw_output_<call>, <-- stage 2
- * };
- *
- * static char print_fmt_<call>[] = <TP_printk>;
- *
- * static struct ftrace_event_class __used event_class_<template> = {
- * .system = "<system>",
- * .define_fields = ftrace_define_fields_<call>,
- * .fields = LIST_HEAD_INIT(event_class_##call.fields),
- * .raw_init = trace_event_raw_init,
- * .probe = ftrace_raw_event_##call,
- * .reg = ftrace_event_reg,
- * };
- *
- * static struct ftrace_event_call event_<call> = {
- * .class = event_class_<template>,
- * {
- * .tp = &__tracepoint_<call>,
- * },
- * .event = &ftrace_event_type_<call>,
- * .print_fmt = print_fmt_<call>,
- * .flags = TRACE_EVENT_FL_TRACEPOINT,
- * };
- * // its only safe to use pointers when doing linker tricks to
- * // create an array.
- * static struct ftrace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
- *
- */
-
-#ifdef CONFIG_PERF_EVENTS
-
-#define _TRACE_PERF_PROTO(call, proto) \
- static notrace void \
- perf_trace_##call(void *__data, proto);
-
-#define _TRACE_PERF_INIT(call) \
- .perf_probe = perf_trace_##call,
-
-#else
-#define _TRACE_PERF_PROTO(call, proto)
-#define _TRACE_PERF_INIT(call)
-#endif /* CONFIG_PERF_EVENTS */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- __entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
- strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
- memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
-static notrace void \
-ftrace_raw_event_##call(void *__data, proto) \
-{ \
- struct ftrace_event_file *ftrace_file = __data; \
- struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- struct ftrace_event_buffer fbuffer; \
- struct ftrace_raw_##call *entry; \
- int __data_size; \
- \
- if (ftrace_trigger_soft_disabled(ftrace_file)) \
- return; \
- \
- __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
- \
- entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
- sizeof(*entry) + __data_size); \
- \
- if (!entry) \
- return; \
- \
- tstruct \
- \
- { assign; } \
- \
- ftrace_event_buffer_commit(&fbuffer); \
-}
-/*
- * The ftrace_test_probe is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the ftrace probe will
- * fail to compile unless it too is updated.
- */
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void ftrace_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(ftrace_raw_event_##template); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used __refdata event_class_##call = { \
- .system = TRACE_SYSTEM_STRING, \
- .define_fields = ftrace_define_fields_##call, \
- .fields = LIST_HEAD_INIT(event_class_##call.fields),\
- .raw_init = trace_event_raw_init, \
- .probe = ftrace_raw_event_##call, \
- .reg = ftrace_event_reg, \
- _TRACE_PERF_INIT(call) \
-};
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
- \
-static struct ftrace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &ftrace_event_type_funcs_##template, \
- .print_fmt = print_fmt_##template, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
- \
-static char print_fmt_##call[] = print; \
- \
-static struct ftrace_event_call __used event_##call = { \
- .class = &event_class_##template, \
- { \
- .tp = &__tracepoint_##call, \
- }, \
- .event.funcs = &ftrace_event_type_funcs_##call, \
- .print_fmt = print_fmt_##call, \
- .flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef TRACE_SYSTEM_VAR
-
-#ifdef CONFIG_PERF_EVENTS
-
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
- ((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) (char *)__get_dynamic_array(field)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __perf_addr
-#define __perf_addr(a) (__addr = (a))
-
-#undef __perf_count
-#define __perf_count(c) (__count = (c))
-
-#undef __perf_task
-#define __perf_task(t) (__task = (t))
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static notrace void \
-perf_trace_##call(void *__data, proto) \
-{ \
- struct ftrace_event_call *event_call = __data; \
- struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- struct ftrace_raw_##call *entry; \
- struct pt_regs *__regs; \
- u64 __addr = 0, __count = 1; \
- struct task_struct *__task = NULL; \
- struct hlist_head *head; \
- int __entry_size; \
- int __data_size; \
- int rctx; \
- \
- __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
- \
- head = this_cpu_ptr(event_call->perf_events); \
- if (__builtin_constant_p(!__task) && !__task && \
- hlist_empty(head)) \
- return; \
- \
- __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
- sizeof(u64)); \
- __entry_size -= sizeof(u32); \
- \
- entry = perf_trace_buf_prepare(__entry_size, \
- event_call->event.type, &__regs, &rctx); \
- if (!entry) \
- return; \
- \
- perf_fetch_caller_regs(__regs); \
- \
- tstruct \
- \
- { assign; } \
- \
- perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, __regs, head, __task); \
-}
-
-/*
- * This part is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the
- * perf probe will fail to compile unless it too is updated.
- */
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void perf_test_probe_##call(void) \
-{ \
- check_trace_callback_type_##call(perf_trace_##template); \
-}
-
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_PERF_EVENTS */
-