From 2a04b8d846dca196342862caecadd2a78460d8dc Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 12 Jul 2022 18:58:20 -0400 Subject: tracing: devlink: Use static array for string in devlink_trap_report event The trace event devlink_trap_report uses the __dynamic_array() macro to determine the size of the input_dev_name field. This is because it needs to test the dev field for NULL, and will use "NULL" if it is. But it also has the size of the dynamic array as a fixed IFNAMSIZ bytes. This defeats the purpose of the dynamic array, as this will reserve that amount of bytes on the ring buffer, and to make matters worse, it will even save that size in the event as the event expects it to be dynamic (for which it is not). Since IFNAMSIZ is just 16 bytes, just make it a static array and this will remove the meta data from the event that records the size. Link: https://lkml.kernel.org/r/20220712185820.002d9fb5@gandalf.local.home Cc: Leon Romanovsky Cc: Jiri Pirko Cc: "David S. Miller" Cc: Eric Dumazet Cc: Paolo Abeni Cc: netdev@vger.kernel.org Reviewed-by: Ido Schimmel Tested-by: Ido Schimmel Acked-by: Jakub Kicinski Signed-off-by: Steven Rostedt (Google) --- include/trace/events/devlink.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'include/trace') diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h index 2814f188d98c..24969184c534 100644 --- a/include/trace/events/devlink.h +++ b/include/trace/events/devlink.h @@ -186,7 +186,7 @@ TRACE_EVENT(devlink_trap_report, __string(driver_name, devlink_to_dev(devlink)->driver->name) __string(trap_name, metadata->trap_name) __string(trap_group_name, metadata->trap_group_name) - __dynamic_array(char, input_dev_name, IFNAMSIZ) + __array(char, input_dev_name, IFNAMSIZ) ), TP_fast_assign( @@ -197,15 +197,14 @@ TRACE_EVENT(devlink_trap_report, __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __assign_str(trap_name, metadata->trap_name); __assign_str(trap_group_name, metadata->trap_group_name); - __assign_str(input_dev_name, - (input_dev ? input_dev->name : "NULL")); + strscpy(__entry->input_dev_name, input_dev ? input_dev->name : "NULL", IFNAMSIZ); ), TP_printk("bus_name=%s dev_name=%s driver_name=%s trap_name=%s " "trap_group_name=%s input_dev_name=%s", __get_str(bus_name), __get_str(dev_name), __get_str(driver_name), __get_str(trap_name), __get_str(trap_group_name), - __get_str(input_dev_name)) + __entry->input_dev_name) ); #endif /* _TRACE_DEVLINK_H */ -- cgit v1.2.3 From fca8300f68fe3fbb2fcda862f0f114011715b3bc Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Mon, 4 Jul 2022 09:14:36 -0400 Subject: tracing/ipv4/ipv6: Use static array for name field in fib*_lookup_table event The fib_lookup_table and fib6_lookup_table events declare name as a dynamic_array, but also give it a fixed size, which defeats the purpose of the dynamic array, especially since the dynamic array also includes meta data in the event to specify its size. Since the size of the name is at most 16 bytes (defined by IFNAMSIZ), it is not worth spending the effort to determine the size of the string. Just use a fixed size array and copy into it. This will save 4 bytes that are used for the meta data that saves the size and position of a dynamic array, and even slightly speed up the event processing. Link: https://lkml.kernel.org/r/20220704091436.3705edbf@rorschach.local.home Cc: David S. 
Miller Cc: netdev@vger.kernel.org Acked-by: Jakub Kicinski Reviewed-by: David Ahern Signed-off-by: Steven Rostedt (Google) --- include/trace/events/fib.h | 6 +++--- include/trace/events/fib6.h | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/trace') diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h index 6f2a4dc35e37..c2300c407f58 100644 --- a/include/trace/events/fib.h +++ b/include/trace/events/fib.h @@ -32,7 +32,7 @@ TRACE_EVENT(fib_table_lookup, __array( __u8, gw6, 16 ) __field( u16, sport ) __field( u16, dport ) - __dynamic_array(char, name, IFNAMSIZ ) + __array(char, name, IFNAMSIZ ) ), TP_fast_assign( @@ -66,7 +66,7 @@ TRACE_EVENT(fib_table_lookup, } dev = nhc ? nhc->nhc_dev : NULL; - __assign_str(name, dev ? dev->name : "-"); + strlcpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ); if (nhc) { if (nhc->nhc_gw_family == AF_INET) { @@ -95,7 +95,7 @@ TRACE_EVENT(fib_table_lookup, __entry->tb_id, __entry->oif, __entry->iif, __entry->proto, __entry->src, __entry->sport, __entry->dst, __entry->dport, __entry->tos, __entry->scope, __entry->flags, - __get_str(name), __entry->gw4, __entry->gw6, __entry->err) + __entry->name, __entry->gw4, __entry->gw6, __entry->err) ); #endif /* _TRACE_FIB_H */ diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h index c6abdcc77c12..6e821eb79450 100644 --- a/include/trace/events/fib6.h +++ b/include/trace/events/fib6.h @@ -31,7 +31,7 @@ TRACE_EVENT(fib6_table_lookup, __field( u16, dport ) __field( u8, proto ) __field( u8, rt_type ) - __dynamic_array( char, name, IFNAMSIZ ) + __array( char, name, IFNAMSIZ ) __array( __u8, gw, 16 ) ), @@ -63,9 +63,9 @@ TRACE_EVENT(fib6_table_lookup, } if (res->nh && res->nh->fib_nh_dev) { - __assign_str(name, res->nh->fib_nh_dev); + strlcpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ); } else { - __assign_str(name, "-"); + strcpy(__entry->name, "-"); } if (res->f6i == net->ipv6.fib6_null_entry) { struct in6_addr in6_zero = {}; @@ -83,7 +83,7 @@ TRACE_EVENT(fib6_table_lookup, __entry->tb_id, __entry->oif, __entry->iif, __entry->proto, __entry->src, __entry->sport, __entry->dst, __entry->dport, __entry->tos, __entry->scope, __entry->flags, - __get_str(name), __entry->gw, __entry->err) + __entry->name, __entry->gw, __entry->err) ); #endif /* _TRACE_FIB6_H */ -- cgit v1.2.3 From 43b2aef3735e0ddb4fc4e6401cd5dc1bf205dead Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 5 Jul 2022 18:37:41 -0400 Subject: neighbor: tracing: Have neigh_create event use __string() The dev field of the neigh_create event uses __dynamic_array() with a fixed size, which defeats the purpose of __dynamic_array(). Looking at the logic, as it already uses __assign_str(), just use the same logic in __string to create the size needed. It appears that because "dev" can be NULL, it needs the check. But __string() can have the same checks as __assign_str() so use them there too. Link: https://lkml.kernel.org/r/20220705183741.35387e3f@rorschach.local.home Cc: David Ahern Cc: David S. 
Miller Cc: netdev@vger.kernel.org Acked-by: Jakub Kicinski Reviewed-by: David Ahern Signed-off-by: Steven Rostedt (Google) --- include/trace/events/neigh.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/trace') diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h index 62bb17516713..5eaa1fa99171 100644 --- a/include/trace/events/neigh.h +++ b/include/trace/events/neigh.h @@ -30,7 +30,7 @@ TRACE_EVENT(neigh_create, TP_STRUCT__entry( __field(u32, family) - __dynamic_array(char, dev, IFNAMSIZ ) + __string(dev, dev ? dev->name : "NULL") __field(int, entries) __field(u8, created) __field(u8, gc_exempt) -- cgit v1.2.3 From 0563231f93c6d1f582b168a47753b345c1e20d81 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 5 Jul 2022 18:44:54 -0400 Subject: tracing/events: Add __vstring() and __assign_vstr() helper macros There's several places that open code the following logic: TP_STRUCT__entry(__dynamic_array(char, msg, MSG_MAX)), TP_fast_assign(vsnprintf(__get_str(msg), MSG_MAX, vaf->fmt, *vaf->va);) To load a string created by variable array va_list. The main issue with this approach is that "MSG_MAX" usage in the __dynamic_array() portion. That actually just reserves the MSG_MAX in the event, and even wastes space because there's dynamic meta data also saved in the event to denote the offset and size of the dynamic array. It would have been better to just use a static __array() field. Instead, create __vstring() and __assign_vstr() that work like __string and __assign_str() but instead of taking a destination string to copy, take a format string and a va_list pointer and fill in the values. It uses the helper: #define __trace_event_vstr_len(fmt, va) \ ({ \ va_list __ap; \ int __ret; \ \ va_copy(__ap, *(va)); \ __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \ va_end(__ap); \ \ min(__ret, TRACE_EVENT_STR_MAX); \ }) To figure out the length to store the string. It may be slightly slower as it needs to run the vsnprintf() twice, but it now saves space on the ring buffer. Link: https://lkml.kernel.org/r/20220705224749.053570613@goodmis.org Cc: Dennis Dalessandro Cc: Ingo Molnar Cc: Andrew Morton Cc: Jason Gunthorpe Cc: Leon Romanovsky Cc: Kalle Valo Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: Arend van Spriel Cc: Franky Lin Cc: Hante Meuleman Cc: Gregory Greenman Cc: Peter Chen Cc: Greg Kroah-Hartman Cc: Mathias Nyman Cc: Chunfeng Yun Cc: Bin Liu Cc: Marek Lindner Cc: Simon Wunderlich Cc: Antonio Quartulli Cc: Sven Eckelmann Cc: Johannes Berg Cc: Jim Cromie Signed-off-by: Steven Rostedt (Google) --- include/linux/trace_events.h | 18 ++++++++++++++++++ include/trace/stages/stage1_struct_define.h | 3 +++ include/trace/stages/stage2_data_offsets.h | 3 +++ include/trace/stages/stage4_event_fields.h | 3 +++ include/trace/stages/stage5_get_offsets.h | 4 ++++ include/trace/stages/stage6_event_callback.h | 7 +++++++ 6 files changed, 38 insertions(+) (limited to 'include/trace') diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index e6e95a9f07a5..b18759a673c6 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -916,6 +916,24 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, #endif +#define TRACE_EVENT_STR_MAX 512 + +/* + * gcc warns that you can not use a va_list in an inlined + * function. 
But lets me make it into a macro :-/ + */ +#define __trace_event_vstr_len(fmt, va) \ +({ \ + va_list __ap; \ + int __ret; \ + \ + va_copy(__ap, *(va)); \ + __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \ + va_end(__ap); \ + \ + min(__ret, TRACE_EVENT_STR_MAX); \ +}) + #endif /* _LINUX_TRACE_EVENT_H */ /* diff --git a/include/trace/stages/stage1_struct_define.h b/include/trace/stages/stage1_struct_define.h index a16783419687..1b7bab60434c 100644 --- a/include/trace/stages/stage1_struct_define.h +++ b/include/trace/stages/stage1_struct_define.h @@ -26,6 +26,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h index 42fd1e8813ec..1b7a8f764fdd 100644 --- a/include/trace/stages/stage2_data_offsets.h +++ b/include/trace/stages/stage2_data_offsets.h @@ -32,6 +32,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h index e80cdc397a43..c3790ec7a453 100644 --- a/include/trace/stages/stage4_event_fields.h +++ b/include/trace/stages/stage4_event_fields.h @@ -38,6 +38,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h index 7ee5931300e6..fba4c24ed9e6 100644 --- a/include/trace/stages/stage5_get_offsets.h +++ b/include/trace/stages/stage5_get_offsets.h @@ -39,6 +39,10 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, \ + __trace_event_vstr_len(fmt, ap)) + #undef __rel_dynamic_array #define __rel_dynamic_array(type, item, len) \ __item_length = (len) * sizeof(type); \ diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h index e1724f73594b..0f51f6b3ab70 100644 --- a/include/trace/stages/stage6_event_callback.h +++ b/include/trace/stages/stage6_event_callback.h @@ -24,6 +24,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __assign_str #define __assign_str(dst, src) \ strcpy(__get_str(dst), (src) ? 
(const char *)(src) : "(null)"); @@ -35,6 +38,10 @@ __get_str(dst)[len] = '\0'; \ } while(0) +#undef __assign_vstr +#define __assign_vstr(dst, fmt, va) \ + vsnprintf(__get_str(dst), TRACE_EVENT_STR_MAX, fmt, *(va)) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) -- cgit v1.2.3 From 5409b8053511f9a32ee08c3d16827632a5b17e3f Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 5 Jul 2022 18:45:03 -0400 Subject: scsi: iscsi: tracing: Use the new __vstring() helper Instead of open coding a __dynamic_array() with a fixed length (which defeats the purpose of the dynamic array in the first place). Use the new __vstring() helper that will use a va_list and only write enough of the string into the ring buffer that is needed. Link: https://lkml.kernel.org/r/20220705224750.715763972@goodmis.org Cc: Fred Herard Cc: Ingo Molnar Cc: Andrew Morton Cc: Martin K. Petersen Signed-off-by: Steven Rostedt (Google) --- include/trace/events/iscsi.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/trace') diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h index 87408faf6e4e..8ff2a3ca5d75 100644 --- a/include/trace/events/iscsi.h +++ b/include/trace/events/iscsi.h @@ -26,12 +26,12 @@ DECLARE_EVENT_CLASS(iscsi_log_msg, TP_STRUCT__entry( __string(dname, dev_name(dev) ) - __dynamic_array(char, msg, ISCSI_MSG_MAX ) + __vstring(msg, vaf->fmt, vaf->va) ), TP_fast_assign( __assign_str(dname, dev_name(dev)); - vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va); + __assign_vstr(msg, vaf->fmt, vaf->va); ), TP_printk("%s: %s",__get_str(dname), __get_str(msg) -- cgit v1.2.3 From 74003fc4ae7619ac5a7bec8748a14f3a8655f3ea Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 5 Jul 2022 18:45:04 -0400 Subject: scsi: qla2xxx: tracing: Use the new __vstring() helper Instead of open coding a __dynamic_array() with a fixed length (which defeats the purpose of the dynamic array in the first place). Use the new __vstring() helper that will use a va_list and only write enough of the string into the ring buffer that is needed. Link: https://lkml.kernel.org/r/20220705224750.896553364@goodmis.org Cc: Bart Van Assche Cc: Ingo Molnar Cc: Andrew Morton Cc: Martin K. Petersen Signed-off-by: Steven Rostedt (Google) --- include/trace/events/qla.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/trace') diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h index 5857cf682ee7..e7fd55e7dc3d 100644 --- a/include/trace/events/qla.h +++ b/include/trace/events/qla.h @@ -22,11 +22,11 @@ DECLARE_EVENT_CLASS(qla_log_event, TP_STRUCT__entry( __string(buf, buf) - __dynamic_array(char, msg, QLA_MSG_MAX) + __vstring(msg, vaf->fmt, vaf->va) ), TP_fast_assign( __assign_str(buf, buf); - vsnprintf(__get_str(msg), QLA_MSG_MAX, vaf->fmt, *vaf->va); + __assign_vstr(msg, vaf->fmt, vaf->va); ), TP_printk("%s %s", __get_str(buf), __get_str(msg)) -- cgit v1.2.3 From 3a2dcbaf4d31023106975d6ae75b6df080c454cb Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 19 Jul 2022 18:20:04 -0400 Subject: tracing: Use a copy of the va_list for __assign_vstr() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If an instance of tracing enables the same trace event as another instance, or the top level instance, or even perf, then the va_list passed into some tracepoints can be used more than once. 
As va_list can only be traversed once, this can cause issues: # cat /sys/kernel/tracing/instances/qla2xxx/trace cat-56106 [012] ..... 2419873.470098: ql_dbg_log: qla2xxx [0000:05:00.0]-1054:14: Entered (null). cat-56106 [012] ..... 2419873.470101: ql_dbg_log: qla2xxx [0000:05:00.0]-1000:14: Entered ×+<96>²Ü<98>^H. cat-56106 [012] ..... 2419873.470102: ql_dbg_log: qla2xxx [0000:05:00.0]-1006:14: Prepare to issue mbox cmd=0xde589000. # cat /sys/kernel/tracing/trace cat-56106 [012] ..... 2419873.470097: ql_dbg_log: qla2xxx [0000:05:00.0]-1054:14: Entered qla2x00_get_firmware_state. cat-56106 [012] ..... 2419873.470100: ql_dbg_log: qla2xxx [0000:05:00.0]-1000:14: Entered qla2x00_mailbox_command. cat-56106 [012] ..... 2419873.470102: ql_dbg_log: qla2xxx [0000:05:00.0]-1006:14: Prepare to issue mbox cmd=0x69. The instance version is corrupted because the top level instance iterated the va_list first. Use va_copy() in the __assign_vstr() macro to make sure that each trace event for each use case gets a fresh va_list. Link: https://lore.kernel.org/all/259d53a5-958e-6508-4e45-74dba2821242@marvell.com/ Link: https://lkml.kernel.org/r/20220719182004.21daa83e@gandalf.local.home Fixes: 0563231f93c6d ("tracing/events: Add __vstring() and __assign_vstr() helper macros") Reported-by: Arun Easi Signed-off-by: Steven Rostedt (Google) --- include/trace/stages/stage6_event_callback.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/trace') diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h index 0f51f6b3ab70..3c554a585320 100644 --- a/include/trace/stages/stage6_event_callback.h +++ b/include/trace/stages/stage6_event_callback.h @@ -40,7 +40,12 @@ #undef __assign_vstr #define __assign_vstr(dst, fmt, va) \ - vsnprintf(__get_str(dst), TRACE_EVENT_STR_MAX, fmt, *(va)) + do { \ + va_list __cp_va; \ + va_copy(__cp_va, *(va)); \ + vsnprintf(__get_str(dst), TRACE_EVENT_STR_MAX, fmt, __cp_va); \ + va_end(__cp_va); \ + } while (0) #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) -- cgit v1.2.3 From 792575348ff70e05c6040d02fce38e949ef92c37 Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Fri, 29 Jul 2022 11:38:43 +0200 Subject: rv/include: Add deterministic automata monitor definition via C macros In Linux terms, the runtime verification monitors are encapsulated inside the "RV monitor" abstraction. The "RV monitor" includes a set of instances of the monitor (per-cpu monitor, per-task monitor, and so on), the helper functions that glue the monitor to the system reference model, and the trace output as a reaction for event parsing and exceptions, as depicted below: Linux +----- RV Monitor ----------------------------------+ Formal Realm | | Realm +-------------------+ +----------------+ +-----------------+ | Linux kernel | | Monitor | | Reference | | Tracing | -> | Instance(s) | <- | Model | | (instrumentation) | | (verification) | | (specification) | +-------------------+ +----------------+ +-----------------+ | | | | V | | +----------+ | | | Reaction | | | +--+--+--+-+ | | | | | | | | | +-> trace output ? | +------------------------|--|----------------------+ | +----> panic ? +-------> Add the rv/da_monitor.h, enabling automatic code generation for the *Monitor Instance(s)* using C macros, and code to support it. 
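As a quick orientation before the macro definitions that follow (the wip monitor added later in this series is the complete, real example), a dot2k-generated monitor is expected to reduce to roughly the shape sketched below; the "foo" monitor and its "bar" event are made-up names used purely for illustration, and the tracepoint headers for the hooked events are omitted:

	/* Illustrative sketch only: "foo" and "bar_foo" are invented names. */
	#include <rv/da_monitor.h>	/* the header added by this patch */

	#include "foo.h"	/* dot2k output: enum states_foo, enum events_foo, automaton_foo */

	struct rv_monitor rv_foo;
	DECLARE_DA_MON_PER_CPU(foo, unsigned char);

	/* Instrumentation: translate a kernel tracepoint into a model event.
	 * The prototype must match the tracepoint being attached to. */
	static void handle_bar(void *data, unsigned long ip, unsigned long parent_ip)
	{
		da_handle_event_foo(bar_foo);	/* generated by DECLARE_DA_MON_PER_CPU() */
	}
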
The benefits of the usage of macro for monitor synthesis are 3-fold as it: - Reduces the code duplication; - Facilitates the bug fix/improvement; - Avoids the case of developers changing the core of the monitor code to manipulate the model in a (let's say) non-standard way. This initial implementation presents three different types of monitor instances: - DECLARE_DA_MON_GLOBAL(name, type) - DECLARE_DA_MON_PER_CPU(name, type) - DECLARE_DA_MON_PER_TASK(name, type) The first declares the functions for a global deterministic automata monitor, the second for monitors with per-cpu instances, and the third with per-task instances. Link: https://lkml.kernel.org/r/51b0bf425a281e226dfeba7401d2115d6091f84e.1659052063.git.bristot@kernel.org Cc: Wim Van Sebroeck Cc: Guenter Roeck Cc: Jonathan Corbet Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Will Deacon Cc: Catalin Marinas Cc: Marco Elver Cc: Dmitry Vyukov Cc: "Paul E. McKenney" Cc: Shuah Khan Cc: Gabriele Paoloni Cc: Juri Lelli Cc: Clark Williams Cc: Tao Zhou Cc: Randy Dunlap Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Steven Rostedt (Google) --- include/linux/rv.h | 10 + include/rv/da_monitor.h | 541 ++++++++++++++++++++++++++++++++++++++++++++++ include/trace/events/rv.h | 120 ++++++++++ kernel/fork.c | 14 ++ kernel/trace/rv/Kconfig | 11 + kernel/trace/rv/rv.c | 5 + 6 files changed, 701 insertions(+) create mode 100644 include/rv/da_monitor.h create mode 100644 include/trace/events/rv.h (limited to 'include/trace') diff --git a/include/linux/rv.h b/include/linux/rv.h index dcce56987cf7..8883b41d88ec 100644 --- a/include/linux/rv.h +++ b/include/linux/rv.h @@ -7,7 +7,16 @@ #ifndef _LINUX_RV_H #define _LINUX_RV_H +#define MAX_DA_NAME_LEN 24 + #ifdef CONFIG_RV +/* + * Deterministic automaton per-object variables. + */ +struct da_monitor { + bool monitoring; + unsigned int curr_state; +}; /* * Per-task RV monitors count. Nowadays fixed in RV_PER_TASK_MONITORS. @@ -22,6 +31,7 @@ * Futher monitor types are expected, so make this a union. */ union rv_task_monitor { + struct da_monitor da_mon; }; #ifdef CONFIG_RV_REACTORS diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h new file mode 100644 index 000000000000..001bc298289f --- /dev/null +++ b/include/rv/da_monitor.h @@ -0,0 +1,541 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira + * + * Deterministic automata (DA) monitor functions, to be used together + * with automata models in C generated by the dot2k tool. 
+ * + * The dot2k tool is available at tools/verification/dot2k/ + */ + +#include +#include +#include + +#ifdef CONFIG_RV_REACTORS + +#define DECLARE_RV_REACTING_HELPERS(name, type) \ +static char REACT_MSG_##name[1024]; \ + \ +static inline char *format_react_msg_##name(type curr_state, type event) \ +{ \ + snprintf(REACT_MSG_##name, 1024, \ + "rv: monitor %s does not allow event %s on state %s\n", \ + #name, \ + model_get_event_name_##name(event), \ + model_get_state_name_##name(curr_state)); \ + return REACT_MSG_##name; \ +} \ + \ +static void cond_react_##name(char *msg) \ +{ \ + if (rv_##name.react) \ + rv_##name.react(msg); \ +} \ + \ +static bool rv_reacting_on_##name(void) \ +{ \ + return rv_reacting_on(); \ +} + +#else /* CONFIG_RV_REACTOR */ + +#define DECLARE_RV_REACTING_HELPERS(name, type) \ +static inline char *format_react_msg_##name(type curr_state, type event) \ +{ \ + return NULL; \ +} \ + \ +static void cond_react_##name(char *msg) \ +{ \ + return; \ +} \ + \ +static bool rv_reacting_on_##name(void) \ +{ \ + return 0; \ +} +#endif + +/* + * Generic helpers for all types of deterministic automata monitors. + */ +#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ + \ +DECLARE_RV_REACTING_HELPERS(name, type) \ + \ +/* \ + * da_monitor_reset_##name - reset a monitor and setting it to init state \ + */ \ +static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \ +{ \ + da_mon->monitoring = 0; \ + da_mon->curr_state = model_get_initial_state_##name(); \ +} \ + \ +/* \ + * da_monitor_curr_state_##name - return the current state \ + */ \ +static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \ +{ \ + return da_mon->curr_state; \ +} \ + \ +/* \ + * da_monitor_set_state_##name - set the new current state \ + */ \ +static inline void \ +da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \ +{ \ + da_mon->curr_state = state; \ +} \ + \ +/* \ + * da_monitor_start_##name - start monitoring \ + * \ + * The monitor will ignore all events until monitoring is set to true. This \ + * function needs to be called to tell the monitor to start monitoring. \ + */ \ +static inline void da_monitor_start_##name(struct da_monitor *da_mon) \ +{ \ + da_mon->curr_state = model_get_initial_state_##name(); \ + da_mon->monitoring = 1; \ +} \ + \ +/* \ + * da_monitoring_##name - returns true if the monitor is processing events \ + */ \ +static inline bool da_monitoring_##name(struct da_monitor *da_mon) \ +{ \ + return da_mon->monitoring; \ +} \ + \ +/* \ + * da_monitor_enabled_##name - checks if the monitor is enabled \ + */ \ +static inline bool da_monitor_enabled_##name(void) \ +{ \ + /* global switch */ \ + if (unlikely(!rv_monitoring_on())) \ + return 0; \ + \ + /* monitor enabled */ \ + if (unlikely(!rv_##name.enabled)) \ + return 0; \ + \ + return 1; \ +} \ + \ +/* \ + * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \ + */ \ +static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \ +{ \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + /* monitor is actually monitoring */ \ + if (unlikely(!da_monitoring_##name(da_mon))) \ + return 0; \ + \ + return 1; \ +} + +/* + * Event handler for implicit monitors. Implicit monitor is the one which the + * handler does not need to specify which da_monitor to manipulate. Examples + * of implicit monitor are the per_cpu or the global ones. 
+ */ +#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ + \ +static inline bool \ +da_event_##name(struct da_monitor *da_mon, enum events_##name event) \ +{ \ + type curr_state = da_monitor_curr_state_##name(da_mon); \ + type next_state = model_get_next_state_##name(curr_state, event); \ + \ + if (next_state != INVALID_STATE) { \ + da_monitor_set_state_##name(da_mon, next_state); \ + \ + trace_event_##name(model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event), \ + model_get_state_name_##name(next_state), \ + model_is_final_state_##name(next_state)); \ + \ + return true; \ + } \ + \ + if (rv_reacting_on_##name()) \ + cond_react_##name(format_react_msg_##name(curr_state, event)); \ + \ + trace_error_##name(model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event)); \ + \ + return false; \ +} \ + +/* + * Event handler for per_task monitors. + */ +#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \ + \ +static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \ + enum events_##name event) \ +{ \ + type curr_state = da_monitor_curr_state_##name(da_mon); \ + type next_state = model_get_next_state_##name(curr_state, event); \ + \ + if (next_state != INVALID_STATE) { \ + da_monitor_set_state_##name(da_mon, next_state); \ + \ + trace_event_##name(tsk->pid, \ + model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event), \ + model_get_state_name_##name(next_state), \ + model_is_final_state_##name(next_state)); \ + \ + return true; \ + } \ + \ + if (rv_reacting_on_##name()) \ + cond_react_##name(format_react_msg_##name(curr_state, event)); \ + \ + trace_error_##name(tsk->pid, \ + model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event)); \ + \ + return false; \ +} + +/* + * Functions to define, init and get a global monitor. + */ +#define DECLARE_DA_MON_INIT_GLOBAL(name, type) \ + \ +/* \ + * global monitor (a single variable) \ + */ \ +static struct da_monitor da_mon_##name; \ + \ +/* \ + * da_get_monitor_##name - return the global monitor address \ + */ \ +static struct da_monitor *da_get_monitor_##name(void) \ +{ \ + return &da_mon_##name; \ +} \ + \ +/* \ + * da_monitor_reset_all_##name - reset the single monitor \ + */ \ +static void da_monitor_reset_all_##name(void) \ +{ \ + da_monitor_reset_##name(da_get_monitor_##name()); \ +} \ + \ +/* \ + * da_monitor_init_##name - initialize a monitor \ + */ \ +static inline int da_monitor_init_##name(void) \ +{ \ + da_monitor_reset_all_##name(); \ + return 0; \ +} \ + \ +/* \ + * da_monitor_destroy_##name - destroy the monitor \ + */ \ +static inline void da_monitor_destroy_##name(void) \ +{ \ + return; \ +} + +/* + * Functions to define, init and get a per-cpu monitor. 
+ */ +#define DECLARE_DA_MON_INIT_PER_CPU(name, type) \ + \ +/* \ + * per-cpu monitor variables \ + */ \ +DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \ + \ +/* \ + * da_get_monitor_##name - return current CPU monitor address \ + */ \ +static struct da_monitor *da_get_monitor_##name(void) \ +{ \ + return this_cpu_ptr(&da_mon_##name); \ +} \ + \ +/* \ + * da_monitor_reset_all_##name - reset all CPUs' monitor \ + */ \ +static void da_monitor_reset_all_##name(void) \ +{ \ + struct da_monitor *da_mon; \ + int cpu; \ + for_each_cpu(cpu, cpu_online_mask) { \ + da_mon = per_cpu_ptr(&da_mon_##name, cpu); \ + da_monitor_reset_##name(da_mon); \ + } \ +} \ + \ +/* \ + * da_monitor_init_##name - initialize all CPUs' monitor \ + */ \ +static inline int da_monitor_init_##name(void) \ +{ \ + da_monitor_reset_all_##name(); \ + return 0; \ +} \ + \ +/* \ + * da_monitor_destroy_##name - destroy the monitor \ + */ \ +static inline void da_monitor_destroy_##name(void) \ +{ \ + return; \ +} + +/* + * Functions to define, init and get a per-task monitor. + */ +#define DECLARE_DA_MON_INIT_PER_TASK(name, type) \ + \ +/* \ + * The per-task monitor is stored a vector in the task struct. This variable \ + * stores the position on the vector reserved for this monitor. \ + */ \ +static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \ + \ +/* \ + * da_get_monitor_##name - return the monitor in the allocated slot for tsk \ + */ \ +static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \ +{ \ + return &tsk->rv[task_mon_slot_##name].da_mon; \ +} \ + \ +static void da_monitor_reset_all_##name(void) \ +{ \ + struct task_struct *g, *p; \ + \ + read_lock(&tasklist_lock); \ + for_each_process_thread(g, p) \ + da_monitor_reset_##name(da_get_monitor_##name(p)); \ + read_unlock(&tasklist_lock); \ +} \ + \ +/* \ + * da_monitor_init_##name - initialize the per-task monitor \ + * \ + * Try to allocate a slot in the task's vector of monitors. If there \ + * is an available slot, use it and reset all task's monitor. \ + */ \ +static int da_monitor_init_##name(void) \ +{ \ + int slot; \ + \ + slot = rv_get_task_monitor_slot(); \ + if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) \ + return slot; \ + \ + task_mon_slot_##name = slot; \ + \ + da_monitor_reset_all_##name(); \ + return 0; \ +} \ + \ +/* \ + * da_monitor_destroy_##name - return the allocated slot \ + */ \ +static inline void da_monitor_destroy_##name(void) \ +{ \ + if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) { \ + WARN_ONCE(1, "Disabling a disabled monitor: " #name); \ + return; \ + } \ + rv_put_task_monitor_slot(task_mon_slot_##name); \ + task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \ + return; \ +} + +/* + * Handle event for implicit monitor: da_get_monitor_##name() will figure out + * the monitor. 
+ */ +#define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) \ + \ +static inline void __da_handle_event_##name(struct da_monitor *da_mon, \ + enum events_##name event) \ +{ \ + bool retval; \ + \ + retval = da_event_##name(da_mon, event); \ + if (!retval) \ + da_monitor_reset_##name(da_mon); \ +} \ + \ +/* \ + * da_handle_event_##name - handle an event \ + */ \ +static inline void da_handle_event_##name(enum events_##name event) \ +{ \ + struct da_monitor *da_mon = da_get_monitor_##name(); \ + bool retval; \ + \ + retval = da_monitor_handling_event_##name(da_mon); \ + if (!retval) \ + return; \ + \ + __da_handle_event_##name(da_mon, event); \ +} \ + \ +/* \ + * da_handle_start_event_##name - start monitoring or handle event \ + * \ + * This function is used to notify the monitor that the system is returning \ + * to the initial state, so the monitor can start monitoring in the next event. \ + * Thus: \ + * \ + * If the monitor already started, handle the event. \ + * If the monitor did not start yet, start the monitor but skip the event. \ + */ \ +static inline bool da_handle_start_event_##name(enum events_##name event) \ +{ \ + struct da_monitor *da_mon; \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + da_mon = da_get_monitor_##name(); \ + \ + if (unlikely(!da_monitoring_##name(da_mon))) { \ + da_monitor_start_##name(da_mon); \ + return 0; \ + } \ + \ + __da_handle_event_##name(da_mon, event); \ + \ + return 1; \ +} \ + \ +/* \ + * da_handle_start_run_event_##name - start monitoring and handle event \ + * \ + * This function is used to notify the monitor that the system is in the \ + * initial state, so the monitor can start monitoring and handling event. \ + */ \ +static inline bool da_handle_start_run_event_##name(enum events_##name event) \ +{ \ + struct da_monitor *da_mon; \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + da_mon = da_get_monitor_##name(); \ + \ + if (unlikely(!da_monitoring_##name(da_mon))) \ + da_monitor_start_##name(da_mon); \ + \ + __da_handle_event_##name(da_mon, event); \ + \ + return 1; \ +} + +/* + * Handle event for per task. + */ +#define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) \ + \ +static inline void \ +__da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \ + enum events_##name event) \ +{ \ + bool retval; \ + \ + retval = da_event_##name(da_mon, tsk, event); \ + if (!retval) \ + da_monitor_reset_##name(da_mon); \ +} \ + \ +/* \ + * da_handle_event_##name - handle an event \ + */ \ +static inline void \ +da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \ +{ \ + struct da_monitor *da_mon = da_get_monitor_##name(tsk); \ + bool retval; \ + \ + retval = da_monitor_handling_event_##name(da_mon); \ + if (!retval) \ + return; \ + \ + __da_handle_event_##name(da_mon, tsk, event); \ +} \ + \ +/* \ + * da_handle_start_event_##name - start monitoring or handle event \ + * \ + * This function is used to notify the monitor that the system is returning \ + * to the initial state, so the monitor can start monitoring in the next event. \ + * Thus: \ + * \ + * If the monitor already started, handle the event. \ + * If the monitor did not start yet, start the monitor but skip the event. 
\ + */ \ +static inline bool \ +da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \ +{ \ + struct da_monitor *da_mon; \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + da_mon = da_get_monitor_##name(tsk); \ + \ + if (unlikely(!da_monitoring_##name(da_mon))) { \ + da_monitor_start_##name(da_mon); \ + return 0; \ + } \ + \ + __da_handle_event_##name(da_mon, tsk, event); \ + \ + return 1; \ +} + +/* + * Entry point for the global monitor. + */ +#define DECLARE_DA_MON_GLOBAL(name, type) \ + \ +DECLARE_AUTOMATA_HELPERS(name, type) \ +DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ +DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ +DECLARE_DA_MON_INIT_GLOBAL(name, type) \ +DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) + +/* + * Entry point for the per-cpu monitor. + */ +#define DECLARE_DA_MON_PER_CPU(name, type) \ + \ +DECLARE_AUTOMATA_HELPERS(name, type) \ +DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ +DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ +DECLARE_DA_MON_INIT_PER_CPU(name, type) \ +DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) + +/* + * Entry point for the per-task monitor. + */ +#define DECLARE_DA_MON_PER_TASK(name, type) \ + \ +DECLARE_AUTOMATA_HELPERS(name, type) \ +DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ +DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \ +DECLARE_DA_MON_INIT_PER_TASK(name, type) \ +DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h new file mode 100644 index 000000000000..20a2e09c6416 --- /dev/null +++ b/include/trace/events/rv.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rv + +#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RV_H + +#include +#include + +#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT +DECLARE_EVENT_CLASS(event_da_monitor, + + TP_PROTO(char *state, char *event, char *next_state, bool final_state), + + TP_ARGS(state, event, next_state, final_state), + + TP_STRUCT__entry( + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + __array( char, next_state, MAX_DA_NAME_LEN ) + __field( bool, final_state ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); + __entry->final_state = final_state; + ), + + TP_printk("%s x %s -> %s %s", + __entry->state, + __entry->event, + __entry->next_state, + __entry->final_state ? 
"(final)" : "") +); + +DECLARE_EVENT_CLASS(error_da_monitor, + + TP_PROTO(char *state, char *event), + + TP_ARGS(state, event), + + TP_STRUCT__entry( + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + ), + + TP_printk("event %s not expected in the state %s", + __entry->event, + __entry->state) +); +#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */ + +#ifdef CONFIG_DA_MON_EVENTS_ID +DECLARE_EVENT_CLASS(event_da_monitor_id, + + TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), + + TP_ARGS(id, state, event, next_state, final_state), + + TP_STRUCT__entry( + __field( int, id ) + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + __array( char, next_state, MAX_DA_NAME_LEN ) + __field( bool, final_state ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); + __entry->id = id; + __entry->final_state = final_state; + ), + + TP_printk("%d: %s x %s -> %s %s", + __entry->id, + __entry->state, + __entry->event, + __entry->next_state, + __entry->final_state ? "(final)" : "") +); + +DECLARE_EVENT_CLASS(error_da_monitor_id, + + TP_PROTO(int id, char *state, char *event), + + TP_ARGS(id, state, event), + + TP_STRUCT__entry( + __field( int, id ) + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + __entry->id = id; + ), + + TP_printk("%d: event %s not expected in the state %s", + __entry->id, + __entry->event, + __entry->state) +); +#endif /* CONFIG_DA_MON_EVENTS_ID */ +#endif /* _TRACE_RV_H */ + +/* This part ust be outside protection */ +#undef TRACE_INCLUDE_PATH +#include diff --git a/kernel/fork.c b/kernel/fork.c index 9d44f2d46c69..6f1f82ccd5f2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1964,6 +1964,18 @@ static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) mutex_unlock(&oom_adj_mutex); } +#ifdef CONFIG_RV +static void rv_task_fork(struct task_struct *p) +{ + int i; + + for (i = 0; i < RV_PER_TASK_MONITORS; i++) + p->rv[i].da_mon.monitoring = false; +} +#else +#define rv_task_fork(p) do {} while (0) +#endif + /* * This creates a new process as a copy of the old one, * but does not actually start it yet. 
@@ -2399,6 +2411,8 @@ static __latent_entropy struct task_struct *copy_process( */ copy_seccomp(p); + rv_task_fork(p); + rseq_fork(p, clone_flags); /* Don't start children in a dying pid namespace */ diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig index 3eb5d48ab4f6..8714800e22ad 100644 --- a/kernel/trace/rv/Kconfig +++ b/kernel/trace/rv/Kconfig @@ -1,5 +1,16 @@ # SPDX-License-Identifier: GPL-2.0-only # +config DA_MON_EVENTS + bool + +config DA_MON_EVENTS_IMPLICIT + select DA_MON_EVENTS + bool + +config DA_MON_EVENTS_ID + select DA_MON_EVENTS + bool + menuconfig RV bool "Runtime Verification" depends on TRACING diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c index 45cf64eb2600..df6678c86334 100644 --- a/kernel/trace/rv/rv.c +++ b/kernel/trace/rv/rv.c @@ -140,6 +140,11 @@ #include #include +#ifdef CONFIG_DA_MON_EVENTS +#define CREATE_TRACE_POINTS +#include +#endif + #include "rv.h" DEFINE_MUTEX(rv_interface_lock); -- cgit v1.2.3 From 10bde81c74863472047f31304064018c40f488ee Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Fri, 29 Jul 2022 11:38:52 +0200 Subject: rv/monitor: Add the wip monitor The wakeup in preemptive (wip) monitor verifies if the wakeup events always take place with preemption disabled: | | v #==================# H preemptive H <+ #==================# | | | | preempt_disable | preempt_enable v | sched_waking +------------------+ | +--------------- | | | | | non_preemptive | | +--------------> | | -+ +------------------+ The wakeup event always takes place with preemption disabled because of the scheduler synchronization. However, because the preempt_count and its trace event are not atomic with regard to interrupts, some inconsistencies might happen. The documentation illustrates one of these cases. Link: https://lkml.kernel.org/r/c98ca678df81115fddc04921b3c79720c836b18f.1659052063.git.bristot@kernel.org Cc: Wim Van Sebroeck Cc: Guenter Roeck Cc: Jonathan Corbet Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Will Deacon Cc: Catalin Marinas Cc: Marco Elver Cc: Dmitry Vyukov Cc: "Paul E. 
McKenney" Cc: Shuah Khan Cc: Gabriele Paoloni Cc: Juri Lelli Cc: Clark Williams Cc: Tao Zhou Cc: Randy Dunlap Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Steven Rostedt (Google) --- Documentation/trace/rv/index.rst | 1 + Documentation/trace/rv/monitor_wip.rst | 55 ++++++++++++++++++++++++++++++++++ include/trace/events/rv.h | 10 +++++++ kernel/trace/rv/Kconfig | 13 ++++++++ kernel/trace/rv/Makefile | 1 + kernel/trace/rv/monitors/wip/wip.c | 51 ++++++++++--------------------- tools/verification/models/wip.dot | 16 ++++++++++ 7 files changed, 111 insertions(+), 36 deletions(-) create mode 100644 Documentation/trace/rv/monitor_wip.rst create mode 100644 tools/verification/models/wip.dot (limited to 'include/trace') diff --git a/Documentation/trace/rv/index.rst b/Documentation/trace/rv/index.rst index db2ae3f90b90..4cb71ed628b8 100644 --- a/Documentation/trace/rv/index.rst +++ b/Documentation/trace/rv/index.rst @@ -10,3 +10,4 @@ Runtime Verification deterministic_automata.rst da_monitor_synthesis.rst da_monitor_instrumentation.rst + monitor_wip.rst diff --git a/Documentation/trace/rv/monitor_wip.rst b/Documentation/trace/rv/monitor_wip.rst new file mode 100644 index 000000000000..a95763438c48 --- /dev/null +++ b/Documentation/trace/rv/monitor_wip.rst @@ -0,0 +1,55 @@ +Monitor wip +=========== + +- Name: wip - wakeup in preemptive +- Type: per-cpu deterministic automaton +- Author: Daniel Bristot de Oliveira + +Description +----------- + +The wakeup in preemptive (wip) monitor is a sample per-cpu monitor +that verifies if the wakeup events always take place with +preemption disabled:: + + | + | + v + #==================# + H preemptive H <+ + #==================# | + | | + | preempt_disable | preempt_enable + v | + sched_waking +------------------+ | + +--------------- | | | + | | non_preemptive | | + +--------------> | | -+ + +------------------+ + +The wakeup event always takes place with preemption disabled because +of the scheduler synchronization. However, because the preempt_count +and its trace event are not atomic with regard to interrupts, some +inconsistencies might happen. 
For example:: + + preempt_disable() { + __preempt_count_add(1) + -------> smp_apic_timer_interrupt() { + preempt_disable() + do not trace (preempt count >= 1) + + wake up a thread + + preempt_enable() + do not trace (preempt count >= 1) + } + <------ + trace_preempt_disable(); + } + +This problem was reported and discussed here: + https://lore.kernel.org/r/cover.1559051152.git.bristot@redhat.com/ + +Specification +------------- +Grapviz Dot file in tools/verification/models/wip.dot diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h index 20a2e09c6416..e972f27d8df3 100644 --- a/include/trace/events/rv.h +++ b/include/trace/events/rv.h @@ -56,6 +56,16 @@ DECLARE_EVENT_CLASS(error_da_monitor, __entry->event, __entry->state) ); + +#ifdef CONFIG_RV_MON_WIP +DEFINE_EVENT(event_da_monitor, event_wip, + TP_PROTO(char *state, char *event, char *next_state, bool final_state), + TP_ARGS(state, event, next_state, final_state)); + +DEFINE_EVENT(error_da_monitor, error_wip, + TP_PROTO(char *state, char *event), + TP_ARGS(state, event)); +#endif /* CONFIG_RV_MON_WIP */ #endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */ #ifdef CONFIG_DA_MON_EVENTS_ID diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig index 0d9552b406c6..e50f3346164a 100644 --- a/kernel/trace/rv/Kconfig +++ b/kernel/trace/rv/Kconfig @@ -25,6 +25,19 @@ menuconfig RV For further information, see: Documentation/trace/rv/runtime-verification.rst +config RV_MON_WIP + depends on RV + depends on PREEMPT_TRACER + select DA_MON_EVENTS_IMPLICIT + bool "wip monitor" + help + Enable wip (wakeup in preemptive) sample monitor that illustrates + the usage of per-cpu monitors, and one limitation of the + preempt_disable/enable events. + + For further information, see: + Documentation/trace/rv/monitor_wip.rst + config RV_REACTORS bool "Runtime verification reactors" default y diff --git a/kernel/trace/rv/Makefile b/kernel/trace/rv/Makefile index 8944274d9b41..b41109d2750a 100644 --- a/kernel/trace/rv/Makefile +++ b/kernel/trace/rv/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_RV) += rv.o obj-$(CONFIG_RV_REACTORS) += rv_reactors.o +obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o diff --git a/kernel/trace/rv/monitors/wip/wip.c b/kernel/trace/rv/monitors/wip/wip.c index 79a054ca0cde..83cace53b9fa 100644 --- a/kernel/trace/rv/monitors/wip/wip.c +++ b/kernel/trace/rv/monitors/wip/wip.c @@ -10,44 +10,26 @@ #define MODULE_NAME "wip" -/* - * XXX: include required tracepoint headers, e.g., - * #include - */ #include +#include +#include -/* - * This is the self-generated part of the monitor. Generally, there is no need - * to touch this section. - */ #include "wip.h" -/* - * Declare the deterministic automata monitor. - * - * The rv monitor reference is needed for the monitor declaration. - */ struct rv_monitor rv_wip; DECLARE_DA_MON_PER_CPU(wip, unsigned char); -/* - * This is the instrumentation part of the monitor. - * - * This is the section where manual work is required. Here the kernel events - * are translated into model's event. 
- * - */ -static void handle_preempt_disable(void *data, /* XXX: fill header */) +static void handle_preempt_disable(void *data, unsigned long ip, unsigned long parent_ip) { da_handle_event_wip(preempt_disable_wip); } -static void handle_preempt_enable(void *data, /* XXX: fill header */) +static void handle_preempt_enable(void *data, unsigned long ip, unsigned long parent_ip) { - da_handle_event_wip(preempt_enable_wip); + da_handle_start_event_wip(preempt_enable_wip); } -static void handle_sched_waking(void *data, /* XXX: fill header */) +static void handle_sched_waking(void *data, struct task_struct *task) { da_handle_event_wip(sched_waking_wip); } @@ -60,9 +42,9 @@ static int enable_wip(void) if (retval) return retval; - rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_disable); - rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_enable); - rv_attach_trace_probe("wip", /* XXX: tracepoint */, handle_sched_waking); + rv_attach_trace_probe("wip", preempt_enable, handle_preempt_enable); + rv_attach_trace_probe("wip", sched_waking, handle_sched_waking); + rv_attach_trace_probe("wip", preempt_disable, handle_preempt_disable); return 0; } @@ -71,19 +53,16 @@ static void disable_wip(void) { rv_wip.enabled = 0; - rv_detach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_disable); - rv_detach_trace_probe("wip", /* XXX: tracepoint */, handle_preempt_enable); - rv_detach_trace_probe("wip", /* XXX: tracepoint */, handle_sched_waking); + rv_detach_trace_probe("wip", preempt_disable, handle_preempt_disable); + rv_detach_trace_probe("wip", preempt_enable, handle_preempt_enable); + rv_detach_trace_probe("wip", sched_waking, handle_sched_waking); da_monitor_destroy_wip(); } -/* - * This is the monitor register section. - */ struct rv_monitor rv_wip = { .name = "wip", - .description = "auto-generated wip", + .description = "wakeup in preemptive per-cpu testing monitor.", .enable = enable_wip, .disable = disable_wip, .reset = da_monitor_reset_all_wip, @@ -105,5 +84,5 @@ module_init(register_wip); module_exit(unregister_wip); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("dot2k: auto-generated"); -MODULE_DESCRIPTION("wip"); +MODULE_AUTHOR("Daniel Bristot de Oliveira "); +MODULE_DESCRIPTION("wip: wakeup in preemptive - per-cpu sample monitor."); diff --git a/tools/verification/models/wip.dot b/tools/verification/models/wip.dot new file mode 100644 index 000000000000..2a53a9700a89 --- /dev/null +++ b/tools/verification/models/wip.dot @@ -0,0 +1,16 @@ +digraph state_automaton { + {node [shape = circle] "non_preemptive"}; + {node [shape = plaintext, style=invis, label=""] "__init_preemptive"}; + {node [shape = doublecircle] "preemptive"}; + {node [shape = circle] "preemptive"}; + "__init_preemptive" -> "preemptive"; + "non_preemptive" [label = "non_preemptive"]; + "non_preemptive" -> "non_preemptive" [ label = "sched_waking" ]; + "non_preemptive" -> "preemptive" [ label = "preempt_enable" ]; + "preemptive" [label = "preemptive"]; + "preemptive" -> "non_preemptive" [ label = "preempt_disable" ]; + { rank = min ; + "__init_preemptive"; + "preemptive"; + } +} -- cgit v1.2.3 From ccc319dcb450d57b7befe924453d06804d83ba73 Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Fri, 29 Jul 2022 11:38:53 +0200 Subject: rv/monitor: Add the wwnr monitor Per task wakeup while not running (wwnr) monitor. This model is broken, the reason is that a task can be running in the processor without being set as RUNNABLE. 
Think about a task about to sleep: 1: set_current_state(TASK_UNINTERRUPTIBLE); 2: schedule(); And then imagine an IRQ happening in between the lines one and two, waking the task up. BOOM, the wakeup will happen while the task is running. Q: Why do we need this model, so? A: To test the reactors. Link: https://lkml.kernel.org/r/473c0fc39967250fdebcff8b620311c11dccad30.1659052063.git.bristot@kernel.org Cc: Wim Van Sebroeck Cc: Guenter Roeck Cc: Jonathan Corbet Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Will Deacon Cc: Catalin Marinas Cc: Marco Elver Cc: Dmitry Vyukov Cc: "Paul E. McKenney" Cc: Shuah Khan Cc: Gabriele Paoloni Cc: Juri Lelli Cc: Clark Williams Cc: Tao Zhou Cc: Randy Dunlap Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Steven Rostedt (Google) --- Documentation/trace/rv/index.rst | 1 + Documentation/trace/rv/monitor_wwnr.rst | 45 +++++++++++++++++ include/trace/events/rv.h | 12 +++++ kernel/trace/rv/Kconfig | 12 +++++ kernel/trace/rv/Makefile | 1 + kernel/trace/rv/monitors/wwnr/wwnr.c | 87 +++++++++++++++++++++++++++++++++ kernel/trace/rv/monitors/wwnr/wwnr.h | 46 +++++++++++++++++ tools/verification/models/wwnr.dot | 16 ++++++ 8 files changed, 220 insertions(+) create mode 100644 Documentation/trace/rv/monitor_wwnr.rst create mode 100644 kernel/trace/rv/monitors/wwnr/wwnr.c create mode 100644 kernel/trace/rv/monitors/wwnr/wwnr.h create mode 100644 tools/verification/models/wwnr.dot (limited to 'include/trace') diff --git a/Documentation/trace/rv/index.rst b/Documentation/trace/rv/index.rst index 4cb71ed628b8..15fa966102c0 100644 --- a/Documentation/trace/rv/index.rst +++ b/Documentation/trace/rv/index.rst @@ -11,3 +11,4 @@ Runtime Verification da_monitor_synthesis.rst da_monitor_instrumentation.rst monitor_wip.rst + monitor_wwnr.rst diff --git a/Documentation/trace/rv/monitor_wwnr.rst b/Documentation/trace/rv/monitor_wwnr.rst new file mode 100644 index 000000000000..80f1777b85aa --- /dev/null +++ b/Documentation/trace/rv/monitor_wwnr.rst @@ -0,0 +1,45 @@ +Monitor wwnr +============ + +- Name: wwrn - wakeup while not running +- Type: per-task deterministic automaton +- Author: Daniel Bristot de Oliveira + +Description +----------- + +This is a per-task sample monitor, with the following +definition:: + + | + | + v + wakeup +-------------+ + +--------- | | + | | not_running | + +--------> | | <+ + +-------------+ | + | | + | switch_in | switch_out + v | + +-------------+ | + | running | -+ + +-------------+ + +This model is borken, the reason is that a task can be running +in the processor without being set as RUNNABLE. Think about a +task about to sleep:: + + 1: set_current_state(TASK_UNINTERRUPTIBLE); + 2: schedule(); + +And then imagine an IRQ happening in between the lines one and two, +waking the task up. BOOM, the wakeup will happen while the task is +running. + +- Why do we need this model, so? +- To test the reactors. 
+ +Specification +------------- +Grapviz Dot file in tools/verification/models/wwnr.dot diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h index e972f27d8df3..56592da9301c 100644 --- a/include/trace/events/rv.h +++ b/include/trace/events/rv.h @@ -122,6 +122,18 @@ DECLARE_EVENT_CLASS(error_da_monitor_id, __entry->event, __entry->state) ); + +#ifdef CONFIG_RV_MON_WWNR +/* id is the pid of the task */ +DEFINE_EVENT(event_da_monitor_id, event_wwnr, + TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), + TP_ARGS(id, state, event, next_state, final_state)); + +DEFINE_EVENT(error_da_monitor_id, error_wwnr, + TP_PROTO(int id, char *state, char *event), + TP_ARGS(id, state, event)); +#endif /* CONFIG_RV_MON_WWNR */ + #endif /* CONFIG_DA_MON_EVENTS_ID */ #endif /* _TRACE_RV_H */ diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig index e50f3346164a..b259d6e8dc7c 100644 --- a/kernel/trace/rv/Kconfig +++ b/kernel/trace/rv/Kconfig @@ -38,6 +38,18 @@ config RV_MON_WIP For further information, see: Documentation/trace/rv/monitor_wip.rst +config RV_MON_WWNR + depends on RV + select DA_MON_EVENTS_ID + bool "wwnr monitor" + help + Enable wwnr (wakeup while not running) sample monitor, this is a + sample monitor that illustrates the usage of per-task monitor. + The model is borken on purpose: it serves to test reactors. + + For further information, see: + Documentation/trace/rv/monitor_wwnr.rst + config RV_REACTORS bool "Runtime verification reactors" default y diff --git a/kernel/trace/rv/Makefile b/kernel/trace/rv/Makefile index b41109d2750a..af0ff9a46418 100644 --- a/kernel/trace/rv/Makefile +++ b/kernel/trace/rv/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_RV) += rv.o obj-$(CONFIG_RV_REACTORS) += rv_reactors.o obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o +obj-$(CONFIG_RV_MON_WWNR) += monitors/wwnr/wwnr.o diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.c b/kernel/trace/rv/monitors/wwnr/wwnr.c new file mode 100644 index 000000000000..599225d9cf38 --- /dev/null +++ b/kernel/trace/rv/monitors/wwnr/wwnr.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +#define MODULE_NAME "wwnr" + +#include +#include + +#include "wwnr.h" + +struct rv_monitor rv_wwnr; +DECLARE_DA_MON_PER_TASK(wwnr, unsigned char); + +static void handle_switch(void *data, bool preempt, struct task_struct *p, + struct task_struct *n, unsigned int prev_state) +{ + /* start monitoring only after the first suspension */ + if (prev_state == TASK_INTERRUPTIBLE) + da_handle_start_event_wwnr(p, switch_out_wwnr); + else + da_handle_event_wwnr(p, switch_out_wwnr); + + da_handle_event_wwnr(n, switch_in_wwnr); +} + +static void handle_wakeup(void *data, struct task_struct *p) +{ + da_handle_event_wwnr(p, wakeup_wwnr); +} + +static int enable_wwnr(void) +{ + int retval; + + retval = da_monitor_init_wwnr(); + if (retval) + return retval; + + rv_attach_trace_probe("wwnr", sched_switch, handle_switch); + rv_attach_trace_probe("wwnr", sched_wakeup, handle_wakeup); + + return 0; +} + +static void disable_wwnr(void) +{ + rv_wwnr.enabled = 0; + + rv_detach_trace_probe("wwnr", sched_switch, handle_switch); + rv_detach_trace_probe("wwnr", sched_wakeup, handle_wakeup); + + da_monitor_destroy_wwnr(); +} + +struct rv_monitor rv_wwnr = { + .name = "wwnr", + .description = "wakeup while not running per-task testing model.", + .enable = enable_wwnr, + .disable = disable_wwnr, + .reset = da_monitor_reset_all_wwnr, + .enabled = 0, +}; + 
+static int register_wwnr(void) +{ + rv_register_monitor(&rv_wwnr); + return 0; +} + +static void unregister_wwnr(void) +{ + rv_unregister_monitor(&rv_wwnr); +} + +module_init(register_wwnr); +module_exit(unregister_wwnr); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Daniel Bristot de Oliveira "); +MODULE_DESCRIPTION("wwnr: wakeup while not running monitor"); diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.h b/kernel/trace/rv/monitors/wwnr/wwnr.h new file mode 100644 index 000000000000..d1afe55cdd4c --- /dev/null +++ b/kernel/trace/rv/monitors/wwnr/wwnr.h @@ -0,0 +1,46 @@ +/* + * Automatically generated C representation of wwnr automaton + * For further information about this format, see kernel documentation: + * Documentation/trace/rv/deterministic_automata.rst + */ + +enum states_wwnr { + not_running_wwnr = 0, + running_wwnr, + state_max_wwnr +}; + +#define INVALID_STATE state_max_wwnr + +enum events_wwnr { + switch_in_wwnr = 0, + switch_out_wwnr, + wakeup_wwnr, + event_max_wwnr +}; + +struct automaton_wwnr { + char *state_names[state_max_wwnr]; + char *event_names[event_max_wwnr]; + unsigned char function[state_max_wwnr][event_max_wwnr]; + unsigned char initial_state; + bool final_states[state_max_wwnr]; +}; + +struct automaton_wwnr automaton_wwnr = { + .state_names = { + "not_running", + "running" + }, + .event_names = { + "switch_in", + "switch_out", + "wakeup" + }, + .function = { + { running_wwnr, INVALID_STATE, not_running_wwnr }, + { INVALID_STATE, not_running_wwnr, INVALID_STATE }, + }, + .initial_state = not_running_wwnr, + .final_states = { 1, 0 }, +}; diff --git a/tools/verification/models/wwnr.dot b/tools/verification/models/wwnr.dot new file mode 100644 index 000000000000..1b206e83129c --- /dev/null +++ b/tools/verification/models/wwnr.dot @@ -0,0 +1,16 @@ +digraph state_automaton { + {node [shape = plaintext, style=invis, label=""] "__init_not_running"}; + {node [shape = ellipse] "not_running"}; + {node [shape = plaintext] "not_running"}; + {node [shape = plaintext] "running"}; + "__init_not_running" -> "not_running"; + "not_running" [label = "not_running", color = green3]; + "not_running" -> "not_running" [ label = "wakeup" ]; + "not_running" -> "running" [ label = "switch_in" ]; + "running" [label = "running"]; + "running" -> "not_running" [ label = "switch_out" ]; + { rank = min ; + "__init_not_running"; + "not_running"; + } +} -- cgit v1.2.3 From 4c3d2f9388d36eb28640a220a6f908328442d873 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Sun, 31 Jul 2022 01:59:28 -0400 Subject: tracing: Use a struct alignof to determine trace event field alignment alignof() gives an alignment of types as they would be as standalone variables. But alignment in structures might be different, and when building the fields of events, the alignment must be the actual alignment otherwise the field offsets may not match what they actually are. This caused trace-cmd to crash, as libtraceevent did not check if the field offset was bigger than the event. The write_msr and read_msr events on 32 bit had their fields incorrect, because it had a u64 field between two ints. alignof(u64) would give 8, but the u64 field was at a 4 byte alignment. Define a macro as: ALIGN_STRUCTFIELD(type) ((int)(offsetof(struct {char a; type b;}, b))) which gives the actual alignment of types in a structure. 
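To make the difference concrete, here is a minimal user-space sketch (an illustration only, not kernel code) comparing the three forms. The exact values are compiler- and ABI-dependent, but built with "gcc -m32" on x86 the first line typically prints 8 while the other two print 4, which is the write_msr/read_msr mismatch described above; on 64-bit x86 all three print 8:

	/* Illustration only: standalone vs. in-struct alignment of a 64-bit type. */
	#include <stdio.h>
	#include <stddef.h>

	typedef unsigned long long u64;

	/* same idea as the macro added by this patch */
	#define ALIGN_STRUCTFIELD(type) ((int)(offsetof(struct { char a; type b; }, b)))

	int main(void)
	{
		/* alignment of the type as a standalone variable */
		printf("__alignof__(u64)            = %u\n", (unsigned)__alignof__(u64));
		/* alignment the type actually gets as a structure member */
		printf("ALIGN_STRUCTFIELD(u64)      = %d\n", ALIGN_STRUCTFIELD(u64));
		/* the simplified form adopted by the follow-up patch below */
		printf("__alignof__(struct{u64 b;}) = %u\n",
		       (unsigned)__alignof__(struct { u64 b; }));
		return 0;
	}
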
Link: https://lkml.kernel.org/r/20220731015928.7ab3a154@rorschach.local.home Cc: Ingo Molnar Cc: Andrew Morton Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Masami Hiramatsu Cc: stable@vger.kernel.org Fixes: 04ae87a52074e ("ftrace: Rework event_create_dir()") Signed-off-by: Steven Rostedt (Google) --- include/trace/stages/stage4_event_fields.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'include/trace') diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h index c3790ec7a453..80d34f396555 100644 --- a/include/trace/stages/stage4_event_fields.h +++ b/include/trace/stages/stage4_event_fields.h @@ -2,16 +2,18 @@ /* Stage 4 definitions for creating trace events */ +#define ALIGN_STRUCTFIELD(type) ((int)(offsetof(struct {char a; type b;}, b))) + #undef __field_ext #define __field_ext(_type, _item, _filter_type) { \ .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ + .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \ .is_signed = is_signed_type(_type), .filter_type = _filter_type }, #undef __field_struct_ext #define __field_struct_ext(_type, _item, _filter_type) { \ .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ + .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \ 0, .filter_type = _filter_type }, #undef __field @@ -23,7 +25,7 @@ #undef __array #define __array(_type, _item, _len) { \ .type = #_type"["__stringify(_len)"]", .name = #_item, \ - .size = sizeof(_type[_len]), .align = __alignof__(_type), \ + .size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, #undef __dynamic_array -- cgit v1.2.3 From 09794a5a6c348f629b35fc1687071a1622ef4265 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 2 Aug 2022 15:44:12 -0400 Subject: tracing: Use alignof__(struct {type b;}) instead of offsetof() Simplify: #define ALIGN_STRUCTFIELD(type) ((int)(offsetof(struct {char a; type b;}, b))) with #define ALIGN_STRUCTFIELD(type) __alignof__(struct {type b;}) Which works just the same. Link: https://lore.kernel.org/all/a7d202457150472588df0bd3b7334b3f@AcuMS.aculab.com/ Link: https://lkml.kernel.org/r/20220802154412.513c50e3@gandalf.local.home Suggested-by: David Laight Signed-off-by: Steven Rostedt (Google) --- include/trace/stages/stage4_event_fields.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/trace') diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h index 80d34f396555..a8fb25f39a99 100644 --- a/include/trace/stages/stage4_event_fields.h +++ b/include/trace/stages/stage4_event_fields.h @@ -2,7 +2,7 @@ /* Stage 4 definitions for creating trace events */ -#define ALIGN_STRUCTFIELD(type) ((int)(offsetof(struct {char a; type b;}, b))) +#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;}))) #undef __field_ext #define __field_ext(_type, _item, _filter_type) { \ -- cgit v1.2.3
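Taken together, the conversions above follow one rule: reserve only what an event actually needs. The hypothetical event below sketches how the field helpers discussed in this series fit together; the event name, its arguments, and the subsystem are invented for illustration, and the usual trace-header boilerplate around it is omitted:

	/* Hypothetical example: "my_subsys_log" and its arguments are invented. */
	TRACE_EVENT(my_subsys_log,

		TP_PROTO(const struct net_device *dev, const char *drv_name,
			 struct va_format *vaf),

		TP_ARGS(dev, drv_name, vaf),

		TP_STRUCT__entry(
			/* small, bounded string: a static array avoids the 4 bytes of
			 * offset/size meta data that a dynamic array adds per event */
			__array(char, ifname, IFNAMSIZ)
			/* unbounded string with a known source: sized to its real length */
			__string(drv, drv_name)
			/* printf-style message: __vstring() probes the va_list with
			 * vsnprintf(NULL, 0, ...) to size the buffer */
			__vstring(msg, vaf->fmt, vaf->va)
		),

		TP_fast_assign(
			strscpy(__entry->ifname, dev ? dev->name : "NULL", IFNAMSIZ);
			__assign_str(drv, drv_name);
			__assign_vstr(msg, vaf->fmt, vaf->va);
		),

		TP_printk("%s (%s): %s",
			  __entry->ifname, __get_str(drv), __get_str(msg))
	);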