Diffstat (limited to 'include/trace/events')
-rw-r--r--  include/trace/events/bcache.h            381
-rw-r--r--  include/trace/events/context_tracking.h   58
-rw-r--r--  include/trace/events/ext4.h               29
-rw-r--r--  include/trace/events/power.h              37
-rw-r--r--  include/trace/events/rcu.h                82
-rw-r--r--  include/trace/events/sched.h              22
6 files changed, 436 insertions, 173 deletions
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 3cc5a0b278c3..5ebda976ea93 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -9,9 +9,7 @@
struct search;
DECLARE_EVENT_CLASS(bcache_request,
-
TP_PROTO(struct search *s, struct bio *bio),
-
TP_ARGS(s, bio),
TP_STRUCT__entry(
@@ -22,7 +20,6 @@ DECLARE_EVENT_CLASS(bcache_request,
__field(dev_t, orig_sector )
__field(unsigned int, nr_sector )
__array(char, rwbs, 6 )
- __array(char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
@@ -33,36 +30,66 @@ DECLARE_EVENT_CLASS(bcache_request,
__entry->orig_sector = bio->bi_sector - 16;
__entry->nr_sector = bio->bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
- TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)",
+ TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm,
- __entry->orig_major, __entry->orig_minor,
+ __entry->rwbs, (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
(unsigned long long)__entry->orig_sector)
);
-DEFINE_EVENT(bcache_request, bcache_request_start,
+DECLARE_EVENT_CLASS(bkey,
+ TP_PROTO(struct bkey *k),
+ TP_ARGS(k),
- TP_PROTO(struct search *s, struct bio *bio),
+ TP_STRUCT__entry(
+ __field(u32, size )
+ __field(u32, inode )
+ __field(u64, offset )
+ __field(bool, dirty )
+ ),
- TP_ARGS(s, bio)
+ TP_fast_assign(
+ __entry->inode = KEY_INODE(k);
+ __entry->offset = KEY_OFFSET(k);
+ __entry->size = KEY_SIZE(k);
+ __entry->dirty = KEY_DIRTY(k);
+ ),
+
+ TP_printk("%u:%llu len %u dirty %u", __entry->inode,
+ __entry->offset, __entry->size, __entry->dirty)
);
-DEFINE_EVENT(bcache_request, bcache_request_end,
+DECLARE_EVENT_CLASS(btree_node,
+ TP_PROTO(struct btree *b),
+ TP_ARGS(b),
+
+ TP_STRUCT__entry(
+ __field(size_t, bucket )
+ ),
+ TP_fast_assign(
+ __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
+ ),
+
+ TP_printk("bucket %zu", __entry->bucket)
+);
+
+/* request.c */
+
+DEFINE_EVENT(bcache_request, bcache_request_start,
TP_PROTO(struct search *s, struct bio *bio),
+ TP_ARGS(s, bio)
+);
+DEFINE_EVENT(bcache_request, bcache_request_end,
+ TP_PROTO(struct search *s, struct bio *bio),
TP_ARGS(s, bio)
);
DECLARE_EVENT_CLASS(bcache_bio,
-
TP_PROTO(struct bio *bio),
-
TP_ARGS(bio),
TP_STRUCT__entry(
@@ -70,7 +97,6 @@ DECLARE_EVENT_CLASS(bcache_bio,
__field(sector_t, sector )
__field(unsigned int, nr_sector )
__array(char, rwbs, 6 )
- __array(char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
@@ -78,191 +104,328 @@ DECLARE_EVENT_CLASS(bcache_bio,
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
- TP_printk("%d,%d %s %llu + %u [%s]",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ TP_printk("%d,%d %s %llu + %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector, __entry->nr_sector)
);
-
-DEFINE_EVENT(bcache_bio, bcache_passthrough,
-
+DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
+ TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-DEFINE_EVENT(bcache_bio, bcache_cache_hit,
+TRACE_EVENT(bcache_read,
+ TP_PROTO(struct bio *bio, bool hit, bool bypass),
+ TP_ARGS(bio, hit, bypass),
- TP_PROTO(struct bio *bio),
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(sector_t, sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __field(bool, cache_hit )
+ __field(bool, bypass )
+ ),
- TP_ARGS(bio)
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ __entry->cache_hit = hit;
+ __entry->bypass = bypass;
+ ),
+
+ TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);
-DEFINE_EVENT(bcache_bio, bcache_cache_miss,
+TRACE_EVENT(bcache_write,
+ TP_PROTO(struct bio *bio, bool writeback, bool bypass),
+ TP_ARGS(bio, writeback, bypass),
- TP_PROTO(struct bio *bio),
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(sector_t, sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __field(bool, writeback )
+ __field(bool, bypass )
+ ),
- TP_ARGS(bio)
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ __entry->writeback = writeback;
+ __entry->bypass = bypass;
+ ),
+
+ TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->writeback, __entry->bypass)
);
DEFINE_EVENT(bcache_bio, bcache_read_retry,
-
TP_PROTO(struct bio *bio),
-
TP_ARGS(bio)
);
-DEFINE_EVENT(bcache_bio, bcache_writethrough,
+DEFINE_EVENT(bkey, bcache_cache_insert,
+ TP_PROTO(struct bkey *k),
+ TP_ARGS(k)
+);
- TP_PROTO(struct bio *bio),
+/* Journal */
- TP_ARGS(bio)
-);
+DECLARE_EVENT_CLASS(cache_set,
+ TP_PROTO(struct cache_set *c),
+ TP_ARGS(c),
-DEFINE_EVENT(bcache_bio, bcache_writeback,
+ TP_STRUCT__entry(
+ __array(char, uuid, 16 )
+ ),
- TP_PROTO(struct bio *bio),
+ TP_fast_assign(
+ memcpy(__entry->uuid, c->sb.set_uuid, 16);
+ ),
- TP_ARGS(bio)
+ TP_printk("%pU", __entry->uuid)
);
-DEFINE_EVENT(bcache_bio, bcache_write_skip,
-
- TP_PROTO(struct bio *bio),
+DEFINE_EVENT(bkey, bcache_journal_replay_key,
+ TP_PROTO(struct bkey *k),
+ TP_ARGS(k)
+);
- TP_ARGS(bio)
+DEFINE_EVENT(cache_set, bcache_journal_full,
+ TP_PROTO(struct cache_set *c),
+ TP_ARGS(c)
);
-DEFINE_EVENT(bcache_bio, bcache_btree_read,
+DEFINE_EVENT(cache_set, bcache_journal_entry_full,
+ TP_PROTO(struct cache_set *c),
+ TP_ARGS(c)
+);
+DEFINE_EVENT(bcache_bio, bcache_journal_write,
TP_PROTO(struct bio *bio),
-
TP_ARGS(bio)
);
-DEFINE_EVENT(bcache_bio, bcache_btree_write,
+/* Btree */
- TP_PROTO(struct bio *bio),
+DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
+ TP_PROTO(struct cache_set *c),
+ TP_ARGS(c)
+);
- TP_ARGS(bio)
+DEFINE_EVENT(btree_node, bcache_btree_read,
+ TP_PROTO(struct btree *b),
+ TP_ARGS(b)
);
-DEFINE_EVENT(bcache_bio, bcache_write_dirty,
+TRACE_EVENT(bcache_btree_write,
+ TP_PROTO(struct btree *b),
+ TP_ARGS(b),
- TP_PROTO(struct bio *bio),
+ TP_STRUCT__entry(
+ __field(size_t, bucket )
+ __field(unsigned, block )
+ __field(unsigned, keys )
+ ),
- TP_ARGS(bio)
+ TP_fast_assign(
+ __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
+ __entry->block = b->written;
+ __entry->keys = b->sets[b->nsets].data->keys;
+ ),
+
+ TP_printk("bucket %zu", __entry->bucket)
);
-DEFINE_EVENT(bcache_bio, bcache_read_dirty,
+DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
+ TP_PROTO(struct btree *b),
+ TP_ARGS(b)
+);
- TP_PROTO(struct bio *bio),
+DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
+ TP_PROTO(struct btree *b),
+ TP_ARGS(b)
+);
- TP_ARGS(bio)
+DEFINE_EVENT(btree_node, bcache_btree_node_free,
+ TP_PROTO(struct btree *b),
+ TP_ARGS(b)
);
-DEFINE_EVENT(bcache_bio, bcache_write_moving,
+TRACE_EVENT(bcache_btree_gc_coalesce,
+ TP_PROTO(unsigned nodes),
+ TP_ARGS(nodes),
- TP_PROTO(struct bio *bio),
+ TP_STRUCT__entry(
+ __field(unsigned, nodes )
+ ),
- TP_ARGS(bio)
+ TP_fast_assign(
+ __entry->nodes = nodes;
+ ),
+
+ TP_printk("coalesced %u nodes", __entry->nodes)
);
-DEFINE_EVENT(bcache_bio, bcache_read_moving,
+DEFINE_EVENT(cache_set, bcache_gc_start,
+ TP_PROTO(struct cache_set *c),
+ TP_ARGS(c)
+);
- TP_PROTO(struct bio *bio),
+DEFINE_EVENT(cache_set, bcache_gc_end,
+ TP_PROTO(struct cache_set *c),
+ TP_ARGS(c)
+);
- TP_ARGS(bio)
+DEFINE_EVENT(bkey, bcache_gc_copy,
+ TP_PROTO(struct bkey *k),
+ TP_ARGS(k)
);
-DEFINE_EVENT(bcache_bio, bcache_journal_write,
+DEFINE_EVENT(bkey, bcache_gc_copy_collision,
+ TP_PROTO(struct bkey *k),
+ TP_ARGS(k)
+);
- TP_PROTO(struct bio *bio),
+TRACE_EVENT(bcache_btree_insert_key,
+ TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
+ TP_ARGS(b, k, op, status),
- TP_ARGS(bio)
-);
+ TP_STRUCT__entry(
+ __field(u64, btree_node )
+ __field(u32, btree_level )
+ __field(u32, inode )
+ __field(u64, offset )
+ __field(u32, size )
+ __field(u8, dirty )
+ __field(u8, op )
+ __field(u8, status )
+ ),
-DECLARE_EVENT_CLASS(bcache_cache_bio,
+ TP_fast_assign(
+ __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
+ __entry->btree_level = b->level;
+ __entry->inode = KEY_INODE(k);
+ __entry->offset = KEY_OFFSET(k);
+ __entry->size = KEY_SIZE(k);
+ __entry->dirty = KEY_DIRTY(k);
+ __entry->op = op;
+ __entry->status = status;
+ ),
- TP_PROTO(struct bio *bio,
- sector_t orig_sector,
- struct block_device* orig_bdev),
+ TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
+ __entry->status, __entry->op,
+ __entry->btree_node, __entry->btree_level,
+ __entry->inode, __entry->offset,
+ __entry->size, __entry->dirty)
+);
- TP_ARGS(bio, orig_sector, orig_bdev),
+DECLARE_EVENT_CLASS(btree_split,
+ TP_PROTO(struct btree *b, unsigned keys),
+ TP_ARGS(b, keys),
TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(dev_t, orig_dev )
- __field(sector_t, sector )
- __field(sector_t, orig_sector )
- __field(unsigned int, nr_sector )
- __array(char, rwbs, 6 )
- __array(char, comm, TASK_COMM_LEN )
+ __field(size_t, bucket )
+ __field(unsigned, keys )
),
TP_fast_assign(
- __entry->dev = bio->bi_bdev->bd_dev;
- __entry->orig_dev = orig_bdev->bd_dev;
- __entry->sector = bio->bi_sector;
- __entry->orig_sector = orig_sector;
- __entry->nr_sector = bio->bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
+ __entry->keys = keys;
),
- TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d %llu)",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->rwbs,
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm,
- MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev),
- (unsigned long long)__entry->orig_sector)
+ TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);
-DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert,
-
- TP_PROTO(struct bio *bio,
- sector_t orig_sector,
- struct block_device *orig_bdev),
+DEFINE_EVENT(btree_split, bcache_btree_node_split,
+ TP_PROTO(struct btree *b, unsigned keys),
+ TP_ARGS(b, keys)
+);
- TP_ARGS(bio, orig_sector, orig_bdev)
+DEFINE_EVENT(btree_split, bcache_btree_node_compact,
+ TP_PROTO(struct btree *b, unsigned keys),
+ TP_ARGS(b, keys)
);
-DECLARE_EVENT_CLASS(bcache_gc,
+DEFINE_EVENT(btree_node, bcache_btree_set_root,
+ TP_PROTO(struct btree *b),
+ TP_ARGS(b)
+);
- TP_PROTO(uint8_t *uuid),
+/* Allocator */
- TP_ARGS(uuid),
+TRACE_EVENT(bcache_alloc_invalidate,
+ TP_PROTO(struct cache *ca),
+ TP_ARGS(ca),
TP_STRUCT__entry(
- __field(uint8_t *, uuid)
+ __field(unsigned, free )
+ __field(unsigned, free_inc )
+ __field(unsigned, free_inc_size )
+ __field(unsigned, unused )
),
TP_fast_assign(
- __entry->uuid = uuid;
+ __entry->free = fifo_used(&ca->free);
+ __entry->free_inc = fifo_used(&ca->free_inc);
+ __entry->free_inc_size = ca->free_inc.size;
+ __entry->unused = fifo_used(&ca->unused);
),
- TP_printk("%pU", __entry->uuid)
+ TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
+ __entry->free_inc, __entry->free_inc_size, __entry->unused)
);
+TRACE_EVENT(bcache_alloc_fail,
+ TP_PROTO(struct cache *ca),
+ TP_ARGS(ca),
-DEFINE_EVENT(bcache_gc, bcache_gc_start,
+ TP_STRUCT__entry(
+ __field(unsigned, free )
+ __field(unsigned, free_inc )
+ __field(unsigned, unused )
+ __field(unsigned, blocked )
+ ),
- TP_PROTO(uint8_t *uuid),
+ TP_fast_assign(
+ __entry->free = fifo_used(&ca->free);
+ __entry->free_inc = fifo_used(&ca->free_inc);
+ __entry->unused = fifo_used(&ca->unused);
+ __entry->blocked = atomic_read(&ca->set->prio_blocked);
+ ),
- TP_ARGS(uuid)
+ TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
+ __entry->free_inc, __entry->unused, __entry->blocked)
);
-DEFINE_EVENT(bcache_gc, bcache_gc_end,
+/* Background writeback */
- TP_PROTO(uint8_t *uuid),
+DEFINE_EVENT(bkey, bcache_writeback,
+ TP_PROTO(struct bkey *k),
+ TP_ARGS(k)
+);
- TP_ARGS(uuid)
+DEFINE_EVENT(bkey, bcache_writeback_collision,
+ TP_PROTO(struct bkey *k),
+ TP_ARGS(k)
);
#endif /* _TRACE_BCACHE_H */
diff --git a/include/trace/events/context_tracking.h b/include/trace/events/context_tracking.h
new file mode 100644
index 000000000000..ce8007cf29cf
--- /dev/null
+++ b/include/trace/events/context_tracking.h
@@ -0,0 +1,58 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM context_tracking
+
+#if !defined(_TRACE_CONTEXT_TRACKING_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CONTEXT_TRACKING_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(context_tracking_user,
+
+ TP_PROTO(int dummy),
+
+ TP_ARGS(dummy),
+
+ TP_STRUCT__entry(
+ __field( int, dummy )
+ ),
+
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+
+ TP_printk("%s", "")
+);
+
+/**
+ * user_enter - called when the kernel resumes to userspace
+ * @dummy: dummy arg to make trace event macro happy
+ *
+ * This event occurs when the kernel resumes to userspace after
+ * an exception or a syscall.
+ */
+DEFINE_EVENT(context_tracking_user, user_enter,
+
+ TP_PROTO(int dummy),
+
+ TP_ARGS(dummy)
+);
+
+/**
+ * user_exit - called when userspace enters the kernel
+ * @dummy: dummy arg to make trace event macro happy
+ *
+ * This event occurs when userspace enters the kernel through
+ * an exception or a syscall.
+ */
+DEFINE_EVENT(context_tracking_user, user_exit,
+
+ TP_PROTO(int dummy),
+
+ TP_ARGS(dummy)
+);
+
+
+#endif /* _TRACE_CONTEXT_TRACKING_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 2068db241f22..197d3125df2a 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -14,7 +14,6 @@ struct ext4_prealloc_space;
struct ext4_inode_info;
struct mpage_da_data;
struct ext4_map_blocks;
-struct ext4_extent;
struct extent_status;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -64,10 +63,10 @@ struct extent_status;
{ EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" })
#define show_extent_status(status) __print_flags(status, "", \
- { (1 << 3), "W" }, \
- { (1 << 2), "U" }, \
- { (1 << 1), "D" }, \
- { (1 << 0), "H" })
+ { EXTENT_STATUS_WRITTEN, "W" }, \
+ { EXTENT_STATUS_UNWRITTEN, "U" }, \
+ { EXTENT_STATUS_DELAYED, "D" }, \
+ { EXTENT_STATUS_HOLE, "H" })
TRACE_EVENT(ext4_free_inode,
@@ -2192,7 +2191,7 @@ TRACE_EVENT(ext4_ext_remove_space_done,
(unsigned short) __entry->eh_entries)
);
-TRACE_EVENT(ext4_es_insert_extent,
+DECLARE_EVENT_CLASS(ext4__es_extent,
TP_PROTO(struct inode *inode, struct extent_status *es),
TP_ARGS(inode, es),
@@ -2212,7 +2211,7 @@ TRACE_EVENT(ext4_es_insert_extent,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
),
TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
@@ -2222,6 +2221,18 @@ TRACE_EVENT(ext4_es_insert_extent,
__entry->pblk, show_extent_status(__entry->status))
);
+DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent,
+ TP_PROTO(struct inode *inode, struct extent_status *es),
+
+ TP_ARGS(inode, es)
+);
+
+DEFINE_EVENT(ext4__es_extent, ext4_es_cache_extent,
+ TP_PROTO(struct inode *inode, struct extent_status *es),
+
+ TP_ARGS(inode, es)
+);
+
TRACE_EVENT(ext4_es_remove_extent,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len),
@@ -2289,7 +2300,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
),
TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
@@ -2343,7 +2354,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
__entry->found = found;
),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 8e42410bd159..cda100d6762d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -66,6 +66,43 @@ TRACE_EVENT(machine_suspend,
TP_printk("state=%lu", (unsigned long)__entry->state)
);
+TRACE_EVENT(device_pm_report_time,
+
+ TP_PROTO(struct device *dev, const char *pm_ops, s64 ops_time,
+ char *pm_event_str, int error),
+
+ TP_ARGS(dev, pm_ops, ops_time, pm_event_str, error),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(driver, dev_driver_string(dev))
+ __string(parent, dev->parent ? dev_name(dev->parent) : "none")
+ __string(pm_ops, pm_ops ? pm_ops : "none ")
+ __string(pm_event_str, pm_event_str)
+ __field(s64, ops_time)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ const char *tmp = dev->parent ? dev_name(dev->parent) : "none";
+ const char *tmp_i = pm_ops ? pm_ops : "none ";
+
+ __assign_str(device, dev_name(dev));
+ __assign_str(driver, dev_driver_string(dev));
+ __assign_str(parent, tmp);
+ __assign_str(pm_ops, tmp_i);
+ __assign_str(pm_event_str, pm_event_str);
+ __entry->ops_time = ops_time;
+ __entry->error = error;
+ ),
+
+ /* ops_str has an extra space at the end */
+ TP_printk("%s %s parent=%s state=%s ops=%snsecs=%lld err=%d",
+ __get_str(driver), __get_str(device), __get_str(parent),
+ __get_str(pm_event_str), __get_str(pm_ops),
+ __entry->ops_time, __entry->error)
+);
+
DECLARE_EVENT_CLASS(wakeup_source,
TP_PROTO(const char *name, unsigned int state),
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 59ebcc89f148..ee2376cfaab3 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -19,12 +19,12 @@
*/
TRACE_EVENT(rcu_utilization,
- TP_PROTO(char *s),
+ TP_PROTO(const char *s),
TP_ARGS(s),
TP_STRUCT__entry(
- __field(char *, s)
+ __field(const char *, s)
),
TP_fast_assign(
@@ -51,14 +51,14 @@ TRACE_EVENT(rcu_utilization,
*/
TRACE_EVENT(rcu_grace_period,
- TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+ TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
TP_ARGS(rcuname, gpnum, gpevent),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
- __field(char *, gpevent)
+ __field(const char *, gpevent)
),
TP_fast_assign(
@@ -89,21 +89,21 @@ TRACE_EVENT(rcu_grace_period,
*/
TRACE_EVENT(rcu_future_grace_period,
- TP_PROTO(char *rcuname, unsigned long gpnum, unsigned long completed,
+ TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
unsigned long c, u8 level, int grplo, int grphi,
- char *gpevent),
+ const char *gpevent),
TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(unsigned long, completed)
__field(unsigned long, c)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
- __field(char *, gpevent)
+ __field(const char *, gpevent)
),
TP_fast_assign(
@@ -132,13 +132,13 @@ TRACE_EVENT(rcu_future_grace_period,
*/
TRACE_EVENT(rcu_grace_period_init,
- TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
+ TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
int grplo, int grphi, unsigned long qsmask),
TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(u8, level)
__field(int, grplo)
@@ -168,12 +168,12 @@ TRACE_EVENT(rcu_grace_period_init,
*/
TRACE_EVENT(rcu_preempt_task,
- TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+ TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
TP_ARGS(rcuname, pid, gpnum),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, pid)
),
@@ -195,12 +195,12 @@ TRACE_EVENT(rcu_preempt_task,
*/
TRACE_EVENT(rcu_unlock_preempted_task,
- TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+ TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
TP_ARGS(rcuname, gpnum, pid),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, pid)
),
@@ -224,14 +224,14 @@ TRACE_EVENT(rcu_unlock_preempted_task,
*/
TRACE_EVENT(rcu_quiescent_state_report,
- TP_PROTO(char *rcuname, unsigned long gpnum,
+ TP_PROTO(const char *rcuname, unsigned long gpnum,
unsigned long mask, unsigned long qsmask,
u8 level, int grplo, int grphi, int gp_tasks),
TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
@@ -268,15 +268,15 @@ TRACE_EVENT(rcu_quiescent_state_report,
*/
TRACE_EVENT(rcu_fqs,
- TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+ TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
TP_ARGS(rcuname, gpnum, cpu, qsevent),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, cpu)
- __field(char *, qsevent)
+ __field(const char *, qsevent)
),
TP_fast_assign(
@@ -308,12 +308,12 @@ TRACE_EVENT(rcu_fqs,
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
+ TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
TP_ARGS(polarity, oldnesting, newnesting),
TP_STRUCT__entry(
- __field(char *, polarity)
+ __field(const char *, polarity)
__field(long long, oldnesting)
__field(long long, newnesting)
),
@@ -352,12 +352,12 @@ TRACE_EVENT(rcu_dyntick,
*/
TRACE_EVENT(rcu_prep_idle,
- TP_PROTO(char *reason),
+ TP_PROTO(const char *reason),
TP_ARGS(reason),
TP_STRUCT__entry(
- __field(char *, reason)
+ __field(const char *, reason)
),
TP_fast_assign(
@@ -376,13 +376,13 @@ TRACE_EVENT(rcu_prep_idle,
*/
TRACE_EVENT(rcu_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
long qlen),
TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(void *, func)
__field(long, qlen_lazy)
@@ -412,13 +412,13 @@ TRACE_EVENT(rcu_callback,
*/
TRACE_EVENT(rcu_kfree_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
long qlen_lazy, long qlen),
TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
__field(long, qlen_lazy)
@@ -447,12 +447,12 @@ TRACE_EVENT(rcu_kfree_callback,
*/
TRACE_EVENT(rcu_batch_start,
- TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
+ TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(long, qlen_lazy)
__field(long, qlen)
__field(long, blimit)
@@ -477,12 +477,12 @@ TRACE_EVENT(rcu_batch_start,
*/
TRACE_EVENT(rcu_invoke_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp),
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp),
TP_ARGS(rcuname, rhp),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(void *, func)
),
@@ -506,12 +506,12 @@ TRACE_EVENT(rcu_invoke_callback,
*/
TRACE_EVENT(rcu_invoke_kfree_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
TP_ARGS(rcuname, rhp, offset),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
),
@@ -539,13 +539,13 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
*/
TRACE_EVENT(rcu_batch_end,
- TP_PROTO(char *rcuname, int callbacks_invoked,
+ TP_PROTO(const char *rcuname, int callbacks_invoked,
bool cb, bool nr, bool iit, bool risk),
TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(int, callbacks_invoked)
__field(bool, cb)
__field(bool, nr)
@@ -577,13 +577,13 @@ TRACE_EVENT(rcu_batch_end,
*/
TRACE_EVENT(rcu_torture_read,
- TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
+ TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
unsigned long secs, unsigned long c_old, unsigned long c),
TP_ARGS(rcutorturename, rhp, secs, c_old, c),
TP_STRUCT__entry(
- __field(char *, rcutorturename)
+ __field(const char *, rcutorturename)
__field(struct rcu_head *, rhp)
__field(unsigned long, secs)
__field(unsigned long, c_old)
@@ -623,13 +623,13 @@ TRACE_EVENT(rcu_torture_read,
*/
TRACE_EVENT(rcu_barrier,
- TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+ TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
TP_ARGS(rcuname, s, cpu, cnt, done),
TP_STRUCT__entry(
- __field(char *, rcuname)
- __field(char *, s)
+ __field(const char *, rcuname)
+ __field(const char *, s)
__field(int, cpu)
__field(int, cnt)
__field(unsigned long, done)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e5586caff67a..2e7d9947a10d 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -57,7 +57,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(p, success),
+ TP_ARGS(__perf_task(p), success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -73,9 +73,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
- )
- TP_perf_assign(
- __perf_task(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
@@ -313,7 +310,7 @@ DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
- TP_ARGS(tsk, delay),
+ TP_ARGS(__perf_task(tsk), __perf_count(delay)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -325,10 +322,6 @@ DECLARE_EVENT_CLASS(sched_stat_template,
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->delay = delay;
- )
- TP_perf_assign(
- __perf_count(delay);
- __perf_task(tsk);
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
@@ -372,11 +365,11 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
-TRACE_EVENT(sched_stat_runtime,
+DECLARE_EVENT_CLASS(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime),
+ TP_ARGS(tsk, __perf_count(runtime), vruntime),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -390,9 +383,6 @@ TRACE_EVENT(sched_stat_runtime,
__entry->pid = tsk->pid;
__entry->runtime = runtime;
__entry->vruntime = vruntime;
- )
- TP_perf_assign(
- __perf_count(runtime);
),
TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
@@ -401,6 +391,10 @@ TRACE_EVENT(sched_stat_runtime,
(unsigned long long)__entry->vruntime)
);
+DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
+ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_ARGS(tsk, runtime, vruntime));
+
/*
* Tracepoint for showing priority inheritance modifying a tasks
* priority.