Diffstat (limited to 'tools/perf/util/machine.c')
-rw-r--r--	tools/perf/util/machine.c	295
1 file changed, 157 insertions(+), 138 deletions(-)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 9e02e19c1b7a..4e62843d51b7 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -43,7 +43,8 @@
#include <linux/string.h>
#include <linux/zalloc.h>
-static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
+ struct thread *th, bool lock);
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip);
static struct dso *machine__kernel_dso(struct machine *machine)
@@ -72,6 +73,22 @@ static void machine__threads_init(struct machine *machine)
}
}
+static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
+{
+ int to_find = (int) *((pid_t *)key);
+
+ return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
+}
+
+static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
+ struct rb_root *tree)
+{
+ pid_t to_find = thread__tid(th);
+ struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);
+
+ return rb_entry(nd, struct thread_rb_node, rb_node);
+}
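
The new lookup helper follows the kernel rb_find() convention: the comparator receives the search key and a tree node and returns negative/zero/positive, so the tree walk itself lives in the library. The same wrapper-node-plus-comparator idiom, shown as a runnable userspace sketch with POSIX tsearch/tfind (toy types, not perf's API):

#define _GNU_SOURCE		/* for tdestroy() */
#include <search.h>
#include <stdio.h>
#include <stdlib.h>

struct thread_node { int tid; };	/* stand-in for thread_rb_node */

/* Same contract as thread_rb_node__cmp_tid: key vs. node, <0/0/>0. */
static int cmp_tid(const void *a, const void *b)
{
	const struct thread_node *x = a, *y = b;

	return x->tid - y->tid;
}

int main(void)
{
	void *root = NULL;
	struct thread_node *n = calloc(1, sizeof(*n));
	struct thread_node key = { .tid = 42 };
	struct thread_node **found;

	n->tid = 42;
	tsearch(n, &root, cmp_tid);		/* insert the wrapper node */

	found = tfind(&key, &root, cmp_tid);
	printf("found tid %d\n", found ? (*found)->tid : -1);
	tdestroy(root, free);
	return 0;
}
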
+
static int machine__set_mmap_name(struct machine *machine)
{
if (machine__is_host(machine))
@@ -214,10 +231,10 @@ void machine__delete_threads(struct machine *machine)
down_write(&threads->lock);
nd = rb_first_cached(&threads->entries);
while (nd) {
- struct thread *t = rb_entry(nd, struct thread, rb_node);
+ struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
nd = rb_next(nd);
- __machine__remove_thread(machine, t, false);
+ __machine__remove_thread(machine, trb, trb->thread, false);
}
up_write(&threads->lock);
}
@@ -231,7 +248,7 @@ void machine__exit(struct machine *machine)
return;
machine__destroy_kernel_maps(machine);
- maps__delete(machine->kmaps);
+ maps__zput(machine->kmaps);
dsos__exit(&machine->dsos);
machine__exit_vdso(machine);
zfree(&machine->root_dir);
@@ -239,19 +256,9 @@ void machine__exit(struct machine *machine)
zfree(&machine->current_tid);
zfree(&machine->kallsyms_filename);
+ machine__delete_threads(machine);
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- struct thread *thread, *n;
- /*
- * Forget about the dead, at this point whatever threads were
- * left in the dead lists better have a reference count taken
- * by who is using them, and then, when they drop those references
- * and it finally hits zero, thread__put() will check and see that
- * its not in the dead threads list and will not try to remove it
- * from there, just calling thread__delete() straight away.
- */
- list_for_each_entry_safe(thread, n, &threads->dead, node)
- list_del_init(&thread->node);
exit_rwsem(&threads->lock);
}
@@ -435,7 +442,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
return NULL;
/* Assume maps are set up if there are any */
- if (maps__nr_maps(thread->maps))
+ if (maps__nr_maps(thread__maps(thread)))
return thread;
host_thread = machine__find_thread(host_machine, -1, pid);
@@ -448,7 +455,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
* Guest code can be found in hypervisor process at the same address
* so copy host maps.
*/
- err = maps__clone(thread, host_thread->maps);
+ err = maps__clone(thread, thread__maps(host_thread));
thread__put(host_thread);
if (err)
goto out_err;
@@ -513,45 +520,45 @@ static void machine__update_thread_pid(struct machine *machine,
{
struct thread *leader;
- if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
+ if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
return;
- th->pid_ = pid;
+ thread__set_pid(th, pid);
- if (th->pid_ == th->tid)
+ if (thread__pid(th) == thread__tid(th))
return;
- leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
+ leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
if (!leader)
goto out_err;
- if (!leader->maps)
- leader->maps = maps__new(machine);
+ if (!thread__maps(leader))
+ thread__set_maps(leader, maps__new(machine));
- if (!leader->maps)
+ if (!thread__maps(leader))
goto out_err;
- if (th->maps == leader->maps)
- return;
+ if (thread__maps(th) == thread__maps(leader))
+ goto out_put;
- if (th->maps) {
+ if (thread__maps(th)) {
/*
* Maps are created from MMAP events which provide the pid and
* tid. Consequently there never should be any maps on a thread
* with an unknown pid. Just print an error if there are.
*/
- if (!maps__empty(th->maps))
+ if (!maps__empty(thread__maps(th)))
pr_err("Discarding thread maps for %d:%d\n",
- th->pid_, th->tid);
- maps__put(th->maps);
+ thread__pid(th), thread__tid(th));
+ maps__put(thread__maps(th));
}
- th->maps = maps__get(leader->maps);
+ thread__set_maps(th, maps__get(thread__maps(leader)));
out_put:
thread__put(leader);
return;
out_err:
- pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
+ pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
goto out_put;
}
@@ -568,11 +575,11 @@ __threads__get_last_match(struct threads *threads, struct machine *machine,
th = threads->last_match;
if (th != NULL) {
- if (th->tid == tid) {
+ if (thread__tid(th) == tid) {
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
-
+ thread__put(threads->last_match);
threads->last_match = NULL;
}
@@ -594,7 +601,8 @@ threads__get_last_match(struct threads *threads, struct machine *machine,
static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
- threads->last_match = th;
+ thread__put(threads->last_match);
+ threads->last_match = thread__get(th);
}
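
threads->last_match now owns a reference of its own: the setter drops the cached thread's reference before taking one on the new value, so the cache can never outlive its entry. A minimal sketch of that swap idiom with a toy refcount (hypothetical obj type, not perf's thread__get()/thread__put()):

#include <stdlib.h>

struct obj { int refcnt; };

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

/* Swap a cached pointer while keeping the cache's reference balanced;
 * both the old and the new value may be NULL. */
static void cache_set(struct obj **slot, struct obj *o)
{
	obj_put(*slot);
	*slot = obj_get(o);
}
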
static void
@@ -616,6 +624,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
struct rb_node **p = &threads->entries.rb_root.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
+ struct thread_rb_node *nd;
bool leftmost = true;
th = threads__get_last_match(threads, machine, pid, tid);
@@ -624,15 +633,15 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
while (*p != NULL) {
parent = *p;
- th = rb_entry(parent, struct thread, rb_node);
+ th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
- if (th->tid == tid) {
+ if (thread__tid(th) == tid) {
threads__set_last_match(threads, th);
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
- if (tid < th->tid)
+ if (tid < thread__tid(th))
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
@@ -644,32 +653,40 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
return NULL;
th = thread__new(pid, tid);
- if (th != NULL) {
- rb_link_node(&th->rb_node, parent, p);
- rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
+ if (th == NULL)
+ return NULL;
- /*
- * We have to initialize maps separately after rb tree is updated.
- *
- * The reason is that we call machine__findnew_thread
- * within thread__init_maps to find the thread
- * leader and that would screwed the rb tree.
- */
- if (thread__init_maps(th, machine)) {
- rb_erase_cached(&th->rb_node, &threads->entries);
- RB_CLEAR_NODE(&th->rb_node);
- thread__put(th);
- return NULL;
- }
- /*
- * It is now in the rbtree, get a ref
- */
- thread__get(th);
- threads__set_last_match(threads, th);
- ++threads->nr;
+ nd = malloc(sizeof(*nd));
+ if (nd == NULL) {
+ thread__put(th);
+ return NULL;
}
+ nd->thread = th;
- return th;
+ rb_link_node(&nd->rb_node, parent, p);
+ rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost);
+ /*
+ * We have to initialize maps separately after rb tree is updated.
+ *
+ * The reason is that we call machine__findnew_thread within
+ * thread__init_maps to find the thread leader and that would screw up

+ * the rb tree.
+ */
+ if (thread__init_maps(th, machine)) {
+ pr_err("Thread init failed thread %d\n", pid);
+ rb_erase_cached(&nd->rb_node, &threads->entries);
+ RB_CLEAR_NODE(&nd->rb_node);
+ free(nd);
+ thread__put(th);
+ return NULL;
+ }
+ /*
+ * It is now in the rbtree, get a ref
+ */
+ threads__set_last_match(threads, th);
+ ++threads->nr;
+
+ return thread__get(th);
}
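
The rewritten insertion is two-phase: the wrapper node is linked into the tree first, because thread__init_maps() can re-enter the tree to look up the thread leader; if that second phase fails, the link is fully undone before returning. A compact sketch of link-then-initialize with rollback, using a singly linked list as a stand-in for the rbtree (hypothetical names):

#include <stdlib.h>

struct node { struct node *next; int tid; int *payload; };

/* Phase 1 links the node so lookups during phase 2 can see it;
 * a phase-2 failure must unlink before freeing. */
static struct node *insert(struct node **head, int tid)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return NULL;
	n->tid = tid;
	n->next = *head;			/* phase 1: visible */
	*head = n;

	n->payload = malloc(sizeof(int));	/* phase 2: may fail */
	if (!n->payload) {
		*head = n->next;		/* roll back the link */
		free(n);
		return NULL;
	}
	return n;
}
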
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
@@ -1120,7 +1137,7 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
- struct thread *pos = rb_entry(nd, struct thread, rb_node);
+ struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
ret += thread__fprintf(pos, fp);
}
@@ -1780,7 +1797,6 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
struct extra_kernel_map *xm,
struct build_id *bid)
{
- struct map *map;
enum dso_space_type dso_space;
bool is_kernel_mmap;
const char *mmap_name = machine->mmap_name;
@@ -1806,8 +1822,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
}
if (xm->name[0] == '/' ||
(!is_kernel_mmap && xm->name[0] == '[')) {
- map = machine__addnew_module_map(machine, xm->start,
- xm->name);
+ struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
+
if (map == NULL)
goto out_problem;
@@ -1816,6 +1832,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (build_id__is_defined(bid))
dso__set_build_id(map__dso(map), bid);
+ map__put(map);
} else if (is_kernel_mmap) {
const char *symbol_name = xm->name + strlen(mmap_name);
/*
@@ -1851,7 +1868,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
continue;
- kernel = dso;
+ kernel = dso__get(dso);
break;
}
@@ -1896,6 +1913,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
*/
dso__load(kernel, machine__kernel_map(machine));
}
+ dso__put(kernel);
} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
return machine__process_extra_kernel_map(machine, xm);
}
@@ -2031,34 +2049,28 @@ out_problem:
return 0;
}
-static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
+static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
+ struct thread *th, bool lock)
{
- struct threads *threads = machine__threads(machine, th->tid);
+ struct threads *threads = machine__threads(machine, thread__tid(th));
+
+ if (!nd)
+ nd = thread_rb_node__find(th, &threads->entries.rb_root);
- if (threads->last_match == th)
+ if (threads->last_match && RC_CHK_ACCESS(threads->last_match) == RC_CHK_ACCESS(th))
threads__set_last_match(threads, NULL);
if (lock)
down_write(&threads->lock);
- BUG_ON(refcount_read(&th->refcnt) == 0);
+ BUG_ON(refcount_read(thread__refcnt(th)) == 0);
- rb_erase_cached(&th->rb_node, &threads->entries);
- RB_CLEAR_NODE(&th->rb_node);
+ thread__put(nd->thread);
+ rb_erase_cached(&nd->rb_node, &threads->entries);
+ RB_CLEAR_NODE(&nd->rb_node);
--threads->nr;
- /*
- * Move it first to the dead_threads list, then drop the reference,
- * if this is the last reference, then the thread__delete destructor
- * will be called and we will remove it from the dead_threads list.
- */
- list_add_tail(&th->node, &threads->dead);
- /*
- * We need to do the put here because if this is the last refcount,
- * then we will be touching the threads->dead head when removing the
- * thread.
- */
- thread__put(th);
+ free(nd);
if (lock)
up_write(&threads->lock);
@@ -2066,7 +2078,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
void machine__remove_thread(struct machine *machine, struct thread *th)
{
- return __machine__remove_thread(machine, th, true);
+ return __machine__remove_thread(machine, NULL, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
@@ -2090,9 +2102,9 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
* (fork) event that would have removed the thread was lost. Assume the
* latter case and continue on as best we can.
*/
- if (parent->pid_ != (pid_t)event->fork.ppid) {
+ if (thread__pid(parent) != (pid_t)event->fork.ppid) {
dump_printf("removing erroneous parent thread %d/%d\n",
- parent->pid_, parent->tid);
+ thread__pid(parent), thread__tid(parent));
machine__remove_thread(machine, parent);
thread__put(parent);
parent = machine__findnew_thread(machine, event->fork.ppid,
@@ -2145,10 +2157,8 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
if (dump_trace)
perf_event__fprintf_task(event, stdout);
- if (thread != NULL) {
- thread__exited(thread);
+ if (thread != NULL)
thread__put(thread);
- }
return 0;
}
@@ -2213,7 +2223,7 @@ static void ip__resolve_ams(struct thread *thread,
{
struct addr_location al;
- memset(&al, 0, sizeof(al));
+ addr_location__init(&al);
/*
* We cannot use the header.misc hint to determine whether a
* branch stack address is user, kernel, guest, hypervisor.
@@ -2226,11 +2236,12 @@ static void ip__resolve_ams(struct thread *thread,
ams->addr = ip;
ams->al_addr = al.addr;
ams->al_level = al.level;
- ams->ms.maps = al.maps;
+ ams->ms.maps = maps__get(al.maps);
ams->ms.sym = al.sym;
- ams->ms.map = al.map;
+ ams->ms.map = map__get(al.map);
ams->phys_addr = 0;
ams->data_page_size = 0;
+ addr_location__exit(&al);
}
static void ip__resolve_data(struct thread *thread,
@@ -2239,18 +2250,19 @@ static void ip__resolve_data(struct thread *thread,
{
struct addr_location al;
- memset(&al, 0, sizeof(al));
+ addr_location__init(&al);
thread__find_symbol(thread, m, addr, &al);
ams->addr = addr;
ams->al_addr = al.addr;
ams->al_level = al.level;
- ams->ms.maps = al.maps;
+ ams->ms.maps = maps__get(al.maps);
ams->ms.sym = al.sym;
- ams->ms.map = al.map;
+ ams->ms.map = map__get(al.map);
ams->phys_addr = phys_addr;
ams->data_page_size = daddr_page_size;
+ addr_location__exit(&al);
}
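
Both resolvers switch from memset() to a paired addr_location__init()/addr_location__exit(), so the addr_location owns whatever references resolution stored in it and drops them on every path; values that must outlive it (the maps/map copied into ams) take their own get first. The init/exit ownership shape, as a self-contained sketch (toy res type, hypothetical):

#include <stdlib.h>
#include <string.h>

struct res { char *buf; };		/* toy addr_location */

static void res_init(struct res *r) { memset(r, 0, sizeof(*r)); }
static void res_exit(struct res *r) { free(r->buf); r->buf = NULL; }

static char *resolve(void)
{
	struct res r;
	char *out;

	res_init(&r);
	r.buf = strdup("resolved");
	out = r.buf ? strdup(r.buf) : NULL;	/* own copy, outlives r */
	res_exit(&r);				/* r's reference dropped */
	return out;				/* caller frees */
}
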
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
@@ -2309,12 +2321,13 @@ static int add_callchain_ip(struct thread *thread,
struct iterations *iter,
u64 branch_from)
{
- struct map_symbol ms;
+ struct map_symbol ms = {};
struct addr_location al;
- int nr_loop_iter = 0, err;
+ int nr_loop_iter = 0, err = 0;
u64 iter_cycles = 0;
const char *srcline = NULL;
+ addr_location__init(&al);
al.filtered = 0;
al.sym = NULL;
al.srcline = NULL;
@@ -2340,9 +2353,10 @@ static int add_callchain_ip(struct thread *thread,
* Discard all.
*/
callchain_cursor_reset(cursor);
- return 1;
+ err = 1;
+ goto out;
}
- return 0;
+ goto out;
}
thread__find_symbol(thread, *cpumode, ip, &al);
}
@@ -2355,31 +2369,34 @@ static int add_callchain_ip(struct thread *thread,
symbol__match_regex(al.sym, &ignore_callees_regex)) {
/* Treat this symbol as the root,
forgetting its callees. */
- *root_al = al;
+ addr_location__copy(root_al, &al);
callchain_cursor_reset(cursor);
}
}
if (symbol_conf.hide_unresolved && al.sym == NULL)
- return 0;
+ goto out;
if (iter) {
nr_loop_iter = iter->nr_loop_iter;
iter_cycles = iter->cycles;
}
- ms.maps = al.maps;
- ms.map = al.map;
+ ms.maps = maps__get(al.maps);
+ ms.map = map__get(al.map);
ms.sym = al.sym;
if (!branch && append_inlines(cursor, &ms, ip) == 0)
- return 0;
+ goto out;
srcline = callchain_srcline(&ms, al.addr);
err = callchain_cursor_append(cursor, ip, &ms,
branch, flags, nr_loop_iter,
iter_cycles, branch_from, srcline);
- map__put(al.map);
+out:
+ addr_location__exit(&al);
+ maps__put(ms.maps);
+ map__put(ms.map);
return err;
}
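
add_callchain_ip() is reshaped so every early return funnels through one out: label, where the references held by al and ms are unconditionally dropped; because maps__put()/map__put() accept NULL, no path needs its own cleanup. The single-exit idiom in miniature (runnable, toy allocations):

#include <stdlib.h>

/* Single-exit cleanup: all failures jump to out, where the
 * NULL-tolerant releases run exactly once on every path. */
static int process(int fail_early)
{
	int err = 0;
	char *a = malloc(16), *b = NULL;

	if (!a) {
		err = -1;
		goto out;
	}
	if (fail_early) {
		err = 1;
		goto out;		/* early exit, nothing leaks */
	}
	b = malloc(16);
	if (!b)
		err = -1;
out:
	free(b);			/* free(NULL) is a no-op */
	free(a);
	return err;
}
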
@@ -2504,7 +2521,7 @@ static void save_lbr_cursor_node(struct thread *thread,
struct callchain_cursor *cursor,
int idx)
{
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
if (!lbr_stitch)
return;
@@ -2546,7 +2563,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
* in callchain_cursor_commit() when the writing session is closed.
* Using curr and pos to track the current cursor node.
*/
- if (thread->lbr_stitch) {
+ if (thread__lbr_stitch(thread)) {
cursor->curr = NULL;
cursor->pos = cursor->nr;
if (cursor->nr) {
@@ -2574,7 +2591,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
* But does not need to save current cursor node for entry 0.
* It's impossible to stitch the whole LBRs of previous sample.
*/
- if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
+ if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
if (!cursor->curr)
cursor->curr = cursor->first;
else
@@ -2627,7 +2644,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
struct callchain_cursor *cursor)
{
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct callchain_cursor_node *cnode;
struct stitch_list *stitch_node;
int err;
@@ -2651,7 +2668,7 @@ static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
static struct stitch_list *get_stitch_node(struct thread *thread)
{
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
struct stitch_list *stitch_node;
if (!list_empty(&lbr_stitch->free_lists)) {
@@ -2675,7 +2692,7 @@ static bool has_stitched_lbr(struct thread *thread,
struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
struct branch_stack *prev_stack = prev->branch_stack;
struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
- struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
int i, j, nr_identical_branches = 0;
struct stitch_list *stitch_node;
u64 cur_base, distance;
@@ -2739,27 +2756,29 @@ static bool has_stitched_lbr(struct thread *thread,
static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
{
- if (thread->lbr_stitch)
+ if (thread__lbr_stitch(thread))
return true;
- thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
- if (!thread->lbr_stitch)
+ thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
+ if (!thread__lbr_stitch(thread))
goto err;
- thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
- if (!thread->lbr_stitch->prev_lbr_cursor)
+ thread__lbr_stitch(thread)->prev_lbr_cursor =
+ calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
+ if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
goto free_lbr_stitch;
- INIT_LIST_HEAD(&thread->lbr_stitch->lists);
- INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
+ INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
+ INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
return true;
free_lbr_stitch:
- zfree(&thread->lbr_stitch);
+ free(thread__lbr_stitch(thread));
+ thread__set_lbr_stitch(thread, NULL);
err:
pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
- thread->lbr_stitch_enable = false;
+ thread__set_lbr_stitch_enable(thread, false);
return false;
}
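
alloc_lbr_stitch() keeps its lazy, idempotent shape under the new accessors: return immediately if the state already exists, and unwind the half-built object when the second allocation fails. The same shape, reduced to a sketch (hypothetical stitch type):

#include <stdlib.h>

struct stitch { int *prev_cursor; };

/* Lazy, idempotent allocation: cheap when already present,
 * fully unwound when any step fails. */
static int ensure_stitch(struct stitch **sp, unsigned int n)
{
	struct stitch *s = *sp;

	if (s)
		return 0;
	s = calloc(1, sizeof(*s));
	if (!s)
		return -1;
	s->prev_cursor = calloc(n + 1, sizeof(int));
	if (!s->prev_cursor) {
		free(s);		/* unwind the partial object */
		return -1;
	}
	*sp = s;
	return 0;
}
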
@@ -2795,9 +2814,9 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
if (i == chain_nr)
return 0;
- if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
+ if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
(max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
- lbr_stitch = thread->lbr_stitch;
+ lbr_stitch = thread__lbr_stitch(thread);
stitched_lbr = has_stitched_lbr(thread, sample,
&lbr_stitch->prev_sample,
@@ -2877,7 +2896,7 @@ static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
static u64 get_leaf_frame_caller(struct perf_sample *sample,
struct thread *thread, int usr_idx)
{
- if (machine__normalized_is(maps__machine(thread->maps), "arm64"))
+ if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
else
return 0;
@@ -3072,6 +3091,7 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
struct dso *dso;
u64 addr;
int ret = 1;
+ struct map_symbol ilist_ms;
if (!symbol_conf.inline_name || !map || !sym)
return ret;
@@ -3088,18 +3108,20 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
inlines__tree_insert(&dso->inlined_nodes, inline_node);
}
+ ilist_ms = (struct map_symbol) {
+ .maps = maps__get(ms->maps),
+ .map = map__get(map),
+ };
list_for_each_entry(ilist, &inline_node->val, list) {
- struct map_symbol ilist_ms = {
- .maps = ms->maps,
- .map = map,
- .sym = ilist->symbol,
- };
+ ilist_ms.sym = ilist->symbol;
ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
NULL, 0, 0, 0, ilist->srcline);
if (ret != 0)
return ret;
}
+ map__put(ilist_ms.map);
+ maps__put(ilist_ms.maps);
return ret;
}
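
In append_inlines() the map_symbol is now built once before the loop, taking one maps/map reference that all inline frames share; only the .sym field changes per iteration, and the references are dropped after the loop. A minimal sketch of hoisting the get/put out of the loop (toy refcount, hypothetical emit()):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcnt; };
static struct obj *obj_get(struct obj *o) { if (o) o->refcnt++; return o; }
static void obj_put(struct obj *o) { if (o && --o->refcnt == 0) free(o); }

struct view { struct obj *owner; const char *sym; };
static void emit(const struct view *v) { puts(v->sym); }

/* One get before the loop, one put after, instead of a
 * get/put pair on every iteration. */
static void emit_all(struct obj *owner, const char **syms, size_t n)
{
	struct view v = { .owner = obj_get(owner) };

	for (size_t i = 0; i < n; i++) {
		v.sym = syms[i];
		emit(&v);
	}
	obj_put(v.owner);
}
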
@@ -3158,6 +3180,9 @@ int thread__resolve_callchain(struct thread *thread,
{
int ret = 0;
+ if (cursor == NULL)
+ return -ENOMEM;
+
callchain_cursor_reset(cursor);
if (callchain_param.order == ORDER_CALLEE) {
@@ -3191,7 +3216,6 @@ int machine__for_each_thread(struct machine *machine,
{
struct threads *threads;
struct rb_node *nd;
- struct thread *thread;
int rc = 0;
int i;
@@ -3199,14 +3223,9 @@ int machine__for_each_thread(struct machine *machine,
threads = &machine->threads[i];
for (nd = rb_first_cached(&threads->entries); nd;
nd = rb_next(nd)) {
- thread = rb_entry(nd, struct thread, rb_node);
- rc = fn(thread, priv);
- if (rc != 0)
- return rc;
- }
+ struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
- list_for_each_entry(thread, &threads->dead, node) {
- rc = fn(thread, priv);
+ rc = fn(trb->thread, priv);
if (rc != 0)
return rc;
}
@@ -3264,7 +3283,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
if (!thread)
return -ENOMEM;
- thread->cpu = cpu;
+ thread__set_cpu(thread, cpu);
thread__put(thread);
return 0;