path: root/tools/perf/util/symbol-elf.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-23 21:29:51 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-23 21:29:51 +0300
commit    0df82189bc42037678fa590a77ed0116f428c90d
tree      2d75bef59c8bf1d44d7c7899b9fa7512bba10fa0 /tools/perf/util/symbol-elf.c
parent    b72b5fecc1b8a2e595bd03d7d257c88ea3f9fd45
parent    f9fa0778ee7349a9aa3d2ea10e9f2ab843a0b44e
Merge tag 'perf-tools-for-v6.3-1-2023-02-22' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tools updates from Arnaldo Carvalho de Melo:

 "Miscellaneous:

   - Add Ian Rogers to MAINTAINERS as a perf tools reviewer.

   - Add support for the retire latency feature (pipeline stall of an
     instruction compared to the previous one, in cycles) present on
     some Intel processors.

   - Add 'perf c2c' report option to show false sharing with adjacent
     cachelines, to be used in machines with cacheline prefetching,
     where accesses to a cacheline bring in the next one too.

   - Skip 'perf test bpf' when the required kernel-debuginfo package
     isn't installed.

   - Avoid the d3-flame-graph package dependency in 'perf script
     flamegraph', making this feature more generally available.

   - Add JSON metric events to present CPI stall cycles in Power10.

   - Assorted improvements/refactorings on the JSON metrics parsing
     code.

  perf lock contention:

   - Add the -o/--lock-owner option:

      $ sudo ./perf lock contention -abo -- ./perf bench sched pipe
      # Running 'sched/pipe' benchmark:
      # Executed 1000000 pipe operations between two processes

           Total time: 4.766 [sec]

             4.766540 usecs/op
               209795 ops/sec

       contended   total wait     max wait     avg wait          pid   owner

             403    565.32 us     26.81 us      1.40 us           -1   Unknown
               4     27.99 us      8.57 us      7.00 us      1583145   sched-pipe
               1      8.25 us      8.25 us      8.25 us      1583144   sched-pipe
               1      2.03 us      2.03 us      2.03 us         5068   chrome

     The owner is unknown in most cases. Filtering only for the mutex
     locks, it is more likely to get the owners.

   - Add -S/--callstack-filter to limit the display to entries having
     the given string in the callstack:

      $ sudo ./perf lock contention -abv -S net sleep 1
      ...
       contended   total wait     max wait     avg wait         type   caller

               5     70.20 us     16.13 us     14.04 us     spinlock   __dev_queue_xmit+0xb6d
                        0xffffffffa5dd1c60  _raw_spin_lock+0x30
                        0xffffffffa5b8f6ed  __dev_queue_xmit+0xb6d
                        0xffffffffa5cd8267  ip6_finish_output2+0x2c7
                        0xffffffffa5cdac14  ip6_finish_output+0x1d4
                        0xffffffffa5cdb477  ip6_xmit+0x457
                        0xffffffffa5d1fd17  inet6_csk_xmit+0xd7
                        0xffffffffa5c5f4aa  __tcp_transmit_skb+0x54a
                        0xffffffffa5c6467d  tcp_keepalive_timer+0x2fd

     Please note that to have the -b option (BPF) working above one has
     to build with BUILD_BPF_SKEL=1.

   - Add more 'perf test' entries to test these new features.

  perf script:

   - Add a 'cgroup' field for 'perf script' output:

      $ perf record --all-cgroups -- true
      $ perf script -F comm,pid,cgroup
             true 337112 /user.slice/user-657345.slice/user@657345.service/...
             true 337112 /user.slice/user-657345.slice/user@657345.service/...
             true 337112 /user.slice/user-657345.slice/user@657345.service/...
             true 337112 /user.slice/user-657345.slice/user@657345.service/...

   - Add support for showing branch speculation information in
     'perf script' and in the 'perf report' raw dump (-D).

  perf record:

   - Fix 'perf record' segfault with --overwrite and --max-size.

  perf test/bench:

   - Switch the basic BPF filtering test to use a syscall tracepoint to
     avoid the variable number of probes inserted when using the
     previous probe point (do_epoll_wait), which happens on different
     CPU architectures.

   - Fix the DWARF unwind test by adding non-inline to the expected
     function in a backtrace.

   - Use 'grep -c' where the longer form 'grep | wc -l' was being used.

   - Add getpid and execve benchmarks to 'perf bench syscall'.

  Intel PT:

   - Add support for synthesizing "cycle" events from Intel PT traces,
     as we support "instruction" events when Intel PT CYC packets are
     available. This enables much more accurate profiles than when
     using the regular 'perf record -e cycles' (the default) when the
     workload lasts for very short periods (<10ms).

   - .plt symbol handling improvements: better handling of IBT (in the
     past MPX), done in the context of decoding Intel PT processor
     traces, IFUNC symbols on x86_64, static executables, and
     understanding .plt.got symbols on x86_64.

   - Add a 'perf test' to test symbol resolution, part of the .plt
     improvements series; this tests things like symbol size in
     contexts where only the symbol start is available (kallsyms), etc.

   - Better handle auxtrace/Intel PT data when using pipe mode
     (perf record sleep 1 | perf report).

   - Fix symbol lookup with kcore when multiple segments match stext,
     which got the symbol resolution to just show DSOs as unknown.

  ARM:

   - Timestamp improvements for ARM64 systems with ETMv4 (Embedded
     Trace Macrocell v4).

   - Ensure ARM64 CoreSight timestamps don't go backwards.

   - Document that ARM64 SPE (Statistical Profiling Extension) is used
     with 'perf c2c/mem'.

   - Add raw decoding for the ARM64 SPEv1.2 previous branch address.

   - Update neoverse-n2-v2 ARM vendor events (JSON tables): topdown L1,
     TLB, cache, branch, PE utilization and instruction mix metrics.

   - Update decoder code for OpenCSD version 1.4, on ARM64 systems.

   - Fix command line auto-complete of CPU events on aarch64.

  Build:

   - Fix 'perf probe' and 'perf test' when libtraceevent isn't linked;
     as several tests use tracepoints, those should be skipped.

   - More fallout fixes for the removal of tools/lib/traceevent/.

   - Fix build error when linking with libpfm"

* tag 'perf-tools-for-v6.3-1-2023-02-22' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (114 commits)
  perf tests stat_all_metrics: Change true workload to sleep workload for system wide check
  perf vendor events power10: Add JSON metric events to present CPI stall cycles in powerpc
  perf intel-pt: Synthesize cycle events
  perf c2c: Add report option to show false sharing in adjacent cachelines
  perf record: Fix segfault with --overwrite and --max-size
  perf stat: Avoid merging/aggregating metric counts twice
  perf tools: Fix perf tool build error in util/pfm.c
  perf tools: Fix auto-complete on aarch64
  perf lock contention: Support old rw_semaphore type
  perf lock contention: Add -o/--lock-owner option
  perf lock contention: Fix to save callstack for the default modified
  perf test bpf: Skip test if kernel-debuginfo is not present
  perf probe: Update the exit error codes in function try_to_find_probe_trace_event
  perf script: Fix missing Retire Latency fields option documentation
  perf event x86: Add retire_lat when synthesizing PERF_SAMPLE_WEIGHT_STRUCT
  perf test x86: Support the retire_lat (Retire Latency) sample_type check
  perf test bpf: Check for libtraceevent support
  perf script: Support Retire Latency
  perf report: Support Retire Latency
  perf lock contention: Support filters for different aggregation
  ...
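
The .plt and IFUNC items above are what the diff to symbol-elf.c below
implements; in particular, the new get_ifunc_name() maps the addend of an
R_X86_64_IRELATIVE relocation back to an IFUNC symbol so a PLT entry can be
named "symbol@plt" rather than "offset_0x...@plt". For readers unfamiliar
with IFUNCs, here is a minimal sketch of code that produces such relocations,
assuming GCC on a glibc x86_64 target (my_memcpy and resolve_my_memcpy are
hypothetical names, not part of the patch):

    /* ifunc-example.c: hypothetical IFUNC, for illustration only */
    #include <stdio.h>
    #include <string.h>

    static void *memcpy_generic(void *dst, const void *src, size_t n)
    {
            return memcpy(dst, src, n);
    }

    /* Runs at load time and picks the implementation to use */
    static void *(*resolve_my_memcpy(void))(void *, const void *, size_t)
    {
            return memcpy_generic;
    }

    /* The linker emits an IRELATIVE relocation whose addend points at
     * the resolver; that addend is what get_ifunc_name() looks up. */
    void *my_memcpy(void *dst, const void *src, size_t n)
            __attribute__((ifunc("resolve_my_memcpy")));

    int main(void)
    {
            char buf[4];

            my_memcpy(buf, "abc", 4);   /* resolved once, at load time */
            puts(buf);
            return 0;
    }

Linked statically this still produces a .plt with IRELATIVE entries in
.rela.plt, which is the static-executable case the patch handles by falling
back from .dynsym to .symtab.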
Diffstat (limited to 'tools/perf/util/symbol-elf.c')
-rw-r--r--  tools/perf/util/symbol-elf.c  527
1 file changed, 425 insertions(+), 102 deletions(-)
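
The first hunk below replaces the elf_section__for_each_rel/rela iteration
macros with a rel_info structure that can be walked in offset order:
sort_rel() fills an array of relocation indexes and sorts it with qsort_r(),
and get_rel_symidx() then indirects through that array. A standalone
illustration of that index-sorting pattern, assuming glibc's qsort_r() and
made-up offsets:

    #define _GNU_SOURCE             /* for glibc qsort_r() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Hypothetical relocation offsets, standing in for get_rel_offset() */
    static const uint64_t offsets[] = { 0x4030, 0x1010, 0x2020 };

    /* Compare two indexes by the offsets they refer to, like rel_cmp() */
    static int cmp_idx(const void *a, const void *b, void *ctx)
    {
            const uint64_t *off = ctx;
            uint64_t ao = off[*(const uint32_t *)a];
            uint64_t bo = off[*(const uint32_t *)b];

            return ao < bo ? -1 : (ao > bo ? 1 : 0);
    }

    int main(void)
    {
            uint32_t sorted[3] = { 0, 1, 2 };

            /* Sort the index array, not the data it refers to */
            qsort_r(sorted, 3, sizeof(sorted[0]), cmp_idx, (void *)offsets);

            for (int i = 0; i < 3; i++)
                    printf("%u ", sorted[i]);   /* prints: 1 2 0 */
            putchar('\n');
            return 0;
    }

Keeping a separate sorted index array leaves the relocation data itself
untouched, which is why get_rel_symidx() simply does
"idx = ri->sorted ? ri->sorted[idx] : idx" before fetching the entry.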
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 96767d1b3f1c..41882ae8452e 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -323,15 +323,325 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
return demangled;
}
-#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
- for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
- idx < nr_entries; \
- ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
+struct rel_info {
+ u32 nr_entries;
+ u32 *sorted;
+ bool is_rela;
+ Elf_Data *reldata;
+ GElf_Rela rela;
+ GElf_Rel rel;
+};
+
+static u32 get_rel_symidx(struct rel_info *ri, u32 idx)
+{
+ idx = ri->sorted ? ri->sorted[idx] : idx;
+ if (ri->is_rela) {
+ gelf_getrela(ri->reldata, idx, &ri->rela);
+ return GELF_R_SYM(ri->rela.r_info);
+ }
+ gelf_getrel(ri->reldata, idx, &ri->rel);
+ return GELF_R_SYM(ri->rel.r_info);
+}
-#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
- for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
- idx < nr_entries; \
- ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
+static u64 get_rel_offset(struct rel_info *ri, u32 x)
+{
+ if (ri->is_rela) {
+ GElf_Rela rela;
+
+ gelf_getrela(ri->reldata, x, &rela);
+ return rela.r_offset;
+ } else {
+ GElf_Rel rel;
+
+ gelf_getrel(ri->reldata, x, &rel);
+ return rel.r_offset;
+ }
+}
+
+static int rel_cmp(const void *a, const void *b, void *r)
+{
+ struct rel_info *ri = r;
+ u64 a_offset = get_rel_offset(ri, *(const u32 *)a);
+ u64 b_offset = get_rel_offset(ri, *(const u32 *)b);
+
+ return a_offset < b_offset ? -1 : (a_offset > b_offset ? 1 : 0);
+}
+
+static int sort_rel(struct rel_info *ri)
+{
+ size_t sz = sizeof(ri->sorted[0]);
+ u32 i;
+
+ ri->sorted = calloc(ri->nr_entries, sz);
+ if (!ri->sorted)
+ return -1;
+ for (i = 0; i < ri->nr_entries; i++)
+ ri->sorted[i] = i;
+ qsort_r(ri->sorted, ri->nr_entries, sz, rel_cmp, ri);
+ return 0;
+}
+
+/*
+ * For x86_64, the GNU linker is putting IFUNC information in the relocation
+ * addend.
+ */
+static bool addend_may_be_ifunc(GElf_Ehdr *ehdr, struct rel_info *ri)
+{
+ return ehdr->e_machine == EM_X86_64 && ri->is_rela &&
+ GELF_R_TYPE(ri->rela.r_info) == R_X86_64_IRELATIVE;
+}
+
+static bool get_ifunc_name(Elf *elf, struct dso *dso, GElf_Ehdr *ehdr,
+ struct rel_info *ri, char *buf, size_t buf_sz)
+{
+ u64 addr = ri->rela.r_addend;
+ struct symbol *sym;
+ GElf_Phdr phdr;
+
+ if (!addend_may_be_ifunc(ehdr, ri))
+ return false;
+
+ if (elf_read_program_header(elf, addr, &phdr))
+ return false;
+
+ addr -= phdr.p_vaddr - phdr.p_offset;
+
+ sym = dso__find_symbol_nocache(dso, addr);
+
+ /* Expecting the address to be an IFUNC or IFUNC alias */
+ if (!sym || sym->start != addr || (sym->type != STT_GNU_IFUNC && !sym->ifunc_alias))
+ return false;
+
+ snprintf(buf, buf_sz, "%s@plt", sym->name);
+
+ return true;
+}
+
+static void exit_rel(struct rel_info *ri)
+{
+ free(ri->sorted);
+}
+
+static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
+ u64 *plt_header_size, u64 *plt_entry_size)
+{
+ switch (ehdr->e_machine) {
+ case EM_ARM:
+ *plt_header_size = 20;
+ *plt_entry_size = 12;
+ return true;
+ case EM_AARCH64:
+ *plt_header_size = 32;
+ *plt_entry_size = 16;
+ return true;
+ case EM_SPARC:
+ *plt_header_size = 48;
+ *plt_entry_size = 12;
+ return true;
+ case EM_SPARCV9:
+ *plt_header_size = 128;
+ *plt_entry_size = 32;
+ return true;
+ case EM_386:
+ case EM_X86_64:
+ *plt_entry_size = shdr_plt->sh_entsize;
+ /* Size is 8 or 16, if not, assume alignment indicates size */
+ if (*plt_entry_size != 8 && *plt_entry_size != 16)
+ *plt_entry_size = shdr_plt->sh_addralign == 8 ? 8 : 16;
+ *plt_header_size = *plt_entry_size;
+ break;
+ default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to be checked */
+ *plt_header_size = shdr_plt->sh_entsize;
+ *plt_entry_size = shdr_plt->sh_entsize;
+ break;
+ }
+ if (*plt_entry_size)
+ return true;
+ pr_debug("Missing PLT entry size for %s\n", dso->long_name);
+ return false;
+}
+
+static bool machine_is_x86(GElf_Half e_machine)
+{
+ return e_machine == EM_386 || e_machine == EM_X86_64;
+}
+
+struct rela_dyn {
+ GElf_Addr offset;
+ u32 sym_idx;
+};
+
+struct rela_dyn_info {
+ struct dso *dso;
+ Elf_Data *plt_got_data;
+ u32 nr_entries;
+ struct rela_dyn *sorted;
+ Elf_Data *dynsym_data;
+ Elf_Data *dynstr_data;
+ Elf_Data *rela_dyn_data;
+};
+
+static void exit_rela_dyn(struct rela_dyn_info *di)
+{
+ free(di->sorted);
+}
+
+static int cmp_offset(const void *a, const void *b)
+{
+ const struct rela_dyn *va = a;
+ const struct rela_dyn *vb = b;
+
+ return va->offset < vb->offset ? -1 : (va->offset > vb->offset ? 1 : 0);
+}
+
+static int sort_rela_dyn(struct rela_dyn_info *di)
+{
+ u32 i, n;
+
+ di->sorted = calloc(di->nr_entries, sizeof(di->sorted[0]));
+ if (!di->sorted)
+ return -1;
+
+ /* Get data for sorting: the offset and symbol index */
+ for (i = 0, n = 0; i < di->nr_entries; i++) {
+ GElf_Rela rela;
+ u32 sym_idx;
+
+ gelf_getrela(di->rela_dyn_data, i, &rela);
+ sym_idx = GELF_R_SYM(rela.r_info);
+ if (sym_idx) {
+ di->sorted[n].sym_idx = sym_idx;
+ di->sorted[n].offset = rela.r_offset;
+ n += 1;
+ }
+ }
+
+ /* Sort by offset */
+ di->nr_entries = n;
+ qsort(di->sorted, n, sizeof(di->sorted[0]), cmp_offset);
+
+ return 0;
+}
+
+static void get_rela_dyn_info(Elf *elf, GElf_Ehdr *ehdr, struct rela_dyn_info *di, Elf_Scn *scn)
+{
+ GElf_Shdr rela_dyn_shdr;
+ GElf_Shdr shdr;
+
+ di->plt_got_data = elf_getdata(scn, NULL);
+
+ scn = elf_section_by_name(elf, ehdr, &rela_dyn_shdr, ".rela.dyn", NULL);
+ if (!scn || !rela_dyn_shdr.sh_link || !rela_dyn_shdr.sh_entsize)
+ return;
+
+ di->nr_entries = rela_dyn_shdr.sh_size / rela_dyn_shdr.sh_entsize;
+ di->rela_dyn_data = elf_getdata(scn, NULL);
+
+ scn = elf_getscn(elf, rela_dyn_shdr.sh_link);
+ if (!scn || !gelf_getshdr(scn, &shdr) || !shdr.sh_link)
+ return;
+
+ di->dynsym_data = elf_getdata(scn, NULL);
+ di->dynstr_data = elf_getdata(elf_getscn(elf, shdr.sh_link), NULL);
+
+ if (!di->plt_got_data || !di->dynstr_data || !di->dynsym_data || !di->rela_dyn_data)
+ return;
+
+ /* Sort into offset order */
+ sort_rela_dyn(di);
+}
+
+/* Get instruction displacement from a plt entry for x86_64 */
+static u32 get_x86_64_plt_disp(const u8 *p)
+{
+ u8 endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa};
+ int n = 0;
+
+ /* Skip endbr64 */
+ if (!memcmp(p, endbr64, sizeof(endbr64)))
+ n += sizeof(endbr64);
+ /* Skip bnd prefix */
+ if (p[n] == 0xf2)
+ n += 1;
+ /* jmp with 4-byte displacement */
+ if (p[n] == 0xff && p[n + 1] == 0x25) {
+ n += 2;
+ /* Also add offset from start of entry to end of instruction */
+ return n + 4 + le32toh(*(const u32 *)(p + n));
+ }
+ return 0;
+}
+
+static bool get_plt_got_name(GElf_Shdr *shdr, size_t i,
+ struct rela_dyn_info *di,
+ char *buf, size_t buf_sz)
+{
+ struct rela_dyn vi, *vr;
+ const char *sym_name;
+ char *demangled;
+ GElf_Sym sym;
+ u32 disp;
+
+ if (!di->sorted)
+ return false;
+
+ disp = get_x86_64_plt_disp(di->plt_got_data->d_buf + i);
+ if (!disp)
+ return false;
+
+ /* Compute target offset of the .plt.got entry */
+ vi.offset = shdr->sh_offset + di->plt_got_data->d_off + i + disp;
+
+ /* Find that offset in .rela.dyn (sorted by offset) */
+ vr = bsearch(&vi, di->sorted, di->nr_entries, sizeof(di->sorted[0]), cmp_offset);
+ if (!vr)
+ return false;
+
+ /* Get the associated symbol */
+ gelf_getsym(di->dynsym_data, vr->sym_idx, &sym);
+ sym_name = elf_sym__name(&sym, di->dynstr_data);
+ demangled = demangle_sym(di->dso, 0, sym_name);
+ if (demangled != NULL)
+ sym_name = demangled;
+
+ snprintf(buf, buf_sz, "%s@plt", sym_name);
+
+ free(demangled);
+
+ return *sym_name;
+}
+
+static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf,
+ GElf_Ehdr *ehdr,
+ char *buf, size_t buf_sz)
+{
+ struct rela_dyn_info di = { .dso = dso };
+ struct symbol *sym;
+ GElf_Shdr shdr;
+ Elf_Scn *scn;
+ int err = -1;
+ size_t i;
+
+ scn = elf_section_by_name(elf, ehdr, &shdr, ".plt.got", NULL);
+ if (!scn || !shdr.sh_entsize)
+ return 0;
+
+ if (ehdr->e_machine == EM_X86_64)
+ get_rela_dyn_info(elf, ehdr, &di, scn);
+
+ for (i = 0; i < shdr.sh_size; i += shdr.sh_entsize) {
+ if (!get_plt_got_name(&shdr, i, &di, buf, buf_sz))
+ snprintf(buf, buf_sz, "offset_%#" PRIx64 "@plt", (u64)shdr.sh_offset + i);
+ sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf);
+ if (!sym)
+ goto out;
+ symbols__insert(&dso->symbols, sym);
+ }
+ err = 0;
+out:
+ exit_rela_dyn(&di);
+ return err;
+}
/*
* We need to check if we have a .dynsym, so that we can handle the
@@ -342,56 +652,104 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
*/
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
{
- uint32_t nr_rel_entries, idx;
+ uint32_t idx;
GElf_Sym sym;
u64 plt_offset, plt_header_size, plt_entry_size;
- GElf_Shdr shdr_plt;
- struct symbol *f;
+ GElf_Shdr shdr_plt, plt_sec_shdr;
+ struct symbol *f, *plt_sym;
GElf_Shdr shdr_rel_plt, shdr_dynsym;
- Elf_Data *reldata, *syms, *symstrs;
+ Elf_Data *syms, *symstrs;
Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
- size_t dynsym_idx;
GElf_Ehdr ehdr;
char sympltname[1024];
Elf *elf;
- int nr = 0, symidx, err = 0;
-
- if (!ss->dynsym)
- return 0;
+ int nr = 0, err = -1;
+ struct rel_info ri = { .is_rela = false };
+ bool lazy_plt;
elf = ss->elf;
ehdr = ss->ehdr;
- scn_dynsym = ss->dynsym;
- shdr_dynsym = ss->dynshdr;
- dynsym_idx = ss->dynsym_idx;
+ if (!elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL))
+ return 0;
+
+ /*
+ * A symbol from a previous section (e.g. .init) can have been expanded
+ * by symbols__fixup_end() to overlap .plt. Truncate it before adding
+ * a symbol for .plt header.
+ */
+ f = dso__find_symbol_nocache(dso, shdr_plt.sh_offset);
+ if (f && f->start < shdr_plt.sh_offset && f->end > shdr_plt.sh_offset)
+ f->end = shdr_plt.sh_offset;
+
+ if (!get_plt_sizes(dso, &ehdr, &shdr_plt, &plt_header_size, &plt_entry_size))
+ return 0;
+
+ /* Add a symbol for .plt header */
+ plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt");
+ if (!plt_sym)
+ goto out_elf_end;
+ symbols__insert(&dso->symbols, plt_sym);
- if (scn_dynsym == NULL)
+ /* Only x86 has .plt.got */
+ if (machine_is_x86(ehdr.e_machine) &&
+ dso__synthesize_plt_got_symbols(dso, elf, &ehdr, sympltname, sizeof(sympltname)))
goto out_elf_end;
+ /* Only x86 has .plt.sec */
+ if (machine_is_x86(ehdr.e_machine) &&
+ elf_section_by_name(elf, &ehdr, &plt_sec_shdr, ".plt.sec", NULL)) {
+ if (!get_plt_sizes(dso, &ehdr, &plt_sec_shdr, &plt_header_size, &plt_entry_size))
+ return 0;
+ /* Extend .plt symbol to entire .plt */
+ plt_sym->end = plt_sym->start + shdr_plt.sh_size;
+ /* Use .plt.sec offset */
+ plt_offset = plt_sec_shdr.sh_offset;
+ lazy_plt = false;
+ } else {
+ plt_offset = shdr_plt.sh_offset;
+ lazy_plt = true;
+ }
+
scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
".rela.plt", NULL);
if (scn_plt_rel == NULL) {
scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
".rel.plt", NULL);
if (scn_plt_rel == NULL)
- goto out_elf_end;
+ return 0;
}
- err = -1;
+ if (shdr_rel_plt.sh_type != SHT_RELA &&
+ shdr_rel_plt.sh_type != SHT_REL)
+ return 0;
- if (shdr_rel_plt.sh_link != dynsym_idx)
- goto out_elf_end;
+ if (!shdr_rel_plt.sh_link)
+ return 0;
- if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
+ if (shdr_rel_plt.sh_link == ss->dynsym_idx) {
+ scn_dynsym = ss->dynsym;
+ shdr_dynsym = ss->dynshdr;
+ } else if (shdr_rel_plt.sh_link == ss->symtab_idx) {
+ /*
+ * A static executable can have a .plt due to IFUNCs, in which
+ * case .symtab is used not .dynsym.
+ */
+ scn_dynsym = ss->symtab;
+ shdr_dynsym = ss->symshdr;
+ } else {
goto out_elf_end;
+ }
+
+ if (!scn_dynsym)
+ return 0;
/*
* Fetch the relocation section to find the idxes to the GOT
* and the symbols in the .dynsym they refer to.
*/
- reldata = elf_getdata(scn_plt_rel, NULL);
- if (reldata == NULL)
+ ri.reldata = elf_getdata(scn_plt_rel, NULL);
+ if (!ri.reldata)
goto out_elf_end;
syms = elf_getdata(scn_dynsym, NULL);
@@ -409,93 +767,57 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
if (symstrs->d_size == 0)
goto out_elf_end;
- nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
- plt_offset = shdr_plt.sh_offset;
- switch (ehdr.e_machine) {
- case EM_ARM:
- plt_header_size = 20;
- plt_entry_size = 12;
- break;
+ ri.nr_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
- case EM_AARCH64:
- plt_header_size = 32;
- plt_entry_size = 16;
- break;
+ ri.is_rela = shdr_rel_plt.sh_type == SHT_RELA;
- case EM_SPARC:
- plt_header_size = 48;
- plt_entry_size = 12;
- break;
-
- case EM_SPARCV9:
- plt_header_size = 128;
- plt_entry_size = 32;
- break;
-
- default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to be checked */
- plt_header_size = shdr_plt.sh_entsize;
- plt_entry_size = shdr_plt.sh_entsize;
- break;
+ if (lazy_plt) {
+ /*
+ * Assume a .plt with the same number of entries as the number
+ * of relocation entries is not lazy and does not have a header.
+ */
+ if (ri.nr_entries * plt_entry_size == shdr_plt.sh_size)
+ dso__delete_symbol(dso, plt_sym);
+ else
+ plt_offset += plt_header_size;
}
- plt_offset += plt_header_size;
-
- if (shdr_rel_plt.sh_type == SHT_RELA) {
- GElf_Rela pos_mem, *pos;
- elf_section__for_each_rela(reldata, pos, pos_mem, idx,
- nr_rel_entries) {
- const char *elf_name = NULL;
- char *demangled = NULL;
- symidx = GELF_R_SYM(pos->r_info);
- gelf_getsym(syms, symidx, &sym);
+ /*
+ * x86 doesn't insert IFUNC relocations in .plt order, so sort to get
+ * back in order.
+ */
+ if (machine_is_x86(ehdr.e_machine) && sort_rel(&ri))
+ goto out_elf_end;
- elf_name = elf_sym__name(&sym, symstrs);
- demangled = demangle_sym(dso, 0, elf_name);
- if (demangled != NULL)
- elf_name = demangled;
- snprintf(sympltname, sizeof(sympltname),
- "%s@plt", elf_name);
- free(demangled);
+ for (idx = 0; idx < ri.nr_entries; idx++) {
+ const char *elf_name = NULL;
+ char *demangled = NULL;
- f = symbol__new(plt_offset, plt_entry_size,
- STB_GLOBAL, STT_FUNC, sympltname);
- if (!f)
- goto out_elf_end;
+ gelf_getsym(syms, get_rel_symidx(&ri, idx), &sym);
- plt_offset += plt_entry_size;
- symbols__insert(&dso->symbols, f);
- ++nr;
- }
- } else if (shdr_rel_plt.sh_type == SHT_REL) {
- GElf_Rel pos_mem, *pos;
- elf_section__for_each_rel(reldata, pos, pos_mem, idx,
- nr_rel_entries) {
- const char *elf_name = NULL;
- char *demangled = NULL;
- symidx = GELF_R_SYM(pos->r_info);
- gelf_getsym(syms, symidx, &sym);
-
- elf_name = elf_sym__name(&sym, symstrs);
- demangled = demangle_sym(dso, 0, elf_name);
- if (demangled != NULL)
- elf_name = demangled;
+ elf_name = elf_sym__name(&sym, symstrs);
+ demangled = demangle_sym(dso, 0, elf_name);
+ if (demangled)
+ elf_name = demangled;
+ if (*elf_name)
+ snprintf(sympltname, sizeof(sympltname), "%s@plt", elf_name);
+ else if (!get_ifunc_name(elf, dso, &ehdr, &ri, sympltname, sizeof(sympltname)))
snprintf(sympltname, sizeof(sympltname),
- "%s@plt", elf_name);
- free(demangled);
+ "offset_%#" PRIx64 "@plt", plt_offset);
+ free(demangled);
- f = symbol__new(plt_offset, plt_entry_size,
- STB_GLOBAL, STT_FUNC, sympltname);
- if (!f)
- goto out_elf_end;
+ f = symbol__new(plt_offset, plt_entry_size, STB_GLOBAL, STT_FUNC, sympltname);
+ if (!f)
+ goto out_elf_end;
- plt_offset += plt_entry_size;
- symbols__insert(&dso->symbols, f);
- ++nr;
- }
+ plt_offset += plt_entry_size;
+ symbols__insert(&dso->symbols, f);
+ ++nr;
}
err = 0;
out_elf_end:
+ exit_rel(&ri);
if (err == 0)
return nr;
pr_debug("%s: problems reading %s PLT info.\n",
@@ -946,8 +1268,9 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
+ ss->symtab_idx = 0;
ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
- NULL);
+ &ss->symtab_idx);
if (ss->symshdr.sh_type != SHT_SYMTAB)
ss->symtab = NULL;
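
To make the displacement arithmetic in the new get_x86_64_plt_disp() above
concrete, here is a standalone sketch that feeds the same decoding logic a
hypothetical .plt.got entry ("endbr64; bnd jmp *disp32(%rip)"); the byte
values are made up for illustration and le32toh() comes from the glibc
<endian.h> header, as used in the patch:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same decoding as get_x86_64_plt_disp(): skip an optional endbr64 and
     * an optional bnd (0xf2) prefix, expect "jmp *disp32(%rip)" (ff 25),
     * and return the instruction length plus the 32-bit displacement. */
    static uint32_t plt_disp(const uint8_t *p)
    {
            static const uint8_t endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa};
            uint32_t disp;
            int n = 0;

            if (!memcmp(p, endbr64, sizeof(endbr64)))
                    n += sizeof(endbr64);
            if (p[n] == 0xf2)                       /* bnd prefix */
                    n += 1;
            if (p[n] == 0xff && p[n + 1] == 0x25) { /* jmp *disp32(%rip) */
                    n += 2;
                    memcpy(&disp, p + n, sizeof(disp));
                    return n + 4 + le32toh(disp);
            }
            return 0;
    }

    int main(void)
    {
            /* Hypothetical entry: endbr64; bnd jmp *0x2f66(%rip) */
            const uint8_t entry[16] = {
                    0xf3, 0x0f, 0x1e, 0xfa,         /* endbr64 */
                    0xf2, 0xff, 0x25,               /* bnd jmp *disp32(%rip) */
                    0x66, 0x2f, 0x00, 0x00,         /* disp32 = 0x2f66 (LE) */
            };

            /* 11 decoded bytes + 0x2f66 displacement = 0x2f71 */
            printf("disp = %#x\n", plt_disp(entry));
            return 0;
    }

get_plt_got_name() then adds that return value to the entry's file offset to
get the location the entry jumps through, and bsearch()es for it in the
offset-sorted .rela.dyn table to recover the symbol name.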