path: root/arch/sparc/vdso/vdso2c.h
author     Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>  2017-09-21 18:05:31 +0300
committer  David S. Miller <davem@davemloft.net>  2017-11-15 08:21:03 +0300
commit     9a08862a5d2e266ecea1865547463da2745fc687 (patch)
tree       c651ef6b7c2d7dc75e0ec73fcc6d2e2f15f72a9e /arch/sparc/vdso/vdso2c.h
parent     23198ddffb6cddb5d5824230af4dd4b46e4046a4 (diff)
download   linux-9a08862a5d2e266ecea1865547463da2745fc687.tar.xz
vDSO for sparc
The following patch is based on work done by Nick Alcock on the 64-bit
vDSO for sparc in Oracle Linux. I have extended it to include support
for a 32-bit vDSO for sparc on a 64-bit kernel.

The vDSO for sparc is based on the x86 implementation. This patch
provides vDSO support for both 64-bit and 32-bit programs on a 64-bit
kernel. The vDSO is disabled on 32-bit Linux kernels on sparc.

*) vclock_gettime.c contains all the vDSO functions. Since the data
   page is mapped before the vDSO code page, the pointer to the data
   page is obtained by subtracting an offset from an address in the
   vDSO code page. The return address stored in %i7 is used for this
   purpose (a rough sketch follows below, after the sign-offs).

*) During compilation, both the 32-bit and 64-bit vDSO images are
   compiled and converted into raw bytes by the vdso2c program, ready
   for mapping into the process. 32-bit images are compiled only if
   CONFIG_COMPAT is enabled. vdso2c generates two files,
   vdso-image-64.c and vdso-image-32.c, which contain the respective
   vDSO images as C structures.

*) During vDSO initialization, the required number of vDSO pages are
   allocated and the raw bytes are copied into them.

*) On every exec, these pages are mapped into the process through
   arch_setup_additional_pages, and the location of the mapping is
   passed to the process through the aux vector entry AT_SYSINFO_EHDR,
   which is used by glibc (see the getauxval example below).

*) A new update_vsyscall routine for sparc is added to keep the data
   page in the vDSO updated.

*) As the vDSO cannot contain dynamically relocatable references, a new
   version of cpu_relax is added for the vDSO's use.

This change also requires a corresponding change to glibc to use the
vDSO. For testing, programs that want to try the vDSO can be compiled
against the generated vdso(64/32).so in the source tree.

Testing:
========

[root@localhost ~]# cat vdso_test.c
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main()
{
	struct timespec tv_start, tv_end;
	struct timeval tv_tmp;
	int i;
	int count = 1 * 1000 * 10000;
	long long diff;

	clock_gettime(0, &tv_start);
	for (i = 0; i < count; i++)
		gettimeofday(&tv_tmp, NULL);
	clock_gettime(0, &tv_end);

	diff = (long long)(tv_end.tv_sec - tv_start.tv_sec) * (1*1000*1000*1000);
	diff += (tv_end.tv_nsec - tv_start.tv_nsec);

	printf("Start sec: %ld\n", (long)tv_start.tv_sec);
	printf("End sec : %ld\n", (long)tv_end.tv_sec);
	printf("%d cycles in %lld ns = %f ns/cycle\n",
	       count, diff, (double)diff / (double)count);

	return 0;
}

[root@localhost ~]# cc vdso_test.c -o t32_without_fix -m32 -lrt
[root@localhost ~]# ./t32_without_fix
Start sec: 1502396130
End sec : 1502396140
10000000 cycles in 9565148528 ns = 956.514853 ns/cycle

[root@localhost ~]# cc vdso_test.c -o t32_with_fix -m32 ./vdso32.so.dbg
[root@localhost ~]# ./t32_with_fix
Start sec: 1502396168
End sec : 1502396169
10000000 cycles in 798141262 ns = 79.814126 ns/cycle

[root@localhost ~]# cc vdso_test.c -o t64_without_fix -m64 -lrt
[root@localhost ~]# ./t64_without_fix
Start sec: 1502396208
End sec : 1502396218
10000000 cycles in 9846091800 ns = 984.609180 ns/cycle

[root@localhost ~]# cc vdso_test.c -o t64_with_fix -m64 ./vdso64.so.dbg
[root@localhost ~]# ./t64_with_fix
Start sec: 1502396257
End sec : 1502396257
10000000 cycles in 380984048 ns = 38.098405 ns/cycle

V1 to V2 Changes:
=================
Added hot-patching code to switch the read-stick instruction to the
read-tick instruction depending on the hardware.

V2 to V3 Changes:
=================
Merged the latest changes from sparc-next and moved the initialization
of clocksource_tick.archdata.vclock_mode to time_init_early.
Disabled the queued spinlock and rwlock configuration when simulating a
32-bit config to compile the 32-bit vDSO.

V3 to V4 Changes:
=================
Hardcoded the page size as 8192 in the linker script for both the
64-bit and 32-bit binaries.
Removed unused variables in vdso2c.h.
Added the -mv8plus flag to the Makefile to prevent the generation of
relocation entries for __lshrdi3 in the 32-bit vDSO binary.

Signed-off-by: Nick Alcock <nick.alcock@oracle.com>
Signed-off-by: Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
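
For illustration only (not part of the patch): the data-page lookup
described in the first bullet can be sketched roughly as below. The
helper name vdso_data_pointer, the hard-coded 8192-byte page size, and
the assumption that the call site sits in the first vDSO code page are
placeholders for this sketch; the actual helper in vclock_gettime.c may
differ.

/*
 * Sketch: %i7 holds the caller's return address, which (for a call made
 * from within the vDSO itself) lies inside the vDSO code page. Rounding
 * it down to the 8192-byte page boundary and stepping back one page
 * reaches the data page mapped just before the code page.
 */
static inline void *vdso_data_pointer(void)
{
	unsigned long ret;

	__asm__("mov %%i7, %0" : "=r" (ret));
	ret &= ~(8192UL - 1);	/* start of the vDSO code page */
	ret -= 8192;		/* the data page precedes the code page */

	return (void *)ret;
}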
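
Likewise for illustration (not part of the patch): a userspace program
can observe where the kernel mapped the vDSO by reading the
AT_SYSINFO_EHDR aux vector entry, the same value glibc consumes.

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR is the address of the vDSO ELF header, or 0 if absent. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO mapped at 0x%lx\n", vdso);
	else
		printf("no vDSO mapping advertised\n");
	return 0;
}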
Diffstat (limited to 'arch/sparc/vdso/vdso2c.h')
-rw-r--r--  arch/sparc/vdso/vdso2c.h  143
1 file changed, 143 insertions, 0 deletions
diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h
new file mode 100644
index 000000000000..808decb0f7be
--- /dev/null
+++ b/arch/sparc/vdso/vdso2c.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * This file is included up to twice from vdso2c.c. It generates code for
+ * 32-bit and 64-bit vDSOs. We will eventually need both for 64-bit builds,
+ * since 32-bit vDSOs will then be built for 32-bit userspace.
+ */
+
+static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+ void *stripped_addr, size_t stripped_len,
+ FILE *outfile, const char *name)
+{
+ int found_load = 0;
+ unsigned long load_size = -1; /* Work around bogus warning */
+ unsigned long mapping_size;
+ int i;
+ unsigned long j;
+
+ ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
+ ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+ ELF(Dyn) *dyn = 0, *dyn_end = 0;
+ INT_BITS syms[NSYMS] = {};
+
+ ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff));
+
+ /* Walk the segment table. */
+ for (i = 0; i < GET_BE(&hdr->e_phnum); i++) {
+ if (GET_BE(&pt[i].p_type) == PT_LOAD) {
+ if (found_load)
+ fail("multiple PT_LOAD segs\n");
+
+ if (GET_BE(&pt[i].p_offset) != 0 ||
+ GET_BE(&pt[i].p_vaddr) != 0)
+ fail("PT_LOAD in wrong place\n");
+
+ if (GET_BE(&pt[i].p_memsz) != GET_BE(&pt[i].p_filesz))
+ fail("cannot handle memsz != filesz\n");
+
+ load_size = GET_BE(&pt[i].p_memsz);
+ found_load = 1;
+ } else if (GET_BE(&pt[i].p_type) == PT_DYNAMIC) {
+ dyn = raw_addr + GET_BE(&pt[i].p_offset);
+ dyn_end = raw_addr + GET_BE(&pt[i].p_offset) +
+ GET_BE(&pt[i].p_memsz);
+ }
+ }
+ if (!found_load)
+ fail("no PT_LOAD seg\n");
+
+ if (stripped_len < load_size)
+ fail("stripped input is too short\n");
+
+ /* Walk the dynamic table */
+ for (i = 0; dyn + i < dyn_end &&
+ GET_BE(&dyn[i].d_tag) != DT_NULL; i++) {
+ typeof(dyn[i].d_tag) tag = GET_BE(&dyn[i].d_tag);
+ typeof(dyn[i].d_un.d_val) val = GET_BE(&dyn[i].d_un.d_val);
+
+ if ((tag == DT_RELSZ || tag == DT_RELASZ) && (val != 0))
+ fail("vdso image contains dynamic relocations\n");
+ }
+
+ /* Walk the section table */
+ for (i = 0; i < GET_BE(&hdr->e_shnum); i++) {
+ ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) +
+ GET_BE(&hdr->e_shentsize) * i;
+ if (GET_BE(&sh->sh_type) == SHT_SYMTAB)
+ symtab_hdr = sh;
+ }
+
+ if (!symtab_hdr)
+ fail("no symbol table\n");
+
+ strtab_hdr = raw_addr + GET_BE(&hdr->e_shoff) +
+ GET_BE(&hdr->e_shentsize) * GET_BE(&symtab_hdr->sh_link);
+
+ /* Walk the symbol table */
+ for (i = 0;
+ i < GET_BE(&symtab_hdr->sh_size) / GET_BE(&symtab_hdr->sh_entsize);
+ i++) {
+ int k;
+
+ ELF(Sym) *sym = raw_addr + GET_BE(&symtab_hdr->sh_offset) +
+ GET_BE(&symtab_hdr->sh_entsize) * i;
+ const char *name = raw_addr + GET_BE(&strtab_hdr->sh_offset) +
+ GET_BE(&sym->st_name);
+
+ for (k = 0; k < NSYMS; k++) {
+ if (!strcmp(name, required_syms[k].name)) {
+ if (syms[k]) {
+ fail("duplicate symbol %s\n",
+ required_syms[k].name);
+ }
+
+ /*
+ * Careful: we use negative addresses, but
+ * st_value is unsigned, so we rely
+ * on syms[k] being a signed type of the
+ * correct width.
+ */
+ syms[k] = GET_BE(&sym->st_value);
+ }
+ }
+ }
+
+ /* Validate mapping addresses. */
+ if (syms[sym_vvar_start] % 8192)
+ fail("vvar_begin must be a multiple of 8192\n");
+
+ if (!name) {
+ fwrite(stripped_addr, stripped_len, 1, outfile);
+ return;
+ }
+
+ mapping_size = (stripped_len + 8191) / 8192 * 8192;
+
+ fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+ fprintf(outfile, "#include <linux/cache.h>\n");
+ fprintf(outfile, "#include <asm/vdso.h>\n");
+ fprintf(outfile, "\n");
+ fprintf(outfile,
+ "static unsigned char raw_data[%lu] __ro_after_init __aligned(8192)= {",
+ mapping_size);
+ for (j = 0; j < stripped_len; j++) {
+ if (j % 10 == 0)
+ fprintf(outfile, "\n\t");
+ fprintf(outfile, "0x%02X, ",
+ (int)((unsigned char *)stripped_addr)[j]);
+ }
+ fprintf(outfile, "\n};\n\n");
+
+ fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name);
+ fprintf(outfile, "\t.data = raw_data,\n");
+ fprintf(outfile, "\t.size = %lu,\n", mapping_size);
+ for (i = 0; i < NSYMS; i++) {
+ if (required_syms[i].export && syms[i])
+ fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
+ required_syms[i].name, (int64_t)syms[i]);
+ }
+ fprintf(outfile, "};\n");
+}
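
For reference, the fprintf calls above emit a generated C file of
roughly the following shape. The byte values, size, symbol offset, and
the image name (here assumed to be "vdso_image_64") are abridged
placeholders for illustration, not the real generated contents.

/* AUTOMATICALLY GENERATED -- DO NOT EDIT */

#include <linux/cache.h>
#include <asm/vdso.h>

static unsigned char raw_data[8192] __ro_after_init __aligned(8192) = {
	0x7F, 0x45, 0x4C, 0x46, 0x02, 0x02, 0x01, 0x00, 0x00, 0x00,
	/* ... remaining bytes of the stripped vDSO image ... */
};

const struct vdso_image vdso_image_64_builtin = {
	.data = raw_data,
	.size = 8192,
	.sym_vvar_start = -8192,
};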