author     Andy Lutomirski <luto@kernel.org>    2016-11-16 21:23:27 +0300
committer  Ingo Molnar <mingo@kernel.org>       2016-11-17 10:31:14 +0300
commit     a582c540ac1b10f0a7d37415e04c4af42409fd08 (patch)
tree       7aa03d7e392b1f08fea2719e66b536113e40c2e7 /arch/x86/include/asm/vgtod.h
parent     89a01c51cbe3b6ae81008e8c91235be583df8c50 (diff)
download   linux-a582c540ac1b10f0a7d37415e04c4af42409fd08.tar.xz
x86/vdso: Use RDPID in preference to LSL when available
RDPID is a new instruction that reads MSR_TSC_AUX quickly.  This should
be considerably faster than reading the GDT.  Add a cpufeature for it
and use it from __vdso_getcpu() when available.

Tested-by: Megha Dey <megha.dey@intel.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/4f6c3a22012d10f1c65b9ca15800e01b42c7d39d.1479320367.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
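Both the GDT segment limit read by LSL and MSR_TSC_AUX read by RDPID are loaded by the kernel with the same packing, which is why the two instructions are interchangeable here. A minimal sketch of how a caller such as __vdso_getcpu() decodes the value returned by __getcpu(), assuming the 12-bit CPU mask (VGETCPU_CPU_MASK) defined in vgtod.h; the function name below is illustrative, not the exact vDSO source:

/*
 * Sketch: decode the packed value returned by __getcpu().
 * CPU number lives in the low 12 bits (VGETCPU_CPU_MASK == 0xfff),
 * NUMA node number in the bits above it.
 */
static void decode_cpunode(unsigned int p, unsigned *cpu, unsigned *node)
{
	if (cpu)
		*cpu = p & VGETCPU_CPU_MASK;	/* bits 0-11: CPU */
	if (node)
		*node = p >> 12;		/* remaining bits: node */
}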
Diffstat (limited to 'arch/x86/include/asm/vgtod.h')
-rw-r--r--  arch/x86/include/asm/vgtod.h | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e728699db774..3a01996db58f 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -89,8 +89,13 @@ static inline unsigned int __getcpu(void)
 	 * works on all CPUs.  This is volatile so that it orders
 	 * correctly wrt barrier() and to keep gcc from cleverly
 	 * hoisting it out of the calling function.
+	 *
+	 * If RDPID is available, use it.
 	 */
-	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+	alternative_io ("lsl %[seg],%[p]",
+			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+			X86_FEATURE_RDPID,
+			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
 	return p;
 }
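The .byte 0xf3,0x0f,0xc7,0xf8 sequence is the raw encoding of RDPID with %eax/%rax as the destination, used because older assemblers do not know the mnemonic. For context, a hedged user-space probe of the same encoding might look like the sketch below; rdpid_raw() is an illustrative name, and the program assumes it runs on a CPU advertising RDPID (CPUID.(EAX=7,ECX=0):ECX bit 22), otherwise the instruction raises #UD.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative user-space probe (not kernel code): read MSR_TSC_AUX
 * via the raw RDPID encoding.  ModRM byte 0xf8 selects %rax as the
 * destination, matching the bytes used in the alternative above.
 */
static inline uint64_t rdpid_raw(void)
{
	uint64_t aux;

	asm volatile (".byte 0xf3,0x0f,0xc7,0xf8" : "=a" (aux));
	return aux;
}

int main(void)
{
	uint64_t p = rdpid_raw();

	/* Same packing the vDSO decodes: CPU in bits 0-11, node above. */
	printf("cpu=%llu node=%llu\n",
	       (unsigned long long)(p & 0xfff),
	       (unsigned long long)(p >> 12));
	return 0;
}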