author		Sean Christopherson <sean.j.christopherson@intel.com>	2019-01-25 18:40:50 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2020-04-29 17:31:17 +0300
commit		b4be98039a9224ec0cfc2b706e8e881b9ba53850 (patch)
tree		87cd33ac1284ff5d926d33b5bc9805d509c9e5d6
parent		ed523cbd4a6594edf123dc03ec9d70ea4f793671 (diff)
download	linux-b4be98039a9224ec0cfc2b706e8e881b9ba53850.tar.xz
KVM: VMX: Zero out *all* general purpose registers after VM-Exit
commit 0e0ab73c9a0243736bcd779b30b717e23ba9a56d upstream.

...except RSP, which is restored by hardware as part of VM-Exit.

Paolo theorized that restoring registers from the stack after a VM-Exit
in lieu of zeroing them could lead to speculative execution with the
guest's values, e.g. if the stack accesses miss the L1 cache[1].
Zeroing XORs are dirt cheap, so just be ultra-paranoid.

Note that the scratch register (currently RCX) used to save/restore the
guest state is also zeroed as its host-defined value is loaded via the
stack, just with a MOV instead of a POP.

[1] https://patchwork.kernel.org/patch/10771539/#22441255

Fixes: 0cb5b30698fd ("kvm: vmx: Scrub hardware GPRs at VM-exit")
Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[bwh: Backported to 4.19: adjust filename, context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
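[Editorial note] The 32-bit XOR forms in the patch are deliberate: on
x86-64, writing a 32-bit sub-register zero-extends through the upper 32
bits, so "xor %%r8d, %%r8d" clears all 64 bits of R8. A minimal
user-space sketch demonstrating this property (illustration only, not
part of the patch; assumes GCC on x86-64):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0xdeadbeefcafef00dULL;

	asm volatile("mov %1, %%r8 \n\t"     /* load a 64-bit value into R8     */
		     "xor %%r8d, %%r8d \n\t" /* 32-bit XOR of the low half only */
		     "mov %%r8, %0 \n\t"     /* read back the full register     */
		     : "=r" (val)
		     : "r" (val)
		     : "r8");

	/* Prints 0: the 32-bit write zero-extended into bits 63:32. */
	printf("r8 after 32-bit xor: %#llx\n", (unsigned long long)val);
	return 0;
}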
-rw-r--r--	arch/x86/kvm/vmx.c	12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d37b48173e9c..e4d0ad06790e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10841,6 +10841,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r13, %c[r13](%0) \n\t"
"mov %%r14, %c[r14](%0) \n\t"
"mov %%r15, %c[r15](%0) \n\t"
+
+ /*
+ * Clear all general purpose registers (except RSP, which is loaded by
+ * the CPU during VM-Exit) to prevent speculative use of the guest's
+ * values, even those that are saved/loaded via the stack. In theory,
+ * an L1 cache miss when restoring registers could lead to speculative
+ * execution with the guest's values. Zeroing XORs are dirt cheap,
+ * i.e. the extra paranoia is essentially free.
+ */
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
@@ -10855,8 +10864,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"xor %%eax, %%eax \n\t"
"xor %%ebx, %%ebx \n\t"
+ "xor %%ecx, %%ecx \n\t"
+ "xor %%edx, %%edx \n\t"
"xor %%esi, %%esi \n\t"
"xor %%edi, %%edi \n\t"
+ "xor %%ebp, %%ebp \n\t"
"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
".pushsection .rodata \n\t"
".global vmx_return \n\t"