summaryrefslogtreecommitdiff
path: root/arch/x86/kvm/vmx/vmenter.S
diff options
context:
space:
mode:
authorSean Christopherson <sean.j.christopherson@intel.com>2019-01-25 18:41:17 +0300
committerPaolo Bonzini <pbonzini@redhat.com>2019-02-21 00:48:17 +0300
commite75c3c3a0487da878cbfa7f125dcd080a8606eaf (patch)
treec9115f813adce4028063196664fe1208b410f36e /arch/x86/kvm/vmx/vmenter.S
parent77df549559dbe7f265ab19bd444d6acb3a718b4d (diff)
downloadlinux-e75c3c3a0487da878cbfa7f125dcd080a8606eaf.tar.xz
KVM: VMX: Return VM-Fail from vCPU-run assembly via standard ABI reg
...to prepare for making the assembly sub-routine callable from C code. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx/vmenter.S')
-rw-r--r-- arch/x86/kvm/vmx/vmenter.S | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index a3d9a8e062f9..e06a3f33311e 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -87,7 +87,7 @@ ENDPROC(vmx_vmexit)
* @launched: %true if the VMCS has been launched
*
* Returns:
- * %RBX is 0 on VM-Exit, 1 on VM-Fail
+ * 0 on VM-Exit, 1 on VM-Fail
*/
ENTRY(__vmx_vcpu_run)
push %_ASM_BP
@@ -163,17 +163,17 @@ ENTRY(__vmx_vcpu_run)
mov %r15, VCPU_R15(%_ASM_AX)
#endif
- /* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
- xor %ebx, %ebx
+ /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
+ xor %eax, %eax
/*
- * Clear all general purpose registers except RSP and RBX to prevent
+ * Clear all general purpose registers except RSP and RAX to prevent
* speculative use of the guest's values, even those that are reloaded
* via the stack. In theory, an L1 cache miss when restoring registers
* could lead to speculative execution with the guest's values.
* Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
- * free. RSP and RBX are exempt as RSP is restored by hardware during
- * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
+ * free. RSP and RAX are exempt as RSP is restored by hardware during
+ * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
*/
1:
#ifdef CONFIG_X86_64
@@ -186,7 +186,7 @@ ENTRY(__vmx_vcpu_run)
xor %r14d, %r14d
xor %r15d, %r15d
#endif
- xor %eax, %eax
+ xor %ebx, %ebx
xor %ecx, %ecx
xor %edx, %edx
xor %esi, %esi
@@ -199,6 +199,6 @@ ENTRY(__vmx_vcpu_run)
ret
/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
-2: mov $1, %ebx
+2: mov $1, %eax
jmp 1b
ENDPROC(__vmx_vcpu_run)