Diffstat (limited to 'arch/arm64/kvm/handle_exit.c')
-rw-r--r--	arch/arm64/kvm/handle_exit.c	47
1 file changed, 45 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index e778eefcf214..a798c0b4d717 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -16,6 +16,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>
@@ -41,6 +42,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;
+	/* Forward hvc instructions to the virtual EL2 if the guest has EL2. */
+	if (vcpu_has_nv(vcpu)) {
+		if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
+			kvm_inject_undefined(vcpu);
+		else
+			kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+		return 1;
+	}
+
	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
@@ -52,6 +63,8 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
static int handle_smc(struct kvm_vcpu *vcpu)
{
+	int ret;
+
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
@@ -59,10 +72,30 @@ static int handle_smc(struct kvm_vcpu *vcpu)
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
+	 *
+	 * Only handle SMCs from the virtual EL2 with an immediate of zero and
+	 * skip it otherwise.
	 */
-	vcpu_set_reg(vcpu, 0, ~0UL);
+	if (!vcpu_is_el2(vcpu) || kvm_vcpu_hvc_get_imm(vcpu)) {
+		vcpu_set_reg(vcpu, 0, ~0UL);
+		kvm_incr_pc(vcpu);
+		return 1;
+	}
+
+	/*
+	 * If imm is zero then it is likely an SMCCC call.
+	 *
+	 * Note that on ARMv8.3, even if EL3 is not implemented, SMC executed
+	 * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than
+	 * being treated as UNDEFINED.
+	 */
+	ret = kvm_hvc_call_handler(vcpu);
+	if (ret < 0)
+		vcpu_set_reg(vcpu, 0, ~0UL);
+
	kvm_incr_pc(vcpu);
-	return 1;
+
+	return ret;
}
/*
@@ -196,6 +229,15 @@ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
	return 1;
}
+static int kvm_handle_eret(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
+		return kvm_handle_ptrauth(vcpu);
+
+	kvm_emulate_nested_eret(vcpu);
+	return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]		= kvm_handle_wfx,
@@ -211,6 +253,7 @@ static exit_handle_fn arm_exit_handlers[] = {
	[ESR_ELx_EC_SMC64]		= handle_smc,
	[ESR_ELx_EC_SYS64]		= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]		= handle_sve,
+	[ESR_ELx_EC_ERET]		= kvm_handle_eret,
	[ESR_ELx_EC_IABT_LOW]		= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]		= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,