author	Andrew Jones <ajones@ventanamicro.com>	2023-12-20 19:00:21 +0300
committer	Anup Patel <anup@brainfault.org>	2023-12-30 08:56:35 +0300
commit	f61ce890b1f0742f17b3a5d1f8c72574a33ffeb2 (patch)
tree	f17c2b49c6e15c984ed6b50effbe5a943ec4f7b6 /arch/riscv/kvm
parent	5b9e41321ba919dd051c68d2a1d2c753aa61634c (diff)
download	linux-f61ce890b1f0742f17b3a5d1f8c72574a33ffeb2.tar.xz
RISC-V: KVM: Add support for SBI STA registers
KVM userspace needs to be able to save and restore the steal-time shared memory address. Provide the address through the get/set-one-reg interface with two ulong-sized SBI STA extension registers (lo and hi). 64-bit KVM userspace must not set the hi register to anything other than zero and is allowed to completely neglect saving/restoring it.

Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
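As a point of reference, the following is a minimal, hypothetical userspace sketch of the save/restore flow described above on a 64-bit host, using the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. It is not part of this patch: the sta_reg_id(), sta_shmem_save() and sta_shmem_restore() helpers and vcpu_fd are illustrative names, and it assumes the KVM_REG_RISCV_SBI_STA and KVM_REG_RISCV_SBI_STA_REG() uapi definitions introduced alongside this change are visible through <linux/kvm.h>.

/* Hypothetical VMM-side sketch, 64-bit host: save/restore the STA
 * shared-memory address of one vCPU via the one-reg interface.
 * vcpu_fd is assumed to come from an earlier KVM_CREATE_VCPU. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t sta_reg_id(uint64_t reg)
{
	/* ulong-sized registers are U64-sized on a 64-bit host */
	return KVM_REG_RISCV | KVM_REG_SIZE_U64 |
	       KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA | reg;
}

static int sta_shmem_save(int vcpu_fd, uint64_t *shmem)
{
	struct kvm_one_reg reg = {
		.id = sta_reg_id(KVM_REG_RISCV_SBI_STA_REG(shmem_lo)),
		.addr = (uint64_t)(unsigned long)shmem,
	};

	/* shmem_hi always reads as zero on 64-bit, so saving it is optional */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int sta_shmem_restore(int vcpu_fd, uint64_t shmem)
{
	struct kvm_one_reg reg = {
		.id = sta_reg_id(KVM_REG_RISCV_SBI_STA_REG(shmem_lo)),
		.addr = (uint64_t)(unsigned long)&shmem,
	};

	/* writing shmem_hi can be skipped; it must be zero on 64-bit KVM */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

On 32-bit hosts the same two registers are u32-sized and shmem_hi carries the upper half of the address; see the set-reg handling in vcpu_sbi_sta.c below.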
Diffstat (limited to 'arch/riscv/kvm')
-rw-r--r--	arch/riscv/kvm/vcpu_onereg.c	37
-rw-r--r--	arch/riscv/kvm/vcpu_sbi.c	5
-rw-r--r--	arch/riscv/kvm/vcpu_sbi_sta.c	55
3 files changed, 83 insertions, 14 deletions
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 143d0edd7f63..fc34557f5356 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -961,27 +961,36 @@ static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
 	return copy_sbi_ext_reg_indices(vcpu, NULL);
 }
 
-static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-	int n = num_sbi_regs(vcpu);
+	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+	int total = 0;
 
-	for (int i = 0; i < n; i++) {
-		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
-			  KVM_REG_RISCV_SBI_STATE | i;
+	if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
+		u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+		int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
 
-		if (uindices) {
-			if (put_user(reg, uindices))
-				return -EFAULT;
-			uindices++;
+		for (int i = 0; i < n; i++) {
+			u64 reg = KVM_REG_RISCV | size |
+				  KVM_REG_RISCV_SBI_STATE |
+				  KVM_REG_RISCV_SBI_STA | i;
+
+			if (uindices) {
+				if (put_user(reg, uindices))
+					return -EFAULT;
+				uindices++;
+			}
 		}
+
+		total += n;
 	}
 
-	return n;
+	return total;
+}
+
+static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
+{
+	return copy_sbi_reg_indices(vcpu, NULL);
 }
 
 static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index a1997c39dfde..72a2ffb8dcd1 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -345,6 +345,8 @@ int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
 
 	switch (reg_subtype) {
+	case KVM_REG_RISCV_SBI_STA:
+		return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
 	default:
 		return -EINVAL;
 	}
@@ -370,6 +372,9 @@ int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
 	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
 
 	switch (reg_subtype) {
+	case KVM_REG_RISCV_SBI_STA:
+		ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index 6592d287fc4e..87bf1a5f05ce 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -3,6 +3,8 @@
  * Copyright (c) 2023 Ventana Micro Systems Inc.
  */
 
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_vcpu_sbi.h>
@@ -59,3 +61,56 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
 	.handler = kvm_sbi_ext_sta_handler,
 	.probe = kvm_sbi_ext_sta_probe,
 };
+
+int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
+				   unsigned long reg_num,
+				   unsigned long *reg_val)
+{
+	switch (reg_num) {
+	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
+		*reg_val = (unsigned long)vcpu->arch.sta.shmem;
+		break;
+	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
+		if (IS_ENABLED(CONFIG_32BIT))
+			*reg_val = upper_32_bits(vcpu->arch.sta.shmem);
+		else
+			*reg_val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
+				   unsigned long reg_num,
+				   unsigned long reg_val)
+{
+	switch (reg_num) {
+	case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
+		if (IS_ENABLED(CONFIG_32BIT)) {
+			gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);
+
+			vcpu->arch.sta.shmem = reg_val;
+			vcpu->arch.sta.shmem |= hi << 32;
+		} else {
+			vcpu->arch.sta.shmem = reg_val;
+		}
+		break;
+	case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
+		if (IS_ENABLED(CONFIG_32BIT)) {
+			gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);
+
+			vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
+			vcpu->arch.sta.shmem |= lo;
+		} else if (reg_val != 0) {
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}