From bf78f1ea6e5108a7ebd55be0853f0716433117a9 Mon Sep 17 00:00:00 2001
From: Vincent Chen
Date: Mon, 5 Jun 2023 11:07:15 +0000
Subject: riscv: kvm: Add V extension to KVM ISA

Add the V extension to the KVM ISA extension list to enable support for
the V extension on VCPUs.

Signed-off-by: Vincent Chen
Signed-off-by: Greentime Hu
Signed-off-by: Andy Chiu
Reviewed-by: Conor Dooley
Reviewed-by: Anup Patel
Acked-by: Anup Patel
Reviewed-by: Heiko Stuebner
Link: https://lore.kernel.org/r/20230605110724.21391-19-andy.chiu@sifive.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/kvm/vcpu.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/riscv/kvm/vcpu.c')

diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 8bd9f2a8a0b9..f3282ff371ca 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -57,6 +57,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
 	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
 	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
+	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,

 	KVM_ISA_EXT_ARR(SSAIA),
 	KVM_ISA_EXT_ARR(SSTC),
-- 
cgit v1.2.3
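For illustration only, here is a minimal VMM-side sketch (not part of the patch)
of how the V extension registered in kvm_isa_ext_arr above can be enabled on a
vCPU through KVM's ONE_REG ISA-extension interface. The helper name
vcpu_enable_vector and the vcpu_fd descriptor are assumptions of the example,
and it assumes an RV64 host (hence KVM_REG_SIZE_U64):

	/*
	 * Hypothetical userspace sketch: enable the V extension for a vCPU via
	 * the KVM_REG_RISCV_ISA_EXT one-reg type backed by the array above.
	 * Assumes vcpu_fd is an already-created KVM vCPU file descriptor.
	 */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>
	#include <asm/kvm.h>

	static int vcpu_enable_vector(int vcpu_fd)
	{
		uint64_t enable = 1;
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			      KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V,
			.addr = (uint64_t)(uintptr_t)&enable,
		};

		/* Returns 0 on success, -1 with errno set otherwise. */
		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}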
From 0f4b82579716b12bb88257bd7ea80f25c791fb2c Mon Sep 17 00:00:00 2001
From: Vincent Chen
Date: Mon, 5 Jun 2023 11:07:16 +0000
Subject: riscv: KVM: Add vector lazy save/restore support

This patch adds vector context save/restore for guest VCPUs. To reduce
the impact on KVM performance, the implementation imitates the FP
context switch mechanism to lazily store and restore the vector context
only when the kernel enters/exits the in-kernel run loop and not during
the KVM world switch.

Signed-off-by: Vincent Chen
Signed-off-by: Greentime Hu
Signed-off-by: Andy Chiu
Reviewed-by: Anup Patel
Acked-by: Anup Patel
Link: https://lore.kernel.org/r/20230605110724.21391-20-andy.chiu@sifive.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/kvm_host.h        |   2 +
 arch/riscv/include/asm/kvm_vcpu_vector.h |  82 ++++++++++++++
 arch/riscv/include/uapi/asm/kvm.h        |   7 ++
 arch/riscv/kvm/Makefile                  |   1 +
 arch/riscv/kvm/vcpu.c                    |  22 ++++
 arch/riscv/kvm/vcpu_vector.c             | 186 +++++++++++++++++++++++++++++++
 6 files changed, 300 insertions(+)
 create mode 100644 arch/riscv/include/asm/kvm_vcpu_vector.h
 create mode 100644 arch/riscv/kvm/vcpu_vector.c

(limited to 'arch/riscv/kvm/vcpu.c')

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index ee0acccb1d3b..bd47a1dc2ff8 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -145,6 +146,7 @@ struct kvm_cpu_context {
 	unsigned long sstatus;
 	unsigned long hstatus;
 	union __riscv_fp_state fp;
+	struct __riscv_v_ext_state vector;
 };

 struct kvm_vcpu_csr {
diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h
new file mode 100644
index 000000000000..ff994fdd6d0d
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_vector.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 SiFive
+ *
+ * Authors:
+ *     Vincent Chen
+ *     Greentime Hu
+ */
+
+#ifndef __KVM_VCPU_RISCV_VECTOR_H
+#define __KVM_VCPU_RISCV_VECTOR_H
+
+#include
+
+#ifdef CONFIG_RISCV_ISA_V
+#include
+#include
+
+static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context)
+{
+	__riscv_v_vstate_save(&context->vector, context->vector.datap);
+}
+
+static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context)
+{
+	__riscv_v_vstate_restore(&context->vector, context->vector.datap);
+}
+
+void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
+				      unsigned long *isa);
+void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
+					 unsigned long *isa);
+void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
+					struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
+#else
+
+struct kvm_cpu_context;
+
+static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
+						    unsigned long *isa)
+{
+}
+
+static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
+						       unsigned long *isa)
+{
+}
+
+static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
+{
+}
+
+static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
+{
+}
+
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
+						      struct kvm_cpu_context *cntx)
+{
+	return 0;
+}
+
+static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
+				  const struct kvm_one_reg *reg,
+				  unsigned long rtype);
+int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
+				  const struct kvm_one_reg *reg,
+				  unsigned long rtype);
+#endif
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 8feb57c4c2e8..855c047e86d4 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -204,6 +204,13 @@ enum KVM_RISCV_SBI_EXT_ID {
 #define KVM_REG_RISCV_SBI_MULTI_REG_LAST	\
 		KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_MAX - 1)

+/* V extension registers are mapped as type 9 */
+#define KVM_REG_RISCV_VECTOR		(0x09 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_VECTOR_CSR_REG(name)	\
+		(offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_VECTOR_REG(n)	\
+		((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
+
 #endif

 #endif /* __LINUX_KVM_RISCV_H */
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 8031b8912a0d..7b4c21f9aa6a 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -17,6 +17,7 @@ kvm-y += mmu.o
 kvm-y += vcpu.o
 kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
+kvm-y += vcpu_vector.o
 kvm-y += vcpu_insn.o
 kvm-y += vcpu_switch.o
 kvm-y += vcpu_sbi.o
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index f3282ff371ca..e5e045852e6a 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -22,6 +22,8 @@
 #include
 #include
 #include
+#include
+#include

 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	KVM_GENERIC_VCPU_STATS(),
@@ -139,6 +141,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)

 	kvm_riscv_vcpu_fp_reset(vcpu);

+	kvm_riscv_vcpu_vector_reset(vcpu);
+
 	kvm_riscv_vcpu_timer_reset(vcpu);

 	kvm_riscv_vcpu_aia_reset(vcpu);
@@ -199,6 +203,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	cntx->hstatus |= HSTATUS_SPVP;
 	cntx->hstatus |= HSTATUS_SPV;

+	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+		return -ENOMEM;
+
 	/* By default, make CY, TM, and IR counters accessible in VU mode */
 	reset_csr->scounteren = 0x7;

@@ -242,6 +249,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

 	/* Free unused pages pre-allocated for G-stage page table mappings */
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+
+	/* Free vector context space for host and guest kernel */
+	kvm_riscv_vcpu_free_vector_context(vcpu);
 }

 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -680,6 +690,9 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
 	case KVM_REG_RISCV_SBI_EXT:
 		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
+	case KVM_REG_RISCV_VECTOR:
+		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
+						     KVM_REG_RISCV_VECTOR);
 	default:
 		break;
 	}
@@ -709,6 +722,9 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
 		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
 	case KVM_REG_RISCV_SBI_EXT:
 		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
+	case KVM_REG_RISCV_VECTOR:
+		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
+						     KVM_REG_RISCV_VECTOR);
 	default:
 		break;
 	}
@@ -1003,6 +1019,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
 	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
 					vcpu->arch.isa);
+	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
+	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
+					    vcpu->arch.isa);

 	kvm_riscv_vcpu_aia_load(vcpu, cpu);

@@ -1022,6 +1041,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

 	kvm_riscv_vcpu_timer_save(vcpu);
+	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
+					 vcpu->arch.isa);
+	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);

 	csr->vsstatus = csr_read(CSR_VSSTATUS);
 	csr->vsie = csr_read(CSR_VSIE);
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
new file mode 100644
index 000000000000..edd2eecbddc2
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 SiFive
+ *
+ * Authors:
+ *     Vincent Chen
+ *     Greentime Hu
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_RISCV_ISA_V
+void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
+{
+	unsigned long *isa = vcpu->arch.isa;
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+	cntx->sstatus &= ~SR_VS;
+	if (riscv_isa_extension_available(isa, v)) {
+		cntx->sstatus |= SR_VS_INITIAL;
+		WARN_ON(!cntx->vector.datap);
+		memset(cntx->vector.datap, 0, riscv_v_vsize);
+	} else {
+		cntx->sstatus |= SR_VS_OFF;
+	}
+}
+
+static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
+{
+	cntx->sstatus &= ~SR_VS;
+	cntx->sstatus |= SR_VS_CLEAN;
+}
+
+void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
+				      unsigned long *isa)
+{
+	if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
+		if (riscv_isa_extension_available(isa, v))
+			__kvm_riscv_vector_save(cntx);
+		kvm_riscv_vcpu_vector_clean(cntx);
+	}
+}
+
+void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
+					 unsigned long *isa)
+{
+	if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
+		if (riscv_isa_extension_available(isa, v))
+			__kvm_riscv_vector_restore(cntx);
+		kvm_riscv_vcpu_vector_clean(cntx);
+	}
+}
+
+void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
+{
+	/* No need to check host sstatus as it can be modified outside */
+	if (riscv_isa_extension_available(NULL, v))
+		__kvm_riscv_vector_save(cntx);
+}
+
+void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
+{
+	if (riscv_isa_extension_available(NULL, v))
+		__kvm_riscv_vector_restore(cntx);
+}
+
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
+					struct kvm_cpu_context *cntx)
+{
+	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
+	if (!cntx->vector.datap)
+		return -ENOMEM;
+
+	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+	if (!vcpu->arch.host_context.vector.datap)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
+{
+	kfree(vcpu->arch.guest_reset_context.vector.datap);
+	kfree(vcpu->arch.host_context.vector.datap);
+}
+#endif
+
+static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
+				      unsigned long reg_num,
+				      size_t reg_size)
+{
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	void *reg_val;
+	size_t vlenb = riscv_v_vsize / 32;
+
+	if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
+		if (reg_size != sizeof(unsigned long))
+			return NULL;
+		switch (reg_num) {
+		case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
+			reg_val = &cntx->vector.vstart;
+			break;
+		case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
+			reg_val = &cntx->vector.vl;
+			break;
+		case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
+			reg_val = &cntx->vector.vtype;
+			break;
+		case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
+			reg_val = &cntx->vector.vcsr;
+			break;
+		case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
+		default:
+			return NULL;
+		}
+	} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
+		if (reg_size != vlenb)
+			return NULL;
+		reg_val = cntx->vector.datap +
+			  (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
+	} else {
+		return NULL;
+	}
+
+	return reg_val;
+}
+
+int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
+				  const struct kvm_one_reg *reg,
+				  unsigned long rtype)
+{
+	unsigned long *isa = vcpu->arch.isa;
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    rtype);
+	void *reg_val = NULL;
+	size_t reg_size = KVM_REG_SIZE(reg->id);
+
+	if (rtype == KVM_REG_RISCV_VECTOR &&
+	    riscv_isa_extension_available(isa, v)) {
+		reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
+	}
+
+	if (!reg_val)
+		return -EINVAL;
+
+	if (copy_to_user(uaddr, reg_val, reg_size))
+		return -EFAULT;
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
+				  const struct kvm_one_reg *reg,
+				  unsigned long rtype)
+{
+	unsigned long *isa = vcpu->arch.isa;
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    rtype);
+	void *reg_val = NULL;
+	size_t reg_size = KVM_REG_SIZE(reg->id);
+
+	if (rtype == KVM_REG_RISCV_VECTOR &&
+	    riscv_isa_extension_available(isa, v)) {
+		reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
+	}
+
+	if (!reg_val)
+		return -EINVAL;
+
+	if (copy_from_user(reg_val, uaddr, reg_size))
+		return -EFAULT;
+
+	return 0;
+}
-- 
cgit v1.2.3
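For illustration only, a minimal VMM-side sketch (not part of the patch) of how
the guest's V CSR state exposed by the new KVM_REG_RISCV_VECTOR one-reg type
might be read back. The helper name vcpu_read_guest_vl and the vcpu_fd
descriptor are assumptions of the example; it assumes an RV64 host and a vCPU
whose V extension has been enabled:

	/*
	 * Hypothetical userspace sketch: read the guest's vl CSR through the
	 * KVM_REG_RISCV_VECTOR register type added above. The kernel routes
	 * this to kvm_riscv_vcpu_get_reg_vector().
	 */
	#include <stddef.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>
	#include <asm/ptrace.h>
	#include <asm/kvm.h>

	static int vcpu_read_guest_vl(int vcpu_fd, unsigned long *vl)
	{
		struct kvm_one_reg reg = {
			.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			      KVM_REG_RISCV_VECTOR |
			      KVM_REG_RISCV_VECTOR_CSR_REG(vl),
			.addr = (uint64_t)(uintptr_t)vl,
		};

		/* Returns 0 on success, -1 with errno set otherwise. */
		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}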
From 1fd96a3e9d5d4febe1a8486590ad52c048d1be77 Mon Sep 17 00:00:00 2001
From: Andy Chiu
Date: Mon, 5 Jun 2023 11:07:18 +0000
Subject: riscv: Add prctl controls for userspace vector management

This patch adds two riscv-specific prctls to let userspace control the
use of the vector unit:

* PR_RISCV_V_SET_CONTROL: control the permission to use Vector for a
  thread at the next execve, or at all following execves. Turning off
  Vector for a live thread is not possible, since libraries may already
  have registered ifuncs that execute Vector instructions.

* PR_RISCV_V_GET_CONTROL: get the same permission setting for the
  current thread, and the setting for the following execve(s).

Signed-off-by: Andy Chiu
Reviewed-by: Greentime Hu
Reviewed-by: Vincent Chen
Link: https://lore.kernel.org/r/20230605110724.21391-22-andy.chiu@sifive.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/processor.h |  10 ++++
 arch/riscv/include/asm/vector.h    |   4 ++
 arch/riscv/kernel/cpufeature.c     |   9 ++-
 arch/riscv/kernel/process.c        |   1 +
 arch/riscv/kernel/vector.c         | 114 +++++++++++++++++++++++++++++++
 arch/riscv/kvm/vcpu.c              |   2 +
 include/uapi/linux/prctl.h         |  11 ++++
 kernel/sys.c                       |  12 ++++
 8 files changed, 162 insertions(+), 1 deletion(-)

(limited to 'arch/riscv/kvm/vcpu.c')

diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 38ded8c5f207..e82af1097e26 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -40,6 +40,7 @@ struct thread_struct {
 	unsigned long s[12];	/* s[0]: frame pointer */
 	struct __riscv_d_ext_state fstate;
 	unsigned long bad_cause;
+	unsigned long vstate_ctrl;
 	struct __riscv_v_ext_state vstate;
 };

@@ -83,6 +84,15 @@ extern void riscv_fill_hwcap(void);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);

 extern unsigned long signal_minsigstksz __ro_after_init;
+
+#ifdef CONFIG_RISCV_ISA_V
+/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */
+#define RISCV_V_SET_CONTROL(arg)	riscv_v_vstate_ctrl_set_current(arg)
+#define RISCV_V_GET_CONTROL()		riscv_v_vstate_ctrl_get_current()
+extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
+extern long riscv_v_vstate_ctrl_get_current(void);
+#endif /* CONFIG_RISCV_ISA_V */
+
 #endif /* __ASSEMBLY__ */

 #endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 8e56da67b5cf..04c0b07bf6cd 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -160,6 +160,9 @@ static inline void __switch_to_vector(struct task_struct *prev,
 	riscv_v_vstate_restore(next, task_pt_regs(next));
 }

+void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
+bool riscv_v_vstate_ctrl_user_allowed(void);
+
 #else /* ! CONFIG_RISCV_ISA_V */

 struct pt_regs;
@@ -168,6 +171,7 @@ static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
 static __always_inline bool has_vector(void) { return false; }
 static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
 static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
+static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
 #define riscv_v_vsize (0)
 #define riscv_v_vstate_save(task, regs) do {} while (0)
 #define riscv_v_vstate_restore(task, regs) do {} while (0)
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 29c0680652a0..8ae43e40fffc 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -295,7 +295,14 @@ void __init riscv_fill_hwcap(void)

 unsigned long riscv_get_elf_hwcap(void)
 {
-	return (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
+	unsigned long hwcap;
+
+	hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
+
+	if (!riscv_v_vstate_ctrl_user_allowed())
+		hwcap &= ~COMPAT_HWCAP_ISA_V;
+
+	return hwcap;
 }

 #ifdef CONFIG_RISCV_ALTERNATIVE
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 78eb5ac45888..e32d737e039f 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -149,6 +149,7 @@ void flush_thread(void)
 #endif
 #ifdef CONFIG_RISCV_ISA_V
 	/* Reset vector state */
+	riscv_v_vstate_ctrl_init(current);
 	riscv_v_vstate_off(task_pt_regs(current));
 	kfree(current->thread.vstate.datap);
 	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 9d81d1b2a7f3..a7dec9230164 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include
 #include

@@ -19,6 +20,8 @@
 #include
 #include

+static bool riscv_v_implicit_uacc = IS_ENABLED(CONFIG_RISCV_ISA_V_DEFAULT_ENABLE);
+
 unsigned long riscv_v_vsize __read_mostly;
 EXPORT_SYMBOL_GPL(riscv_v_vsize);

@@ -91,6 +94,43 @@ static int riscv_v_thread_zalloc(void)
 	return 0;
 }

+#define VSTATE_CTRL_GET_CUR(x) ((x) & PR_RISCV_V_VSTATE_CTRL_CUR_MASK)
+#define VSTATE_CTRL_GET_NEXT(x) (((x) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) >> 2)
+#define VSTATE_CTRL_MAKE_NEXT(x) (((x) << 2) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK)
+#define VSTATE_CTRL_GET_INHERIT(x) (!!((x) & PR_RISCV_V_VSTATE_CTRL_INHERIT))
+static inline int riscv_v_ctrl_get_cur(struct task_struct *tsk)
+{
+	return VSTATE_CTRL_GET_CUR(tsk->thread.vstate_ctrl);
+}
+
+static inline int riscv_v_ctrl_get_next(struct task_struct *tsk)
+{
+	return VSTATE_CTRL_GET_NEXT(tsk->thread.vstate_ctrl);
+}
+
+static inline bool riscv_v_ctrl_test_inherit(struct task_struct *tsk)
+{
+	return VSTATE_CTRL_GET_INHERIT(tsk->thread.vstate_ctrl);
+}
+
+static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt,
+				    bool inherit)
+{
+	unsigned long ctrl;
+
+	ctrl = cur & PR_RISCV_V_VSTATE_CTRL_CUR_MASK;
+	ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt);
+	if (inherit)
+		ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
+	tsk->thread.vstate_ctrl = ctrl;
+}
+
+bool riscv_v_vstate_ctrl_user_allowed(void)
+{
+	return riscv_v_ctrl_get_cur(current) == PR_RISCV_V_VSTATE_CTRL_ON;
+}
+EXPORT_SYMBOL_GPL(riscv_v_vstate_ctrl_user_allowed);
+
 bool riscv_v_first_use_handler(struct pt_regs *regs)
 {
 	u32 __user *epc = (u32 __user *)regs->epc;
@@ -129,3 +169,77 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
 	riscv_v_vstate_on(regs);
 	return true;
 }
+
+void riscv_v_vstate_ctrl_init(struct task_struct *tsk)
+{
+	bool inherit;
+	int cur, next;
+
+	if (!has_vector())
+		return;
+
+	next = riscv_v_ctrl_get_next(tsk);
+	if (!next) {
+		if (riscv_v_implicit_uacc)
+			cur = PR_RISCV_V_VSTATE_CTRL_ON;
+		else
+			cur = PR_RISCV_V_VSTATE_CTRL_OFF;
+	} else {
+		cur = next;
+	}
+	/* Clear next mask if inherit-bit is not set */
+	inherit = riscv_v_ctrl_test_inherit(tsk);
+	if (!inherit)
+		next = PR_RISCV_V_VSTATE_CTRL_DEFAULT;
+
+	riscv_v_ctrl_set(tsk, cur, next, inherit);
+}
+
+long riscv_v_vstate_ctrl_get_current(void)
+{
+	if (!has_vector())
+		return -EINVAL;
+
+	return current->thread.vstate_ctrl & PR_RISCV_V_VSTATE_CTRL_MASK;
+}
+
+long riscv_v_vstate_ctrl_set_current(unsigned long arg)
+{
+	bool inherit;
+	int cur, next;
+
+	if (!has_vector())
+		return -EINVAL;
+
+	if (arg & ~PR_RISCV_V_VSTATE_CTRL_MASK)
+		return -EINVAL;
+
+	cur = VSTATE_CTRL_GET_CUR(arg);
+	switch (cur) {
+	case PR_RISCV_V_VSTATE_CTRL_OFF:
+		/* Do not allow user to turn off V if current is not off */
+		if (riscv_v_ctrl_get_cur(current) != PR_RISCV_V_VSTATE_CTRL_OFF)
+			return -EPERM;
+
+		break;
+	case PR_RISCV_V_VSTATE_CTRL_ON:
+		break;
+	case PR_RISCV_V_VSTATE_CTRL_DEFAULT:
+		cur = riscv_v_ctrl_get_cur(current);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	next = VSTATE_CTRL_GET_NEXT(arg);
+	inherit = VSTATE_CTRL_GET_INHERIT(arg);
+	switch (next) {
+	case PR_RISCV_V_VSTATE_CTRL_DEFAULT:
+	case PR_RISCV_V_VSTATE_CTRL_OFF:
+	case PR_RISCV_V_VSTATE_CTRL_ON:
+		riscv_v_ctrl_set(current, cur, next, inherit);
+		return 0;
+	}
+
+	return -EINVAL;
+}
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index e5e045852e6a..de24127e7e93 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -88,6 +88,8 @@ static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
 	switch (ext) {
 	case KVM_RISCV_ISA_EXT_H:
 		return false;
+	case KVM_RISCV_ISA_EXT_V:
+		return riscv_v_vstate_ctrl_user_allowed();
 	default:
 		break;
 	}
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index f23d9a16507f..3c36aeade991 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -294,4 +294,15 @@ struct prctl_mm_map {
 #define PR_SET_MEMORY_MERGE		67
 #define PR_GET_MEMORY_MERGE		68
+
+#define PR_RISCV_V_SET_CONTROL		69
+#define PR_RISCV_V_GET_CONTROL		70
+# define PR_RISCV_V_VSTATE_CTRL_DEFAULT		0
+# define PR_RISCV_V_VSTATE_CTRL_OFF		1
+# define PR_RISCV_V_VSTATE_CTRL_ON		2
+# define PR_RISCV_V_VSTATE_CTRL_INHERIT		(1 << 4)
+# define PR_RISCV_V_VSTATE_CTRL_CUR_MASK	0x3
+# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK	0xc
+# define PR_RISCV_V_VSTATE_CTRL_MASK		0x1f
+
 #endif /* _LINUX_PRCTL_H */
diff --git a/kernel/sys.c b/kernel/sys.c
index 339fee3eff6a..05f838929e72 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -140,6 +140,12 @@
 #ifndef GET_TAGGED_ADDR_CTRL
 # define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
 #endif
+#ifndef RISCV_V_SET_CONTROL
+# define RISCV_V_SET_CONTROL(a)		(-EINVAL)
+#endif
+#ifndef RISCV_V_GET_CONTROL
+# define RISCV_V_GET_CONTROL()		(-EINVAL)
+#endif

 /*
  * this is where the system-wide overflow UID and GID are defined, for
@@ -2708,6 +2714,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
 		break;
 #endif
+	case PR_RISCV_V_SET_CONTROL:
+		error = RISCV_V_SET_CONTROL(arg2);
+		break;
+	case PR_RISCV_V_GET_CONTROL:
+		error = RISCV_V_GET_CONTROL();
+		break;
 	default:
 		error = -EINVAL;
 		break;
-- 
cgit v1.2.3
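For illustration only, a minimal userspace sketch (not part of the patch) of the
prctl interface added above: it asks for Vector to stay enabled for the current
thread and to be inherited across future execve() calls, then reads the setting
back. The PR_* constants come from the <linux/prctl.h> hunk in this series; the
shift by 2 for the "next" field mirrors PR_RISCV_V_VSTATE_CTRL_NEXT_MASK:

	/*
	 * Hypothetical usage sketch of PR_RISCV_V_SET_CONTROL /
	 * PR_RISCV_V_GET_CONTROL on a kernel carrying this series.
	 */
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		unsigned long ctrl = PR_RISCV_V_VSTATE_CTRL_ON |
				     (PR_RISCV_V_VSTATE_CTRL_ON << 2) |	/* "next" field */
				     PR_RISCV_V_VSTATE_CTRL_INHERIT;

		if (prctl(PR_RISCV_V_SET_CONTROL, ctrl, 0, 0, 0) != 0)
			perror("PR_RISCV_V_SET_CONTROL");

		long cur = prctl(PR_RISCV_V_GET_CONTROL, 0, 0, 0, 0);
		if (cur < 0)
			perror("PR_RISCV_V_GET_CONTROL");
		else
			printf("vstate_ctrl = 0x%lx\n", (unsigned long)cur);

		return 0;
	}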