author     Paolo Bonzini <pbonzini@redhat.com>    2023-10-31 16:55:40 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>    2023-10-31 16:55:40 +0300
commit     ef12ea629e69d12c8713218014bf569a788f17ae
tree       bb2fe4d0dabb2e4f0bc055ee35777b6ede37873d
parent     ffc253263a1375a65fa6c9f62a893e9767fbebfa
parent     2c10cda4b777be4be9d9e69e4f70c818dbb15e21
Merge tag 'loongarch-kvm-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson into HEAD
LoongArch KVM changes for v6.7

Add LoongArch KVM support. Loongson 3A5000/3A6000 supports hardware-assisted virtualization. With CPU virtualization, there are separate hardware-supported user mode and kernel mode in guest mode. With memory virtualization, there are two-level hardware MMU tables for guest mode and host mode. There is also a separate hardware CPU timer with constant frequency in guest mode, so a VM can migrate between hosts with different frequencies. Currently we are able to boot LoongArch Linux guests.

A few key aspects of KVM LoongArch added by this series are:

1. Enable the KVM hardware function when the kvm module is loaded.
2. Implement VM and vCPU related ioctl interfaces such as vCPU create, vCPU run, etc. The GET_ONE_REG/SET_ONE_REG ioctl commands are used to access general registers one by one.
3. Hardware accesses to the MMU, timer and CSRs are emulated in the kernel.
4. MMIO and IOCSR devices (IPI, irqchips, PCI devices, etc.) are emulated in user space.
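As a concrete illustration of item 2, below is a minimal userspace sketch of reading one guest CSR through KVM_GET_ONE_REG, using the KVM_IOC_CSRID() encoding added by this series in arch/loongarch/include/uapi/asm/kvm.h. The CSR index is spelled out locally (CRMD lives at CSR 0x0) because asm/loongarch.h is not a uapi header, and vcpu_fd is assumed to have been obtained earlier via KVM_CREATE_VCPU.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>          /* pulls in asm/kvm.h with KVM_IOC_CSRID() */

#define EXAMPLE_CSR_CRMD 0x0    /* CRMD is CSR index 0x0; defined here for illustration */

/* Read the guest CRMD CSR of an already-created vCPU; returns the ioctl result. */
static int read_guest_crmd(int vcpu_fd, uint64_t *val)
{
        struct kvm_one_reg reg = {
                .id   = KVM_IOC_CSRID(EXAMPLE_CSR_CRMD),
                .addr = (uint64_t)(uintptr_t)val,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Writing a CSR is symmetric: place the value at .addr and issue KVM_SET_ONE_REG with the same id.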
Diffstat (limited to 'arch/loongarch/include')
-rw-r--r--  arch/loongarch/include/asm/inst.h        |  16
-rw-r--r--  arch/loongarch/include/asm/kvm_csr.h     | 211
-rw-r--r--  arch/loongarch/include/asm/kvm_host.h    | 237
-rw-r--r--  arch/loongarch/include/asm/kvm_mmu.h     | 139
-rw-r--r--  arch/loongarch/include/asm/kvm_types.h   |  11
-rw-r--r--  arch/loongarch/include/asm/kvm_vcpu.h    |  93
-rw-r--r--  arch/loongarch/include/asm/loongarch.h   |  19
-rw-r--r--  arch/loongarch/include/uapi/asm/kvm.h    | 108
8 files changed, 829 insertions, 5 deletions
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 71e1ed4165c8..008a88ead60d 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -65,6 +65,14 @@ enum reg2_op {
revbd_op = 0x0f,
revh2w_op = 0x10,
revhd_op = 0x11,
+ iocsrrdb_op = 0x19200,
+ iocsrrdh_op = 0x19201,
+ iocsrrdw_op = 0x19202,
+ iocsrrdd_op = 0x19203,
+ iocsrwrb_op = 0x19204,
+ iocsrwrh_op = 0x19205,
+ iocsrwrw_op = 0x19206,
+ iocsrwrd_op = 0x19207,
};
enum reg2i5_op {
@@ -318,6 +326,13 @@ struct reg2bstrd_format {
unsigned int opcode : 10;
};
+struct reg2csr_format {
+ unsigned int rd : 5;
+ unsigned int rj : 5;
+ unsigned int csr : 14;
+ unsigned int opcode : 8;
+};
+
struct reg3_format {
unsigned int rd : 5;
unsigned int rj : 5;
@@ -346,6 +361,7 @@ union loongarch_instruction {
struct reg2i14_format reg2i14_format;
struct reg2i16_format reg2i16_format;
struct reg2bstrd_format reg2bstrd_format;
+ struct reg2csr_format reg2csr_format;
struct reg3_format reg3_format;
struct reg3sa2_format reg3sa2_format;
};
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
new file mode 100644
index 000000000000..724ca8b7b401
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_CSR_H__
+#define __ASM_LOONGARCH_KVM_CSR_H__
+
+#include <linux/uaccess.h>
+#include <linux/kvm_host.h>
+#include <asm/loongarch.h>
+#include <asm/kvm_vcpu.h>
+
+#define gcsr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__( \
+ " gcsrrd %[val], %[reg]\n\t" \
+ : [val] "=r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
+#define gcsr_write(v, csr) \
+({ \
+ register unsigned long __v = v; \
+ __asm__ __volatile__ ( \
+ " gcsrwr %[val], %[reg]\n\t" \
+ : [val] "+r" (__v) \
+ : [reg] "i" (csr) \
+ : "memory"); \
+})
+
+#define gcsr_xchg(v, m, csr) \
+({ \
+ register unsigned long __v = v; \
+ __asm__ __volatile__( \
+ " gcsrxchg %[val], %[mask], %[reg]\n\t" \
+ : [val] "+r" (__v) \
+ : [mask] "r" (m), [reg] "i" (csr) \
+ : "memory"); \
+ __v; \
+})
+
+/* Guest CSRS read and write */
+#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD)
+#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD)
+#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD)
+#define write_gcsr_prmd(val) gcsr_write(val, LOONGARCH_CSR_PRMD)
+#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN)
+#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN)
+#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC)
+#define write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC)
+#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG)
+#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG)
+#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT)
+#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT)
+#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA)
+#define write_gcsr_era(val) gcsr_write(val, LOONGARCH_CSR_ERA)
+#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV)
+#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV)
+#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI)
+#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI)
+#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY)
+#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY)
+
+#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID)
+#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID)
+#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL)
+#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL)
+#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH)
+#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH)
+#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD)
+#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD)
+#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0)
+#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0)
+#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1)
+#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1)
+#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE)
+#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE)
+#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG)
+#define write_gcsr_rvacfg(val) gcsr_write(val, LOONGARCH_CSR_RVACFG)
+
+#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID)
+#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID)
+#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1)
+#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1)
+#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2)
+#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2)
+#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3)
+#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3)
+
+#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0)
+#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0)
+#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1)
+#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1)
+#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2)
+#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2)
+#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3)
+#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3)
+#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4)
+#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4)
+#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5)
+#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5)
+#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6)
+#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6)
+#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7)
+#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7)
+
+#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID)
+#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID)
+#define read_gcsr_timercfg() gcsr_read(LOONGARCH_CSR_TCFG)
+#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG)
+#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL)
+#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL)
+#define read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC)
+#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC)
+
+#define read_gcsr_llbctl() gcsr_read(LOONGARCH_CSR_LLBCTL)
+#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL)
+
+#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX)
+#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX)
+#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY)
+#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY)
+#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV)
+#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV)
+#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA)
+#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA)
+#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE)
+#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE)
+#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0)
+#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0)
+#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1)
+#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1)
+#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI)
+#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI)
+#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD)
+#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD)
+
+#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0)
+#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0)
+#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1)
+#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1)
+#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2)
+#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2)
+#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3)
+#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3)
+
+/* Guest related CSRs */
+#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC)
+#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC)
+#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP)
+#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG)
+#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG)
+#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT)
+#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT)
+#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC)
+#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC)
+#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC)
+#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC)
+
+#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name)
+
+__BUILD_CSR_OP(gcfg)
+__BUILD_CSR_OP(gstat)
+__BUILD_CSR_OP(gtlbc)
+__BUILD_CSR_OP(gintc)
+__BUILD_GCSR_OP(llbctl)
+__BUILD_GCSR_OP(tlbidx)
+
+#define set_gcsr_estat(val) \
+ gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT)
+#define clear_gcsr_estat(val) \
+ gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT)
+
+#define kvm_read_hw_gcsr(id) gcsr_read(id)
+#define kvm_write_hw_gcsr(id, val) gcsr_write(val, id)
+
+#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid))
+#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid))
+
+int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ return csr->csrs[gid];
+}
+
+static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val)
+{
+ csr->csrs[gid] = val;
+}
+
+static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long val)
+{
+ csr->csrs[gid] |= val;
+}
+
+static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long mask, unsigned long val)
+{
+ unsigned long _mask = mask;
+
+ csr->csrs[gid] &= ~_mask;
+ csr->csrs[gid] |= val & _mask;
+}
+
+#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
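The gcsr_read()/gcsr_write() wrappers above wrap the gcsrrd/gcsrwr instructions with an immediate CSR number, and kvm_save_hw_gcsr()/kvm_restore_hw_gcsr() shuttle those hardware values in and out of the software copy in struct loongarch_csrs. A minimal sketch of how they compose (the function names and the particular set of CSRs are illustrative, not taken from this patch):

static void example_save_guest_csrs(struct loongarch_csrs *csr)
{
	/* Snapshot a few hardware guest CSRs into the software copy. */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
}

static void example_restore_guest_csrs(struct loongarch_csrs *csr)
{
	/* Write the software copy back into the hardware guest CSRs. */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
}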
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
new file mode 100644
index 000000000000..11328700d4fa
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_HOST_H__
+#define __ASM_LOONGARCH_KVM_HOST_H__
+
+#include <linux/cpumask.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/inst.h>
+#include <asm/kvm_mmu.h>
+#include <asm/loongarch.h>
+
+/* Loongarch KVM register ids */
+#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+
+#define KVM_MAX_VCPUS 256
+#define KVM_MAX_CPUCFG_REGS 21
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 0
+
+#define KVM_HALT_POLL_NS_DEFAULT 500000
+
+struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
+ u64 pages;
+ u64 hugepages;
+};
+
+struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
+ u64 int_exits;
+ u64 idle_exits;
+ u64 cpucfg_exits;
+ u64 signal_exits;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_context {
+ unsigned long vpid_cache;
+ struct kvm_vcpu *last_vcpu;
+};
+
+struct kvm_world_switch {
+ int (*exc_entry)(void);
+ int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ unsigned long page_order;
+};
+
+#define MAX_PGTABLE_LEVELS 4
+
+struct kvm_arch {
+ /* Guest physical mm */
+ kvm_pte_t *pgd;
+ unsigned long gpa_size;
+ unsigned long invalid_ptes[MAX_PGTABLE_LEVELS];
+ unsigned int pte_shifts[MAX_PGTABLE_LEVELS];
+ unsigned int root_level;
+
+ s64 time_offset;
+ struct kvm_context __percpu *vmcs;
+};
+
+#define CSR_MAX_NUMS 0x800
+
+struct loongarch_csrs {
+ unsigned long csrs[CSR_MAX_NUMS];
+};
+
+/* Resume Flags */
+#define RESUME_HOST 0
+#define RESUME_GUEST 1
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
+ EMULATE_DO_IOCSR, /* handle IOCSR request */
+ EMULATE_FAIL, /* can't emulate this instruction */
+ EMULATE_EXCEPT, /* A guest exception has been generated */
+};
+
+#define KVM_LARCH_FPU (0x1 << 0)
+#define KVM_LARCH_SWCSR_LATEST (0x1 << 1)
+#define KVM_LARCH_HWCSR_USABLE (0x1 << 2)
+
+struct kvm_vcpu_arch {
+ /*
+ * Switch pointer-to-function type to unsigned long
+ * for loading the value into register directly.
+ */
+ unsigned long host_eentry;
+ unsigned long guest_eentry;
+
+ /* Pointers stored here for easy accessing from assembly code */
+ int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+ /* Host registers preserved across guest mode execution */
+ unsigned long host_sp;
+ unsigned long host_tp;
+ unsigned long host_pgd;
+
+ /* Host CSRs are used when handling exits from guest */
+ unsigned long badi;
+ unsigned long badv;
+ unsigned long host_ecfg;
+ unsigned long host_estat;
+ unsigned long host_percpu;
+
+ /* GPRs */
+ unsigned long gprs[32];
+ unsigned long pc;
+
+ /* Which auxiliary state is loaded (KVM_LARCH_*) */
+ unsigned int aux_inuse;
+
+ /* FPU state */
+ struct loongarch_fpu fpu FPU_ALIGN;
+
+ /* CSR state */
+ struct loongarch_csrs *csr;
+
+ /* GPR used as IO source/target */
+ u32 io_gpr;
+
+ /* KVM register to control count timer */
+ u32 count_ctl;
+ struct hrtimer swtimer;
+
+ /* Bitmask of intr that are pending */
+ unsigned long irq_pending;
+ /* Bitmask of pending intr to be cleared */
+ unsigned long irq_clear;
+
+ /* Bitmask of exceptions that are pending */
+ unsigned long exception_pending;
+ unsigned int esubcode;
+
+ /* Cache for pages needed inside spinlock regions */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* vcpu's vpid */
+ u64 vpid;
+
+ /* Frequency of stable timer in Hz */
+ u64 timer_mhz;
+ ktime_t expire;
+
+ /* Last CPU the vCPU state was loaded on */
+ int last_sched_cpu;
+ /* mp state */
+ struct kvm_mp_state mp_state;
+ /* cpucfg */
+ u32 cpucfg[KVM_MAX_CPUCFG_REGS];
+};
+
+static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
+{
+ return csr->csrs[reg];
+}
+
+static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val)
+{
+ csr->csrs[reg] = val;
+}
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* MMU handling */
+void kvm_flush_tlb_all(void);
+void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa);
+int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+
+static inline void update_pc(struct kvm_vcpu_arch *arch)
+{
+ arch->pc += 4;
+}
+
+/*
+ * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
+ * @vcpu: Virtual CPU.
+ *
+ * Returns: Whether the TLBL exception was likely due to an instruction
+ * fetch fault rather than a data load fault.
+ */
+static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
+{
+ return arch->pc == arch->badv;
+}
+
+/* Misc */
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
+void kvm_check_vpid(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
+void kvm_init_vmcs(struct kvm *kvm);
+void kvm_exc_entry(void);
+int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+extern unsigned long vpid_mask;
+extern const unsigned long kvm_exception_size;
+extern const unsigned long kvm_enter_guest_size;
+extern struct kvm_world_switch *kvm_loongarch_ops;
+
+#define SW_GCSR (1 << 0)
+#define HW_GCSR (1 << 1)
+#define INVALID_GCSR (1 << 2)
+
+int get_gcsr_flag(int csr);
+void set_hw_gcsr(int csr_id, unsigned long val);
+
+#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */
diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..099bafc6f797
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_mmu.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_MMU_H__
+#define __ASM_LOONGARCH_KVM_MMU_H__
+
+#include <linux/kvm_host.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
+ * for which pages need to be cached.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1)
+
+#define _KVM_FLUSH_PGTABLE 0x1
+#define _KVM_HAS_PGMASK 0x2
+#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
+#define kvm_pte_pfn(x) ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT))
+
+typedef unsigned long kvm_pte_t;
+typedef struct kvm_ptw_ctx kvm_ptw_ctx;
+typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx);
+
+struct kvm_ptw_ctx {
+ kvm_pte_ops ops;
+ unsigned long flag;
+
+ /* for kvm_arch_mmu_enable_log_dirty_pt_masked use */
+ unsigned long mask;
+ unsigned long gfn;
+
+ /* page walk mmu info */
+ unsigned int level;
+ unsigned long pgtable_shift;
+ unsigned long invalid_entry;
+ unsigned long *invalid_ptes;
+ unsigned int *pte_shifts;
+ void *opaque;
+
+ /* free pte table page list */
+ struct list_head list;
+};
+
+kvm_pte_t *kvm_pgd_alloc(void);
+
+static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
+{
+ WRITE_ONCE(*ptep, val);
+}
+
+static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
+static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
+static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
+static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
+
+static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
+{
+ return pte | _PAGE_ACCESSED;
+}
+
+static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
+{
+ return pte & ~_PAGE_ACCESSED;
+}
+
+static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
+{
+ return pte | _PAGE_DIRTY;
+}
+
+static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
+{
+ return pte & ~_PAGE_DIRTY;
+}
+
+static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
+{
+ return pte | _PAGE_HUGE;
+}
+
+static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
+{
+ return pte & ~_PAGE_HUGE;
+}
+
+static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
+{
+ return ctx->flag & _KVM_FLUSH_PGTABLE;
+}
+
+static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table,
+ phys_addr_t addr)
+{
+
+ return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1));
+}
+
+static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx,
+ phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t boundary, size;
+
+ size = 0x1UL << ctx->pgtable_shift;
+ boundary = (addr + size) & ~(size - 1);
+ return (boundary - 1 < end - 1) ? boundary : end;
+}
+
+static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
+{
+ if (!ctx || ctx->level == 0)
+ return !!(*entry & _PAGE_PRESENT);
+
+ return *entry != ctx->invalid_entry;
+}
+
+static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry)
+{
+ return *entry == ctx->invalid_entry;
+}
+
+static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx)
+{
+ ctx->level--;
+ ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
+ ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
+}
+
+static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx)
+{
+ ctx->level++;
+ ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
+ ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
+}
+
+#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */
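The kvm_ptw_ctx structure above carries the per-level state for a software walk of the GPA page table: kvm_pgtable_offset() indexes the current table, kvm_pte_none()/kvm_pte_present() test an entry against the level's invalid pattern, and kvm_ptw_enter()/kvm_ptw_exit() move the context down and up one level. The following is only a rough sketch of how a walker might compose these helpers, not the walker in this series; it assumes non-leaf entries hold the kernel virtual address of the child table.

/* Illustrative recursive walk: apply ctx->ops to every present leaf in [addr, end). */
static int example_ptw(kvm_pte_t *table, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
{
	int ret = 0;
	phys_addr_t next;
	kvm_pte_t *entry;

	do {
		next = kvm_pgtable_addr_end(ctx, addr, end);
		entry = kvm_pgtable_offset(ctx, table, addr);
		if (kvm_pte_none(ctx, entry))
			continue;

		if (ctx->level == 0 || kvm_pte_huge(*entry)) {
			ret |= ctx->ops(entry, addr, ctx);	/* leaf: per-PTE callback */
		} else {
			kvm_ptw_enter(ctx);			/* descend one level */
			ret |= example_ptw((kvm_pte_t *)*entry, addr, next, ctx);
			kvm_ptw_exit(ctx);			/* back to this level */
		}
	} while (addr = next, addr != end);

	return ret;
}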
diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h
new file mode 100644
index 000000000000..2fe1d4bdff66
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_types.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_KVM_TYPES_H
+#define _ASM_LOONGARCH_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
+#endif /* _ASM_LOONGARCH_KVM_TYPES_H */
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
new file mode 100644
index 000000000000..553cfa2b2b1c
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
+#define __ASM_LOONGARCH_KVM_VCPU_H__
+
+#include <linux/kvm_host.h>
+#include <asm/loongarch.h>
+
+/* Controlled by 0x5 guest estat */
+#define CPU_SIP0 (_ULCAST_(1))
+#define CPU_SIP1 (_ULCAST_(1) << 1)
+#define CPU_PMU (_ULCAST_(1) << 10)
+#define CPU_TIMER (_ULCAST_(1) << 11)
+#define CPU_IPI (_ULCAST_(1) << 12)
+
+/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */
+#define CPU_IP0 (_ULCAST_(1))
+#define CPU_IP1 (_ULCAST_(1) << 1)
+#define CPU_IP2 (_ULCAST_(1) << 2)
+#define CPU_IP3 (_ULCAST_(1) << 3)
+#define CPU_IP4 (_ULCAST_(1) << 4)
+#define CPU_IP5 (_ULCAST_(1) << 5)
+#define CPU_IP6 (_ULCAST_(1) << 6)
+#define CPU_IP7 (_ULCAST_(1) << 7)
+
+#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20)
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24
+#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff
+#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16
+#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff
+#define KVM_LOONGSON_IRQ_NUM_SHIFT 0
+#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff
+
+typedef union loongarch_instruction larch_inst;
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
+
+int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
+int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
+int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_emu_idle(struct kvm_vcpu *vcpu);
+int kvm_pending_timer(struct kvm_vcpu *vcpu);
+int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
+void kvm_deliver_intr(struct kvm_vcpu *vcpu);
+void kvm_deliver_exception(struct kvm_vcpu *vcpu);
+
+void kvm_own_fpu(struct kvm_vcpu *vcpu);
+void kvm_lose_fpu(struct kvm_vcpu *vcpu);
+void kvm_save_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fcsr(struct loongarch_fpu *fpu);
+
+void kvm_acquire_timer(struct kvm_vcpu *vcpu);
+void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
+void kvm_reset_timer(struct kvm_vcpu *vcpu);
+void kvm_save_timer(struct kvm_vcpu *vcpu);
+void kvm_restore_timer(struct kvm_vcpu *vcpu);
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
+
+/*
+ * Loongarch KVM guest interrupt handling
+ */
+static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ set_bit(irq, &vcpu->arch.irq_pending);
+ clear_bit(irq, &vcpu->arch.irq_clear);
+}
+
+static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ clear_bit(irq, &vcpu->arch.irq_pending);
+ set_bit(irq, &vcpu->arch.irq_clear);
+}
+
+static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
+ unsigned int code, unsigned int subcode)
+{
+ /* only one exception can be injected */
+ if (!vcpu->arch.exception_pending) {
+ set_bit(code, &vcpu->arch.exception_pending);
+ vcpu->arch.esubcode = subcode;
+ return 0;
+ } else
+ return -1;
+}
+
+#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 33531d432b49..9b4957cefa8a 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -226,6 +226,7 @@
#define LOONGARCH_CSR_ECFG 0x4 /* Exception config */
#define CSR_ECFG_VS_SHIFT 16
#define CSR_ECFG_VS_WIDTH 3
+#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1)
#define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
#define CSR_ECFG_IM_SHIFT 0
#define CSR_ECFG_IM_WIDTH 14
@@ -314,13 +315,14 @@
#define CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
#define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */
-#define CSR_GTLBC_RID_SHIFT 16
-#define CSR_GTLBC_RID_WIDTH 8
-#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TGID_SHIFT 16
+#define CSR_GTLBC_TGID_WIDTH 8
+#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1)
+#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT)
#define CSR_GTLBC_TOTI_SHIFT 13
#define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
-#define CSR_GTLBC_USERID_SHIFT 12
-#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_USETGID_SHIFT 12
+#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT)
#define CSR_GTLBC_GMTLBSZ_SHIFT 0
#define CSR_GTLBC_GMTLBSZ_WIDTH 6
#define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
@@ -475,6 +477,7 @@
#define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */
#define CSR_GSTAT_GID_SHIFT 16
#define CSR_GSTAT_GID_WIDTH 8
+#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1)
#define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
#define CSR_GSTAT_GIDBIT_SHIFT 4
#define CSR_GSTAT_GIDBIT_WIDTH 6
@@ -525,6 +528,12 @@
#define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF)
#define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF)
#define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF)
+#define CSR_GCFG_MATP_NEST_SHIFT 2
+#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT)
+#define CSR_GCFG_MATP_ROOT_SHIFT 1
+#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT)
+#define CSR_GCFG_MATP_GUEST_SHIFT 0
+#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT)
#define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */
#define CSR_GINTC_HC_SHIFT 16
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
new file mode 100644
index 000000000000..c6ad2ee6106c
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __UAPI_ASM_LOONGARCH_KVM_H
+#define __UAPI_ASM_LOONGARCH_KVM_H
+
+#include <linux/types.h>
+
+/*
+ * KVM LoongArch specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+ /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+ __u64 gpr[32];
+ __u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+ __u32 fcsr;
+ __u64 fcc; /* 8x8 */
+ struct kvm_fpureg {
+ __u64 val64[4];
+ } fpr[32];
+};
+
+/*
+ * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers. The id field is broken down as follows:
+ *
+ * bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CSR registers.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / SIMD registers (see definitions below).
+ *
+ * Other sets registers may be added in the future. Each set would
+ * have its own identifier in bits[31..16].
+ */
+
+#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL)
+#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL)
+#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL)
+#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL)
+#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL)
+#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL)
+#define KVM_CSR_IDX_MASK 0x7fff
+#define KVM_CPUCFG_IDX_MASK 0x7fff
+
+/*
+ * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
+ */
+
+#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2)
+
+#define LOONGARCH_REG_SHIFT 3
+#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT))
+#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG)
+#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG)
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_iocsr_entry {
+ __u32 addr;
+ __u32 pad;
+ __u64 data;
+};
+
+#define KVM_NR_IRQCHIPS 1
+#define KVM_IRQCHIP_NUM_PINS 64
+#define KVM_MAX_CORES 256
+
+#endif /* __UAPI_ASM_LOONGARCH_KVM_H */
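To make the id layout documented above concrete, the encoding and the kernel-side decode can be traced by hand. Taking CSR index 1 (PRMD) as an example — KVM_REG_LOONGARCH and KVM_REG_SIZE_U64 come from linux/kvm.h and are left symbolic here, and KVM_GET_IOC_CSR_IDX() is the decode helper added in asm/kvm_host.h by this same series:

/*
 * Encode: userspace builds the register id.
 *   KVM_IOC_CSRID(1)
 *     = LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, 1)
 *     = KVM_REG_LOONGARCH | 0x10000 | KVM_REG_SIZE_U64 | (1 << 3)
 *
 * Decode: the kernel recovers the CSR index from the low bits.
 *   KVM_GET_IOC_CSR_IDX(id)
 *     = (id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT
 *     = (0x8 & 0x7fff) >> 3
 *     = 1
 */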