summary refs log tree commit diff
path: root/arch/arm64/kvm/hyp/nvhe/pkvm.c
diff options
context:
space:
mode:
author Fuad Tabba <tabba@google.com> 2022-11-10 22:02:46 +0300
committer Marc Zyngier <maz@kernel.org> 2022-11-11 20:16:24 +0300
commit 9d0c063a4d1d10ef8e6288899b8524413e40cfa0 (patch)
tree a3f7b368382f8d19ff205973f53368b5a9500bef /arch/arm64/kvm/hyp/nvhe/pkvm.c
parent a1ec5c70d3f63d8a143fb83cd7f53bd8ff2f72c8 (diff)
download linux-9d0c063a4d1d10ef8e6288899b8524413e40cfa0.tar.xz
KVM: arm64: Instantiate pKVM hypervisor VM and vCPU structures from EL1
With the pKVM hypervisor at EL2 now offering hypercalls to the host for creating and destroying VM and vCPU structures, plumb these in to the existing arm64 KVM backend to ensure that the hypervisor data structures are allocated and initialised on first vCPU run for a pKVM guest. In the host, 'struct kvm_protected_vm' is introduced to hold the handle of the pKVM VM instance as well as to track references to the memory donated to the hypervisor so that it can be freed back to the host allocator following VM teardown. The stage-2 page-table, hypervisor VM and vCPU structures are allocated separately so as to avoid the need for a large physically-contiguous allocation in the host at run-time. Tested-by: Vincent Donnefort <vdonnefort@google.com> Signed-off-by: Fuad Tabba <tabba@google.com> Signed-off-by: Will Deacon <will@kernel.org> Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20221110190259.26861-14-will@kernel.org
Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe/pkvm.c')
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 135c9a095eca..2c73c4640e4d 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -324,7 +324,7 @@ static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
if (idx < 0)
return idx;
- hyp_vm->kvm.arch.pkvm_handle = idx_to_vm_handle(idx);
+ hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);
/* VMID 0 is reserved for the host */
atomic64_set(&mmu->vmid.id, idx + 1);
@@ -333,7 +333,7 @@ static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
mmu->pgt = &hyp_vm->pgt;
vm_table[idx] = hyp_vm;
- return hyp_vm->kvm.arch.pkvm_handle;
+ return hyp_vm->kvm.arch.pkvm.handle;
}
/*
@@ -458,10 +458,10 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
goto err_remove_vm_table_entry;
hyp_spin_unlock(&vm_table_lock);
- return hyp_vm->kvm.arch.pkvm_handle;
+ return hyp_vm->kvm.arch.pkvm.handle;
err_remove_vm_table_entry:
- remove_vm_table_entry(hyp_vm->kvm.arch.pkvm_handle);
+ remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
err_unlock:
hyp_spin_unlock(&vm_table_lock);
err_remove_mappings:
@@ -528,6 +528,7 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
{
struct pkvm_hyp_vm *hyp_vm;
struct kvm *host_kvm;
+ unsigned int idx;
size_t vm_size;
int err;
@@ -553,6 +554,12 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
/* Return the metadata pages to the host */
+ for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
+ struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
+
+ unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+ }
+
host_kvm = hyp_vm->host_kvm;
vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
unmap_donated_memory(hyp_vm, vm_size);