summaryrefslogtreecommitdiff
path: root/arch/arm64/kvm/pkvm.c
diff options
context:
space:
mode:
authorQuentin Perret <qperret@google.com>2022-11-10 22:02:53 +0300
committerMarc Zyngier <maz@kernel.org>2022-11-11 20:18:58 +0300
commitf41dff4efb918db68923a826e966ca62c7c8e929 (patch)
tree066dfafdfe64e20b689809729de4d34b69da317d /arch/arm64/kvm/pkvm.c
parent60dfe093ec13b056856c672e1daa35134be38283 (diff)
downloadlinux-f41dff4efb918db68923a826e966ca62c7c8e929.tar.xz
KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache
Rather than relying on the host to free the previously-donated pKVM hypervisor VM pages explicitly on teardown, introduce a dedicated teardown memcache which allows the host to reclaim guest memory resources without having to keep track of all of the allocations made by the pKVM hypervisor at EL2.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
[maz: dropped __maybe_unused from unmap_donated_memory_noclear()]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-21-will@kernel.org
Diffstat (limited to 'arch/arm64/kvm/pkvm.c')
-rw-r--r--arch/arm64/kvm/pkvm.c31
1 file changed, 6 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 8c443b915e43..cf56958b1492 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -147,8 +147,6 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
handle = ret;
host_kvm->arch.pkvm.handle = handle;
- host_kvm->arch.pkvm.hyp_donations.pgd = pgd;
- host_kvm->arch.pkvm.hyp_donations.vm = hyp_vm;
/* Donate memory for the vcpus at hyp and initialize it. */
hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
@@ -167,12 +165,12 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
goto destroy_vm;
}
- host_kvm->arch.pkvm.hyp_donations.vcpus[idx] = hyp_vcpu;
-
ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
hyp_vcpu);
- if (ret)
+ if (ret) {
+ free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
goto destroy_vm;
+ }
}
return 0;
@@ -201,30 +199,13 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
{
- unsigned long idx, nr_vcpus = host_kvm->created_vcpus;
- size_t pgd_sz, hyp_vm_sz;
-
- if (host_kvm->arch.pkvm.handle)
+ if (host_kvm->arch.pkvm.handle) {
WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
host_kvm->arch.pkvm.handle));
-
- host_kvm->arch.pkvm.handle = 0;
-
- for (idx = 0; idx < nr_vcpus; ++idx) {
- void *hyp_vcpu = host_kvm->arch.pkvm.hyp_donations.vcpus[idx];
-
- if (!hyp_vcpu)
- break;
-
- free_pages_exact(hyp_vcpu, PAGE_ALIGN(PKVM_HYP_VCPU_SIZE));
}
- hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
- size_mul(sizeof(void *), nr_vcpus)));
- pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
-
- free_pages_exact(host_kvm->arch.pkvm.hyp_donations.vm, hyp_vm_sz);
- free_pages_exact(host_kvm->arch.pkvm.hyp_donations.pgd, pgd_sz);
+ host_kvm->arch.pkvm.handle = 0;
+ free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
}
int pkvm_init_host_vm(struct kvm *host_kvm)