author    Quentin Perret <qperret@google.com>    2022-11-10 22:02:53 +0300
committer Marc Zyngier <maz@kernel.org>          2022-11-11 20:18:58 +0300
commit    f41dff4efb918db68923a826e966ca62c7c8e929
tree      066dfafdfe64e20b689809729de4d34b69da317d /arch/arm64/kvm/hyp/nvhe/pkvm.c
parent    60dfe093ec13b056856c672e1daa35134be38283
KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache
Rather than relying on the host to free the previously-donated pKVM
hypervisor VM pages explicitly on teardown, introduce a dedicated
teardown memcache which allows the host to reclaim guest memory
resources without having to keep track of all of the allocations made
by the pKVM hypervisor at EL2.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
[maz: dropped __maybe_unused from unmap_donated_memory_noclear()]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-21-will@kernel.org
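For readers unfamiliar with the hyp memcache pattern the patch builds on: it is a
singly-linked free list threaded through the reclaimed pages themselves. EL2 stores,
in the first word of each donated page, the address of the previous list head, so
the host can later walk the list and free every page without any separate tracking
structure. The userspace sketch below models that pattern only; the struct layout,
helper names (memcache, push_page, drain) and identity virt/phys mapping are
illustrative assumptions, not the kernel's kvm_hyp_memcache API.

/*
 * Minimal sketch of the teardown-memcache pattern, under the
 * assumptions stated above -- not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct memcache {
	uintptr_t head;		/* "physical" address of first page; 0 when empty */
	unsigned long nr_pages;
};

/* Identity mapping stands in for hyp_virt_to_phys()/phys_to_virt(). */
static uintptr_t virt_to_phys(void *va) { return (uintptr_t)va; }
static void *phys_to_virt(uintptr_t pa) { return (void *)pa; }

/* "EL2" side: link a page into the cache through its first word. */
static void push_page(struct memcache *mc, void *va)
{
	*(uintptr_t *)va = mc->head;
	mc->head = virt_to_phys(va);
	mc->nr_pages++;
}

/* "Host" side: pop every page and hand it back to the allocator. */
static void drain(struct memcache *mc)
{
	while (mc->nr_pages) {
		void *va = phys_to_virt(mc->head);

		mc->head = *(uintptr_t *)va;
		mc->nr_pages--;
		free(va);
	}
}

int main(void)
{
	struct memcache mc = { 0, 0 };

	/* Stand-in for teardown_donated_memory() pushing donated pages. */
	for (int i = 0; i < 4; i++)
		push_page(&mc, aligned_alloc(PAGE_SIZE, PAGE_SIZE));

	printf("%lu pages queued for reclaim\n", mc.nr_pages);
	drain(&mc);	/* host reclaims without tracking EL2 allocations */
	return 0;
}

This also suggests why the real code pushes physical addresses: as the diff below
shows, EL2 unmaps its own view of the pages (unmap_donated_memory_noclear()) right
after queuing them, and the host walks the list later through its own mapping.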
Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe/pkvm.c')
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/pkvm.c | 27 +++++++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 0768307566d4..81835c2f4c5a 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -393,7 +393,7 @@ static void unmap_donated_memory(void *va, size_t size)
 	__unmap_donated_memory(va, size);
 }
 
-static void __maybe_unused unmap_donated_memory_noclear(void *va, size_t size)
+static void unmap_donated_memory_noclear(void *va, size_t size)
 {
 	if (!va)
 		return;
@@ -527,8 +527,21 @@ unlock:
 	return ret;
 }
 
+static void
+teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
+{
+	size = PAGE_ALIGN(size);
+	memset(addr, 0, size);
+
+	for (void *start = addr; start < addr + size; start += PAGE_SIZE)
+		push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+	unmap_donated_memory_noclear(addr, size);
+}
+
 int __pkvm_teardown_vm(pkvm_handle_t handle)
 {
+	struct kvm_hyp_memcache *mc;
 	struct pkvm_hyp_vm *hyp_vm;
 	struct kvm *host_kvm;
 	unsigned int idx;
@@ -547,25 +560,27 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 		goto err_unlock;
 	}
 
+	host_kvm = hyp_vm->host_kvm;
+
 	/* Ensure the VMID is clean before it can be reallocated */
 	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
 	remove_vm_table_entry(handle);
 	hyp_spin_unlock(&vm_table_lock);
 
 	/* Reclaim guest pages (including page-table pages) */
-	reclaim_guest_pages(hyp_vm);
+	mc = &host_kvm->arch.pkvm.teardown_mc;
+	reclaim_guest_pages(hyp_vm, mc);
 	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
 
-	/* Return the metadata pages to the host */
+	/* Push the metadata pages to the teardown memcache */
 	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
 		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
 
-		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
	}
 
-	host_kvm = hyp_vm->host_kvm;
 	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
-	unmap_donated_memory(hyp_vm, vm_size);
+	teardown_donated_memory(mc, hyp_vm, vm_size);
 	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
 
 	return 0;