path: root/arch/arm64/kvm/reset.c
author     Oliver Upton <oliver.upton@linux.dev>   2023-02-14 01:32:14 +0300
committer  Oliver Upton <oliver.upton@linux.dev>   2023-02-14 01:32:40 +0300
commit     e8789ab7047a824fa415948d511634fea0c3aea3 (patch)
tree       35d57257a8d1b28fb1fe734fc3fde8c36802f2db /arch/arm64/kvm/reset.c
parent     619cec00857f21dbc6db5ef9e0b9c613479f3745 (diff)
parent     5f623a598d128dacadf95c9d14f7c8b1313a3dc4 (diff)
download   linux-e8789ab7047a824fa415948d511634fea0c3aea3.tar.xz
Merge branch kvm-arm64/virtual-cache-geometry into kvmarm/next
* kvm-arm64/virtual-cache-geometry:
  : Virtualized cache geometry for KVM guests, courtesy of Akihiko Odaki.
  :
  : KVM/arm64 has always exposed the host cache geometry directly to the
  : guest, even though non-secure software should never perform CMOs by
  : Set/Way. This was slightly wrong, as the cache geometry was derived from
  : the PE on which the vCPU thread was running and not a sanitized value.
  :
  : All together this leads to issues migrating VMs on heterogeneous
  : systems, as the cache geometry saved/restored could be inconsistent.
  :
  : KVM/arm64 now presents 1 level of cache with 1 set and 1 way. The cache
  : geometry is entirely controlled by userspace, such that migrations from
  : older kernels continue to work.
  KVM: arm64: Mark some VM-scoped allocations as __GFP_ACCOUNT
  KVM: arm64: Normalize cache configuration
  KVM: arm64: Mask FEAT_CCIDX
  KVM: arm64: Always set HCR_TID2
  arm64/cache: Move CLIDR macro definitions
  arm64/sysreg: Add CCSIDR2_EL1
  arm64/sysreg: Convert CCSIDR_EL1 to automatic generation
  arm64: Allow the definition of UNKNOWN system register fields

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
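As a rough illustration of what "1 level of cache with 1 set and 1 way" means at the register level, the sketch below builds a CCSIDR_EL1 value in the non-CCIDX layout (LineSize in bits [2:0], Associativity in bits [12:3], NumSets in bits [27:13], where ways and sets are encoded as the count minus one). This is only a hedged example for readers of this log: the macro and function names are invented here and this is not the code merged by the series.

/*
 * Illustrative only: encode one cache level with 1 set and 1 way in the
 * CCSIDR_EL1 (no FEAT_CCIDX) layout. Names are made up for this sketch
 * and do not correspond to the kernel's generated definitions.
 */
#include <stdint.h>

#define CCSIDR_LINESIZE_SHIFT   0   /* log2(words per line) - 2 */
#define CCSIDR_ASSOC_SHIFT      3   /* ways - 1 */
#define CCSIDR_NUMSETS_SHIFT    13  /* sets - 1 */

static uint32_t ccsidr_one_set_one_way(uint32_t line_size_bytes)
{
	uint32_t words = line_size_bytes / 4;   /* 32-bit words per line */
	uint32_t linesize = 0;

	/* LineSize encodes log2(words) - 2, e.g. 2 for a 64-byte line */
	while (words > (4u << linesize))
		linesize++;

	/* Associativity and NumSets both hold (count - 1), so 0 means 1 */
	return (linesize << CCSIDR_LINESIZE_SHIFT) |
	       (0u << CCSIDR_ASSOC_SHIFT) |
	       (0u << CCSIDR_NUMSETS_SHIFT);
}

For a typical 64-byte cache line this yields 0x2: LineSize = 2 with zero-encoded (i.e. single) sets and ways, regardless of the geometry of the host PE the vCPU happens to run on.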
Diffstat (limited to 'arch/arm64/kvm/reset.c')
-rw-r--r--   arch/arm64/kvm/reset.c   1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 2bc74739a6df..4a39da302b88 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -157,6 +157,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 	if (sve_state)
 		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
 	kfree(sve_state);
+	kfree(vcpu->arch.ccsidr);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
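The one-line change above frees a per-vCPU buffer introduced elsewhere in this series ("KVM: arm64: Normalize cache configuration"), which holds the virtual CCSIDR values. The sketch below shows the allocation pattern being balanced here; the helper name, array bound, and call site are assumptions for illustration, not the actual patch. The point is the ownership model: allocate lazily, charge the allocation to the VM (matching the "__GFP_ACCOUNT" commit above), and free unconditionally at vCPU destroy, which is safe because kfree(NULL) is a no-op.

/*
 * Sketch only - helper name and array bound are assumptions.
 * NR_CCSIDR_ENTRIES assumes CSSELR_EL1 can index up to 7 levels x {I,D}.
 */
#define NR_CCSIDR_ENTRIES	14

static int vcpu_alloc_ccsidr_sketch(struct kvm_vcpu *vcpu)
{
	u32 *ccsidr;

	if (vcpu->arch.ccsidr)		/* already populated */
		return 0;

	/* charge the per-vCPU buffer to the VM's memory cgroup */
	ccsidr = kcalloc(NR_CCSIDR_ENTRIES, sizeof(*ccsidr), GFP_KERNEL_ACCOUNT);
	if (!ccsidr)
		return -ENOMEM;

	vcpu->arch.ccsidr = ccsidr;
	return 0;
}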