author     Akihiko Odaki <akihiko.odaki@daynix.com>  2023-01-12 05:38:52 +0300
committer  Oliver Upton <oliver.upton@linux.dev>     2023-01-21 21:09:23 +0300
commit     7af0c2534f4c57b16e92dfca8c5f40fa90fbb3f3 (patch)
tree       81a282bcae6f2429eddee8f0a2dc07c8b862d378 /arch/arm64/include/asm/cache.h
parent     bf48040cd9b0c4d93c635ce222014a594e4e93f2 (diff)
download   linux-7af0c2534f4c57b16e92dfca8c5f40fa90fbb3f3.tar.xz
KVM: arm64: Normalize cache configuration
Before this change, the cache configuration of the physical CPU was
exposed to vcpus. This is problematic because the cache configuration a
vcpu sees varies when it migrates between physical CPUs with different
cache configurations.

Fabricate cache configuration from the sanitized value, which holds the
CTR_EL0 value the userspace sees regardless of which physical CPU the
vcpu resides on.

CLIDR_EL1 and CCSIDR_EL1 are now writable from the userspace so that
the VMM can restore the values saved with the old kernel.

Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Link: https://lore.kernel.org/r/20230112023852.42012-8-akihiko.odaki@daynix.com
[ Oliver: Squash Marc's fix for CCSIDR_EL1.LineSize when set from userspace ]
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
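The user-visible effect of making CCSIDR_EL1 writable is that a VMM can
save the fabricated cache hierarchy on one kernel and restore it on
another. Below is a minimal userspace sketch of that flow, not part of
this patch: it assumes an already-open KVM vcpu file descriptor
(vcpu_fd) and uses the demuxed register encoding through which KVM
exposes CCSIDR_EL1, indexed by the CSSELR_EL1 selector value.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register ID of the CCSIDR_EL1 word selected by 'csselr' (CSSELR_EL1). */
static uint64_t ccsidr_reg_id(uint32_t csselr)
{
	return KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
	       KVM_REG_ARM_DEMUX_ID_CCSIDR |
	       (csselr & KVM_REG_ARM_DEMUX_VAL_MASK);
}

/* Save one CCSIDR_EL1 value, e.g. before migrating off an old kernel. */
static int get_ccsidr(int vcpu_fd, uint32_t csselr, uint32_t *val)
{
	struct kvm_one_reg reg = {
		.id   = ccsidr_reg_id(csselr),
		.addr = (uint64_t)(uintptr_t)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Restore a saved CCSIDR_EL1 value into the vcpu on the new kernel. */
static int set_ccsidr(int vcpu_fd, uint32_t csselr, uint32_t val)
{
	struct kvm_one_reg reg = {
		.id   = ccsidr_reg_id(csselr),
		.addr = (uint64_t)(uintptr_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Per the commit message, such writes only succeed when they are
consistent with the sanitized configuration; the squashed fix covers
CCSIDR_EL1.LineSize set from userspace in particular.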
Diffstat (limited to 'arch/arm64/include/asm/cache.h')
-rw-r--r--  arch/arm64/include/asm/cache.h | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index ab7133654a72..a51e6e8f3171 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -22,6 +22,9 @@
 #define CLIDR_CTYPE(clidr, level) \
 	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
 
+/* Ttypen, bits [2(n - 1) + 34 : 2(n - 1) + 33], for n = 1 to 7 */
+#define CLIDR_TTYPE_SHIFT(level)	(2 * ((level) - 1) + CLIDR_EL1_Ttypen_SHIFT)
+
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
  * sure that all such allocations are cache aligned. Otherwise,
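For context on the macro being added: Ttype<n> is a two-bit field per
cache level in CLIDR_EL1, starting at bit 33 for level 1, so
CLIDR_TTYPE_SHIFT(level) yields the bit position of the field for a
1-based level. A minimal sketch of how a consumer could extract it;
the helper name clidr_ttype is hypothetical and not part of this patch:

#include <linux/types.h>
#include <asm/cache.h>
#include <asm/sysreg.h>

/* Hypothetical helper: return the 2-bit Ttype<n> field for cache level n. */
static inline unsigned int clidr_ttype(u64 clidr, unsigned int level)
{
	/* CLIDR_TTYPE_SHIFT(level) = 2 * (level - 1) + CLIDR_EL1_Ttypen_SHIFT */
	return (clidr >> CLIDR_TTYPE_SHIFT(level)) & 0x3;
}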