author     Peter Gonda <pgonda@google.com>          2024-02-23 03:42:54 +0300
committer  Sean Christopherson <seanjc@google.com>  2024-02-29 03:39:49 +0300
commit     be1bd4c5394ff7eb6f14aaf8005824ed1946bb82
tree       a7c522307e18d941139c37b6f0ffd7d9c88dbe97 /tools/testing/selftests/kvm/lib
parent     31e00dae72fda939a084cda86b068ac9c302a2d3
KVM: selftests: Allow tagging protected memory in guest page tables
Add support for tagging and untagging guest physical addresses, e.g. to allow x86's SEV and TDX guests to embed shared vs. private information in the GPA. SEV (encryption, a.k.a. C-bit) and TDX (shared, a.k.a. S-bit) steal bits from the guest's physical address space; those bits are consumed by the CPU as metadata, i.e. they effectively alias the "real" GPA.

Implement generic "tagging" so that the shared vs. private metadata can be managed by x86 without bleeding too many details into common code.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Vishal Annapurve <vannapurve@google.com>
Cc: Ackerly Tng <ackerleytng@google.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Michael Roth <michael.roth@amd.com>
Tested-by: Carlos Bilbao <carlos.bilbao@amd.com>
Originally-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Peter Gonda <pgonda@google.com>
Link: https://lore.kernel.org/r/20240223004258.3104051-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
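vm_untag_gpa(), used throughout the diff below, is introduced elsewhere in the series rather than in this patch. A minimal sketch of such a helper, assuming the arch code records its tag bits in a per-VM mask (the gpa_tag_mask field name is an assumption here):

    /*
     * Sketch: strip arch tag bits (e.g. SEV's C-bit, TDX's S-bit) from a GPA.
     * The mask is zero for VMs without protected memory, so this is a no-op
     * for them.
     */
    static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
    {
            return gpa & ~vm->gpa_tag_mask;
    }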
Diffstat (limited to 'tools/testing/selftests/kvm/lib')
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c          | 17
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c  | 15
2 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index a4ef5185bb03..5d562f9698e7 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1546,6 +1546,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
 	struct userspace_mem_region *region;
 
+	gpa = vm_untag_gpa(vm, gpa);
+
 	region = userspace_mem_region_find(vm, gpa, gpa);
 	if (!region) {
 		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
@@ -2254,3 +2256,18 @@ void __attribute((constructor)) kvm_selftest_init(void)
 {
 	kvm_selftest_arch_init();
 }
+
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+{
+	sparsebit_idx_t pg = 0;
+	struct userspace_mem_region *region;
+
+	if (!vm_arch_has_protected_memory(vm))
+		return false;
+
+	region = userspace_mem_region_find(vm, paddr, paddr);
+	TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+
+	pg = paddr >> vm->page_shift;
+	return sparsebit_is_set(region->protected_phy_pages, pg);
+}
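A caller-side sketch of why addr_gpa2hva() untags first (hypothetical test code, not part of the patch; the paddr_min/memslot arguments and the use of vm->arch.c_bit are illustrative assumptions):

    /*
     * Both the raw GPA and its C-bit-tagged alias should resolve to the
     * same host virtual address, because addr_gpa2hva() strips the tag
     * before the memslot lookup.
     */
    vm_paddr_t gpa = vm_phy_page_alloc(vm, 0x100000, 0);

    if (vm_is_gpa_protected(vm, gpa))
            TEST_ASSERT(addr_gpa2hva(vm, gpa | vm->arch.c_bit) == addr_gpa2hva(vm, gpa),
                        "Tagged and untagged GPA should alias the same HVA");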
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index b9b6cb730a08..534f36a8a5e8 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -157,6 +157,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
 {
 	uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
 
+	paddr = vm_untag_gpa(vm, paddr);
+
 	if (!(*pte & PTE_PRESENT_MASK)) {
 		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
 		if (current_level == target_level)
@@ -200,6 +202,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 		    "Physical address beyond maximum supported,\n"
 		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
 		    paddr, vm->max_gfn, vm->page_size);
+	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
+		    "Unexpected bits in paddr: %lx", paddr);
 
 	/*
 	 * Allocate upper level page tables, if not already present. Return
@@ -222,6 +226,15 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
 		    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
 	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
+
+	/*
+	 * Neither SEV nor TDX supports shared page tables, so only the final
+	 * leaf PTE needs the C/S-bit set manually.
+	 */
+	if (vm_is_gpa_protected(vm, paddr))
+		*pte |= vm->arch.c_bit;
+	else
+		*pte |= vm->arch.s_bit;
 }
 
 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
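For a VM without protected memory, both arch.c_bit and arch.s_bit are zero, so both arms above are no-ops. A sketch of how a SEV setup path might derive the C-bit (CPUID 0x8000001F.EBX[5:0] reporting the C-bit position is architectural for SEV; where this assignment lives, and the gpa_tag_mask field, are assumptions):

    /* Hypothetical SEV init snippet, e.g. in an arch setup helper. */
    const struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(0x8000001f);

    vm->arch.c_bit = BIT_ULL(entry->ebx & 0x3f);  /* C-bit position, EBX bits 5:0 */
    vm->gpa_tag_mask = vm->arch.c_bit;            /* assumed per-VM tag mask */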
@@ -496,7 +509,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
 	 * address bits to be zero.
 	 */
-	return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
+	return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
 }
 
 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
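With the returned PA untagged, translations compare cleanly against allocation-time GPAs even when the leaf PTE carries the C/S-bit. A round-trip check a test might make, via the common addr_gva2gpa() wrapper around addr_arch_gva2gpa() (sketch; assumes vaddr was previously mapped to gpa with virt_pg_map()):

    /* The tag picked up from the leaf PTE must not leak to the caller. */
    TEST_ASSERT(addr_gva2gpa(vm, gva) == gpa,
                "gva2gpa should return the untagged GPA");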