author     Linus Torvalds <torvalds@linux-foundation.org>  2022-08-05 00:59:54 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-08-05 00:59:54 +0300
commit     7c5c3a6177fa9646884114fc7f2e970b0bc50dc9 (patch)
tree       956857522574ae7cb07d2227dc16e53d7e9e00e7 /tools/testing/selftests/kvm/max_guest_memory_test.c
parent     f0a892f599c46af673e47418c47c15e69a7b67f4 (diff)
parent     281106f938d3daaea6f8b6723a8217a2a1ef6936 (diff)
download   linux-7c5c3a6177fa9646884114fc7f2e970b0bc50dc9.tar.xz
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
 "Quite a large pull request due to a selftest API overhaul and some
  patches that had come in too late for 5.19.

  ARM:
   - Unwinder implementations for both nVHE modes (classic and protected),
     complete with an overflow stack
   - Rework of the sysreg access from userspace, with a complete rewrite of
     the vgic-v3 view to align with the rest of the infrastructure
   - Disaggregation of the vcpu flags into separate sets to better track
     their use model
   - A fix for the GICv2-on-v3 selftest
   - A small set of cosmetic fixes

  RISC-V:
   - Track ISA extensions used by Guest using bitmap
   - Added system instruction emulation framework
   - Added CSR emulation framework
   - Added gfp_custom flag in struct kvm_mmu_memory_cache
   - Added G-stage ioremap() and iounmap() functions
   - Added support for Svpbmt inside Guest

  s390:
   - add an interface to provide a hypervisor dump for secure guests
   - improve selftests to use TAP interface
   - enable interpretive execution of zPCI instructions (for PCI passthrough)
   - First part of deferred teardown
   - CPU Topology
   - PV attestation
   - Minor fixes

  x86:
   - Permit guests to ignore single-bit ECC errors
   - Intel IPI virtualization
   - Allow getting/setting pending triple fault with KVM_GET/SET_VCPU_EVENTS
   - PEBS virtualization
   - Simplify PMU emulation by just using PERF_TYPE_RAW events
   - More accurate event reinjection on SVM (avoid retrying instructions)
   - Allow getting/setting the state of the speaker port data bit
   - Refuse starting the kvm-intel module if VM-Entry/VM-Exit controls are
     inconsistent
   - "Notify" VM exit (detect microarchitectural hangs) for Intel
   - Use try_cmpxchg64 instead of cmpxchg64
   - Ignore benign host accesses to PMU MSRs when PMU is disabled
   - Allow disabling KVM's "MONITOR/MWAIT are NOPs!" behavior
   - Allow NX huge page mitigation to be disabled on a per-vm basis
   - Port eager page splitting to shadow MMU as well
   - Enable CMCI capability by default and handle injected UCNA errors
   - Expose pid of vcpu threads in debugfs
   - x2AVIC support for AMD
   - cleanup PIO emulation
   - Fixes for LLDT/LTR emulation
   - Don't require refcounted "struct page" to create huge SPTEs
   - Miscellaneous cleanups:
      - MCE MSR emulation
      - Use separate namespaces for guest PTEs and shadow PTEs bitmasks
      - PIO emulation
      - Reorganize rmap API, mostly around rmap destruction
      - Do not workaround very old KVM bugs for L0 that runs with nesting enabled
      - new selftests API for CPUID

  Generic:
   - Fix races in gfn->pfn cache refresh; do not pin pages tracked by the cache
   - new selftests API using struct kvm_vcpu instead of a (vm, id) tuple"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (606 commits)
  selftests: kvm: set rax before vmcall
  selftests: KVM: Add exponent check for boolean stats
  selftests: KVM: Provide descriptive assertions in kvm_binary_stats_test
  selftests: KVM: Check stat name before other fields
  KVM: x86/mmu: remove unused variable
  RISC-V: KVM: Add support for Svpbmt inside Guest/VM
  RISC-V: KVM: Use PAGE_KERNEL_IO in kvm_riscv_gstage_ioremap()
  RISC-V: KVM: Add G-stage ioremap() and iounmap() functions
  KVM: Add gfp_custom flag in struct kvm_mmu_memory_cache
  RISC-V: KVM: Add extensible CSR emulation framework
  RISC-V: KVM: Add extensible system instruction emulation framework
  RISC-V: KVM: Factor-out instruction emulation into separate sources
  RISC-V: KVM: move preempt_disable() call in kvm_arch_vcpu_ioctl_run
  RISC-V: KVM: Make kvm_riscv_guest_timer_init a void function
  RISC-V: KVM: Fix variable spelling mistake
  RISC-V: KVM: Improve ISA extension by using a bitmap
  KVM, x86/mmu: Fix the comment around kvm_tdp_mmu_zap_leafs()
  KVM: SVM: Dump Virtual Machine Save Area (VMSA) to klog
  KVM: x86/mmu: Treat NX as a valid SPTE bit for NPT
  KVM: x86: Do not block APIC write for non ICR registers
  ...
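The "new selftests API using struct kvm_vcpu" bullet is the change exercised by the diff to max_guest_memory_test.c below: per-vCPU helpers now take a struct kvm_vcpu pointer instead of a (vm, id) tuple. The following is a minimal sketch of the new calling convention, not part of this commit; it assumes the selftest headers used elsewhere in this tree, and demo()/demo_guest_code() are hypothetical names for illustration only.

    #include "kvm_util.h"
    #include "test_util.h"

    /* Hypothetical guest entry point; it just signals completion via ucall. */
    static void demo_guest_code(void)
    {
            GUEST_DONE();
    }

    static void demo(void)
    {
            struct kvm_vcpu *vcpu;
            struct kvm_vm *vm;

            /* New API: VM creation hands back struct kvm_vcpu pointers. */
            vm = vm_create_with_one_vcpu(&vcpu, demo_guest_code);

            /* Per-vCPU helpers take the vcpu directly, not (vm, id). */
            vcpu_run(vcpu);                               /* was vcpu_run(vm, id)        */
            ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); /* was get_ucall(vm, id, NULL) */

            kvm_vm_free(vm);
    }

The same pattern drives the multi-vCPU variant in the diff: vm_create_with_vcpus() fills a caller-provided array of struct kvm_vcpu pointers, which are then passed to the worker threads instead of integer vCPU IDs.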
Diffstat (limited to 'tools/testing/selftests/kvm/max_guest_memory_test.c')
-rw-r--r--  tools/testing/selftests/kvm/max_guest_memory_test.c  53
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 15f046e19cb2..9a6e4f3ad6b5 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -28,8 +28,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
}
struct vcpu_info {
- struct kvm_vm *vm;
- uint32_t id;
+ struct kvm_vcpu *vcpu;
uint64_t start_gpa;
uint64_t end_gpa;
};
@@ -52,45 +51,45 @@ static void rendezvous_with_boss(void)
}
}
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpu_id)
+static void run_vcpu(struct kvm_vcpu *vcpu)
{
- vcpu_run(vm, vcpu_id);
- ASSERT_EQ(get_ucall(vm, vcpu_id, NULL), UCALL_DONE);
+ vcpu_run(vcpu);
+ ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
}
static void *vcpu_worker(void *data)
{
- struct vcpu_info *vcpu = data;
+ struct vcpu_info *info = data;
+ struct kvm_vcpu *vcpu = info->vcpu;
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
struct kvm_regs regs;
- vcpu_args_set(vm, vcpu->id, 3, vcpu->start_gpa, vcpu->end_gpa,
- vm_get_page_size(vm));
+ vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
/* Snapshot regs before the first run. */
- vcpu_regs_get(vm, vcpu->id, &regs);
+ vcpu_regs_get(vcpu, &regs);
rendezvous_with_boss();
- run_vcpu(vm, vcpu->id);
+ run_vcpu(vcpu);
rendezvous_with_boss();
- vcpu_regs_set(vm, vcpu->id, &regs);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_regs_set(vcpu, &regs);
+ vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
/* Toggle CR0.WP to trigger a MMU context reset. */
sregs.cr0 ^= X86_CR0_WP;
#endif
- vcpu_sregs_set(vm, vcpu->id, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
rendezvous_with_boss();
- run_vcpu(vm, vcpu->id);
+ run_vcpu(vcpu);
rendezvous_with_boss();
return NULL;
}
-static pthread_t *spawn_workers(struct kvm_vm *vm, uint64_t start_gpa,
- uint64_t end_gpa)
+static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
+ uint64_t start_gpa, uint64_t end_gpa)
{
struct vcpu_info *info;
uint64_t gpa, nr_bytes;
@@ -104,12 +103,11 @@ static pthread_t *spawn_workers(struct kvm_vm *vm, uint64_t start_gpa,
TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");
nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
- ~((uint64_t)vm_get_page_size(vm) - 1);
+ ~((uint64_t)vm->page_size - 1);
TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);
for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
- info[i].vm = vm;
- info[i].id = i;
+ info[i].vcpu = vcpus[i];
info[i].start_gpa = gpa;
info[i].end_gpa = gpa + nr_bytes;
pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
@@ -172,6 +170,7 @@ int main(int argc, char *argv[])
uint64_t max_gpa, gpa, slot_size, max_mem, i;
int max_slots, slot, opt, fd;
bool hugepages = false;
+ struct kvm_vcpu **vcpus;
pthread_t *threads;
struct kvm_vm *vm;
void *mem;
@@ -215,9 +214,12 @@ int main(int argc, char *argv[])
}
}
- vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
+ vcpus = malloc(nr_vcpus * sizeof(*vcpus));
+ TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
+
+ vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
- max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm);
+ max_gpa = vm->max_gfn << vm->page_shift;
TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
fd = kvm_memfd_alloc(slot_size, hugepages);
@@ -227,7 +229,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");
/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
- for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+ for (i = 0; i < slot_size; i += vm->page_size)
((uint8_t *)mem)[i] = 0xaa;
gpa = 0;
@@ -246,13 +248,16 @@ int main(int argc, char *argv[])
for (i = 0; i < slot_size; i += size_1gb)
__virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
- for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+ for (i = 0; i < slot_size; i += vm->page_size)
virt_pg_map(vm, gpa + i, gpa + i);
#endif
}
atomic_set(&rendezvous, nr_vcpus + 1);
- threads = spawn_workers(vm, start_gpa, gpa);
+ threads = spawn_workers(vm, vcpus, start_gpa, gpa);
+
+ free(vcpus);
+ vcpus = NULL;
pr_info("Running with %lugb of guest memory and %u vCPUs\n",
(gpa - start_gpa) / size_1gb, nr_vcpus);