Diffstat (limited to 'tools/testing/selftests/kvm/access_tracking_perf_test.c')
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c  |  52
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 76c583a07ea2..3c7defd34f56 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -44,8 +44,9 @@
#include "kvm_util.h"
#include "test_util.h"
-#include "perf_test_util.h"
+#include "memstress.h"
#include "guest_modes.h"
+#include "processor.h"
/* Global variable used to synchronize all of the vCPU threads. */
static int iteration;
@@ -58,9 +59,6 @@ static enum {
ITERATION_MARK_IDLE,
} iteration_work;
-/* Set to true when vCPU threads should exit. */
-static bool done;
-
/* The iteration that was last completed by each vCPU. */
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
@@ -126,7 +124,7 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
}
static void mark_vcpu_memory_idle(struct kvm_vm *vm,
- struct perf_test_vcpu_args *vcpu_args)
+ struct memstress_vcpu_args *vcpu_args)
{
int vcpu_idx = vcpu_args->vcpu_idx;
uint64_t base_gva = vcpu_args->gva;
@@ -148,7 +146,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
for (page = 0; page < pages; page++) {
- uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
+ uint64_t gva = base_gva + page * memstress_args.guest_page_size;
uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
if (!pfn) {
@@ -180,16 +178,21 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
* access tracking but low enough as to not make the test too brittle
* over time and across architectures.
*
- * Note that when run in nested virtualization, this check will trigger
- * much more frequently because TLB size is unlimited and since no flush
- * happens, much more pages are cached there and guest won't see the
- * "idle" bit cleared.
+ * When running the guest as a nested VM, "warn" instead of asserting
+ * as the TLB size is effectively unlimited and KVM doesn't
+ * explicitly flush the TLB when aging SPTEs. As a result, more pages
+ * are cached and the guest won't see the "idle" bit cleared.
*/
- if (still_idle < pages / 10)
- printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
- "out of %" PRIu64 "), this will affect performance results"
- ".\n",
+ if (still_idle >= pages / 10) {
+#ifdef __x86_64__
+ TEST_ASSERT(this_cpu_has(X86_FEATURE_HYPERVISOR),
+ "vCPU%d: Too many pages still idle (%lu out of %lu)",
+ vcpu_idx, still_idle, pages);
+#endif
+ printf("WARNING: vCPU%d: Too many pages still idle (%lu out of %lu), "
+ "this will affect performance results.\n",
vcpu_idx, still_idle, pages);
+ }
close(page_idle_fd);
close(pagemap_fd);
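For context, the hunk above drives the kernel's idle page tracking interface at /sys/kernel/mm/page_idle/bitmap: each 64-bit word in the bitmap covers 64 page frames, writing a set bit marks the corresponding page idle, and a bit that is still set on a later read means the page has not been touched since it was marked. A minimal sketch of helpers in that style (the example_* names are illustrative, not the test's actual code, and error handling is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

static void example_mark_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bit = 1ULL << (pfn % 64);

	/* 8 bytes per 64-pfn word in /sys/kernel/mm/page_idle/bitmap. */
	pwrite(page_idle_fd, &bit, sizeof(bit), (pfn / 64) * sizeof(bit));
}

static bool example_page_is_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits = 0;

	pread(page_idle_fd, &bits, sizeof(bits), (pfn / 64) * sizeof(bits));
	return bits & (1ULL << (pfn % 64));
}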
@@ -211,7 +214,7 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
int last_iteration = *current_iteration;
do {
- if (READ_ONCE(done))
+ if (READ_ONCE(memstress_args.stop_vcpus))
return false;
*current_iteration = READ_ONCE(iteration);
@@ -220,10 +223,10 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
return true;
}
-static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
{
struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- struct kvm_vm *vm = perf_test_args.vm;
+ struct kvm_vm *vm = memstress_args.vm;
int vcpu_idx = vcpu_args->vcpu_idx;
int current_iteration = 0;
@@ -279,7 +282,7 @@ static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *descripti
static void access_memory(struct kvm_vm *vm, int nr_vcpus,
enum access_type access, const char *description)
{
- perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
+ memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
iteration_work = ITERATION_ACCESS_MEMORY;
run_iteration(vm, nr_vcpus, description);
}
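Note that the write-access knob changed semantics along with its name: perf_test_set_wr_fract() took a fraction (write 1 out of every N accesses), while memstress_set_write_percent() takes a percentage, which is why INT_MAX maps to 0 and 1 maps to 100. Roughly:

/* Old API: write 1 of every N accesses. */
perf_test_set_wr_fract(vm, INT_MAX);	/* ~never write, i.e. read-only */
perf_test_set_wr_fract(vm, 1);		/* write on every access */

/* New API: write the given percentage of accesses. */
memstress_set_write_percent(vm, 0);	/* read-only */
memstress_set_write_percent(vm, 100);	/* write-only */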
@@ -303,10 +306,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vm *vm;
int nr_vcpus = params->nr_vcpus;
- vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
+ vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
params->backing_src, !overlap_memory_access);
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
+ memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
pr_info("\n");
access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
@@ -321,11 +324,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
mark_memory_idle(vm, nr_vcpus);
access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");
- /* Set done to signal the vCPU threads to exit */
- done = true;
-
- perf_test_join_vcpu_threads(nr_vcpus);
- perf_test_destroy_vm(vm);
+ memstress_join_vcpu_threads(nr_vcpus);
+ memstress_destroy_vm(vm);
}
static void help(char *name)
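Taken together with the stop_vcpus change earlier in the diff, run_test() now follows the common memstress lifecycle. A rough sketch of the flow, assuming (as the diff implies, since nothing in this file sets the flag) that memstress_join_vcpu_threads() is what sets memstress_args.stop_vcpus so the workers fall out of their spin loops:

vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
			 params->backing_src, !overlap_memory_access);
memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);

/* ... populate, write, mark idle, read, repeat ... */

/* Presumably sets memstress_args.stop_vcpus and joins the worker threads,
 * replacing the file-local "done" flag the old code set by hand. */
memstress_join_vcpu_threads(nr_vcpus);
memstress_destroy_vm(vm);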
@@ -368,7 +368,7 @@ int main(int argc, char *argv[])
params.vcpu_memory_bytes = parse_size(optarg);
break;
case 'v':
- params.nr_vcpus = atoi(optarg);
+ params.nr_vcpus = atoi_positive("Number of vCPUs", optarg);
break;
case 'o':
overlap_memory_access = true;
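The last hunk swaps the raw atoi() for atoi_positive() so that a bogus "-v" argument fails loudly instead of silently becoming zero or negative. The real helper lives in the selftests' test_util code; a sketch of the behaviour it is assumed to provide (the example_* name is illustrative, TEST_ASSERT comes from test_util.h):

#include <stdint.h>
#include <stdlib.h>

static inline uint32_t example_atoi_positive(const char *name, const char *arg)
{
	char *end;
	long val = strtol(arg, &end, 0);

	/* Reject empty, trailing-garbage, zero, and negative inputs. */
	TEST_ASSERT(*arg && !*end && val > 0,
		    "%s must be a positive number, got '%s'", name, arg);
	return val;
}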