@@ -125,10 +125,10 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
"Set page_idle bits for PFN 0x%" PRIx64, pfn);
}
-static void mark_vcpu_memory_idle(struct kvm_vm *vm,
- struct perf_test_vcpu_args *vcpu_args)
+static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_idx)
{
- int vcpu_idx = vcpu_args->vcpu_idx;
+ struct perf_test_vcpu_args *vcpu_args =
+ &perf_test_args.vcpu_args[vcpu_idx];
uint64_t base_gva = vcpu_args->gva;
uint64_t pages = vcpu_args->pages;
uint64_t page;
@@ -220,11 +220,10 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
return true;
}
-static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_thread_main(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- struct kvm_vm *vm = perf_test_args.vm;
- int vcpu_idx = vcpu_args->vcpu_idx;
+ struct kvm_vm *vm = vcpu->vm;
+ int vcpu_idx = vcpu->id;
int current_iteration = 0;
while (spin_wait_for_next_iteration(&current_iteration)) {
@@ -234,7 +233,7 @@ static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
assert_ucall(vcpu, UCALL_SYNC);
break;
case ITERATION_MARK_IDLE:
- mark_vcpu_memory_idle(vm, vcpu_args);
+ mark_vcpu_memory_idle(vm, vcpu_idx);
break;
};
@@ -306,7 +305,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
params->backing_src, !overlap_memory_access);
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
+ perf_test_start_vcpu_threads(vm, vcpu_thread_main);
pr_info("\n");
access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
@@ -324,7 +323,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Set done to signal the vCPU threads to exit */
done = true;
- perf_test_join_vcpu_threads(nr_vcpus);
perf_test_destroy_vm(vm);
}
@@ -14,7 +14,6 @@
#include <stdlib.h>
#include <time.h>
#include <poll.h>
-#include <pthread.h>
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
@@ -42,10 +41,9 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- int vcpu_idx = vcpu_args->vcpu_idx;
+ int vcpu_idx = vcpu->id;
struct kvm_run *run = vcpu->run;
struct timespec start;
struct timespec ts_diff;
@@ -336,10 +334,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Finished creating vCPUs and starting uffd threads\n");
clock_gettime(CLOCK_MONOTONIC, &start);
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+ perf_test_start_vcpu_threads(vm, vcpu_worker);
pr_info("Started all vCPUs\n");
- perf_test_join_vcpu_threads(nr_vcpus);
ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n");
@@ -11,7 +11,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
-#include <pthread.h>
#include <linux/bitmap.h>
#include "kvm_util.h"
@@ -67,10 +66,11 @@ static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = vcpu_args->vcpu;
- int vcpu_idx = vcpu_args->vcpu_idx;
+ int vcpu_idx = vcpu->id;
+ struct perf_test_vcpu_args *vcpu_args =
+ &perf_test_args.vcpu_args[vcpu_idx];
uint64_t pages_count = 0;
struct kvm_run *run;
struct timespec start;
@@ -248,7 +248,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
for (i = 0; i < nr_vcpus; i++)
vcpu_last_completed_iteration[i] = -1;
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+ perf_test_start_vcpu_threads(vm, vcpu_worker);
/* Allow the vCPUs to populate memory */
pr_debug("Starting iteration %d - Populating\n", iteration);
@@ -329,7 +329,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* wait for them to exit.
*/
host_quit = true;
- perf_test_join_vcpu_threads(nr_vcpus);
avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
@@ -24,8 +24,7 @@ struct perf_test_vcpu_args {
uint64_t gva;
uint64_t pages;
- /* Only used by the host userspace part of the vCPU thread */
- struct kvm_vcpu *vcpu;
+ /* Used by the guest to check that its data is not corrupted */
int vcpu_idx;
};
@@ -53,11 +52,11 @@ void perf_test_destroy_vm(struct kvm_vm *vm);
void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
-void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
-void perf_test_join_vcpu_threads(int vcpus);
+void perf_test_start_vcpu_threads(struct kvm_vm *vm,
+ void (*vcpu_fn)(struct kvm_vcpu *vcpu));
void perf_test_guest_code(uint32_t vcpu_id);
uint64_t perf_test_nested_pages(int nr_vcpus);
-void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
+void perf_test_setup_nested(struct kvm_vm *vm);
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
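
The kvm_util side of this series (vm->vcpus[], vcpu->id, vcpu->private_data, and the
vCPU-thread helpers) is not shown in this excerpt. Judging purely from the call sites
in the hunks above and below, the new declarations would look roughly like the sketch
here; any field or parameter name not visible in the diff is an assumption, not
something this patch confirms.

/* Sketch only, inferred from call sites; not part of this diff. */
struct kvm_vcpu {
	int id;			/* doubles as the vCPU index in these tests */
	struct kvm_vm *vm;	/* back-pointer used by vcpu_thread_main() */
	struct kvm_run *run;
	pthread_t thread;	/* pthread backing this vCPU (assumed field) */
	void *private_data;	/* zero-allocated per-thread data (assumed field) */
	/* ... existing fields ... */
};

/* Spawn one pthread per vCPU, handing each zeroed private_data of the given size. */
void vm_vcpu_threads_create(struct kvm_vm *vm, void *(*thread_fn)(void *),
			    uint32_t private_data_size);

/* Join every vCPU thread created by vm_vcpu_threads_create(). */
void vm_vcpu_threads_join(struct kvm_vm *vm);
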
@@ -16,28 +16,17 @@ struct perf_test_args perf_test_args;
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-struct vcpu_thread {
- /* The index of the vCPU. */
- int vcpu_idx;
-
- /* The pthread backing the vCPU. */
- pthread_t thread;
-
+struct vcpu_thread_data {
/* Set to true once the vCPU thread is up and running. */
bool running;
};
-/* The vCPU threads involved in this test. */
-static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
-
/* The function run by each vCPU thread, as provided by the test. */
-static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
+static void (*vcpu_thread_fn)(struct kvm_vcpu *);
/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
-static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
-
/*
* Continuously write to the first 8 bytes of each page in the
* specified region.
@@ -71,7 +60,6 @@ void perf_test_guest_code(uint32_t vcpu_idx)
}
void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
- struct kvm_vcpu *vcpus[],
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
@@ -82,7 +70,6 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
for (i = 0; i < nr_vcpus; i++) {
vcpu_args = &pta->vcpu_args[i];
- vcpu_args->vcpu = vcpus[i];
vcpu_args->vcpu_idx = i;
if (partition_vcpu_memory_access) {
@@ -98,7 +85,7 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
vcpu_args->gpa = pta->gpa;
}
- vcpu_args_set(vcpus[i], 1, i);
+ vcpu_args_set(vm->vcpus[i], 1, i);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
i, vcpu_args->gpa, vcpu_args->gpa +
@@ -153,7 +140,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
* effect as KVM allows aliasing HVAs in memslots.
*/
vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
- perf_test_guest_code, vcpus);
+ perf_test_guest_code, NULL);
pta->vm = vm;
@@ -201,12 +188,12 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
/* Do mapping for the demand paging memory slot */
virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);
- perf_test_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
+ perf_test_setup_vcpus(vm, nr_vcpus, vcpu_memory_bytes,
partition_vcpu_memory_access);
if (pta->nested) {
pr_info("Configuring vCPUs to run in L2 (nested).\n");
- perf_test_setup_nested(vm, nr_vcpus, vcpus);
+ perf_test_setup_nested(vm);
}
ucall_init(vm, NULL);
@@ -219,6 +206,9 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
void perf_test_destroy_vm(struct kvm_vm *vm)
{
+ vm_vcpu_threads_join(vm);
+ pr_info("All vCPU threads joined\n");
+
ucall_uninit(vm);
kvm_vm_free(vm);
}
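
perf_test_destroy_vm() now joins the vCPU threads itself via vm_vcpu_threads_join(),
whose definition is outside this excerpt. A minimal sketch of what such a helper could
look like, assuming the kvm_vcpu fields from the note above:

/* Hypothetical sketch; the real helper would live in kvm_util, not in this diff. */
void vm_vcpu_threads_join(struct kvm_vm *vm)
{
	struct kvm_vcpu *vcpu;
	int i;

	vm_iterate_over_vcpus(vm, vcpu, i) {
		pthread_join(vcpu->thread, NULL);
		free(vcpu->private_data);	/* allocated by vm_vcpu_threads_create() */
		vcpu->private_data = NULL;
	}
}
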
@@ -234,7 +224,7 @@ uint64_t __weak perf_test_nested_pages(int nr_vcpus)
return 0;
}
-void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
+void __weak perf_test_setup_nested(struct kvm_vm *vm)
{
pr_info("%s() not support on this architecture, skipping.\n", __func__);
exit(KSFT_SKIP);
@@ -242,9 +232,11 @@ void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_v
static void *vcpu_thread_main(void *data)
{
- struct vcpu_thread *vcpu = data;
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+ struct vcpu_thread_data *thread_data =
+ (struct vcpu_thread_data *)vcpu->private_data;
- WRITE_ONCE(vcpu->running, true);
+ WRITE_ONCE(thread_data->running, true);
/*
* Wait for all vCPU threads to be up and running before calling the test-
@@ -255,40 +247,30 @@ static void *vcpu_thread_main(void *data)
while (!READ_ONCE(all_vcpu_threads_running))
;
- vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_idx]);
+ vcpu_thread_fn(vcpu);
return NULL;
}
-void perf_test_start_vcpu_threads(int nr_vcpus,
- void (*vcpu_fn)(struct perf_test_vcpu_args *))
+void perf_test_start_vcpu_threads(struct kvm_vm *vm,
+ void (*vcpu_fn)(struct kvm_vcpu *))
{
int i;
+ struct kvm_vcpu *vcpu;
+ struct vcpu_thread_data *thread_data;
vcpu_thread_fn = vcpu_fn;
WRITE_ONCE(all_vcpu_threads_running, false);
- for (i = 0; i < nr_vcpus; i++) {
- struct vcpu_thread *vcpu = &vcpu_threads[i];
-
- vcpu->vcpu_idx = i;
- WRITE_ONCE(vcpu->running, false);
+ /* thread_data->running starts out false: private_data is zero-allocated */
+ vm_vcpu_threads_create(vm, vcpu_thread_main,
+ sizeof(struct vcpu_thread_data));
- pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
- }
-
- for (i = 0; i < nr_vcpus; i++) {
- while (!READ_ONCE(vcpu_threads[i].running))
+ vm_iterate_over_vcpus(vm, vcpu, i) {
+ thread_data = (struct vcpu_thread_data *)vcpu->private_data;
+ while (!READ_ONCE(thread_data->running))
;
}
WRITE_ONCE(all_vcpu_threads_running, true);
}
-
-void perf_test_join_vcpu_threads(int nr_vcpus)
-{
- int i;
-
- for (i = 0; i < nr_vcpus; i++)
- pthread_join(vcpu_threads[i].thread, NULL);
-}
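
The comment above relies on vm_vcpu_threads_create() handing out zeroed private_data,
so thread_data->running needs no explicit initialization. A sketch consistent with
that contract (again assuming the kvm_vcpu fields noted earlier):

/* Hypothetical sketch; calloc() keeps the zero-initialization guarantee. */
void vm_vcpu_threads_create(struct kvm_vm *vm, void *(*thread_fn)(void *),
			    uint32_t private_data_size)
{
	struct kvm_vcpu *vcpu;
	int i;

	vm_iterate_over_vcpus(vm, vcpu, i) {
		/* calloc() so flags such as vcpu_thread_data.running start out false. */
		vcpu->private_data = calloc(1, private_data_size);
		TEST_ASSERT(vcpu->private_data,
			    "Failed to allocate private data for vCPU %d", vcpu->id);

		pthread_create(&vcpu->thread, NULL, thread_fn, vcpu);
	}
}
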
@@ -77,16 +77,17 @@ void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
nested_identity_map_1g(vmx, vm, start, end - start);
}
-void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
+void perf_test_setup_nested(struct kvm_vm *vm)
{
struct vmx_pages *vmx, *vmx0 = NULL;
struct kvm_regs regs;
vm_vaddr_t vmx_gva;
int vcpu_id;
+ struct kvm_vcpu *vcpu;
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ vm_iterate_over_vcpus(vm, vcpu, vcpu_id) {
vmx = vcpu_alloc_vmx(vm, &vmx_gva);
if (vcpu_id == 0) {
@@ -103,9 +104,9 @@ void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
* Override the vCPU to run perf_test_l1_guest_code() which will
* bounce it into L2 before calling perf_test_guest_code().
*/
- vcpu_regs_get(vcpus[vcpu_id], &regs);
+ vcpu_regs_get(vcpu, &regs);
regs.rip = (unsigned long) perf_test_l1_guest_code;
- vcpu_regs_set(vcpus[vcpu_id], &regs);
- vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
+ vcpu_regs_set(vcpu, &regs);
+ vcpu_args_set(vcpu, 2, vmx_gva, vcpu->id);
}
}
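
vm_iterate_over_vcpus() is also introduced outside this excerpt. Given how it is used
above (a VM, a struct kvm_vcpu * cursor, and an int index), it is presumably a
for-each macro along these lines; vm->vcpus[] and vm->nr_vcpus are assumed names:

/* Hypothetical definition; assumes vm->vcpus[] is indexed 0..vm->nr_vcpus-1. */
#define vm_iterate_over_vcpus(vm, vcpu, i)				\
	for ((i) = 0;							\
	     (i) < (vm)->nr_vcpus && ((vcpu) = (vm)->vcpus[(i)]);	\
	     (i)++)
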
@@ -16,7 +16,6 @@
#include <asm/unistd.h>
#include <time.h>
#include <poll.h>
-#include <pthread.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/userfaultfd.h>
@@ -36,9 +35,8 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus = true;
-static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
+static void vcpu_worker(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_run *run;
int ret;
@@ -103,7 +101,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Finished creating vCPUs\n");
- perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
+ perf_test_start_vcpu_threads(vm, vcpu_worker);
pr_info("Started all vCPUs\n");
@@ -112,9 +110,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
run_vcpus = false;
- perf_test_join_vcpu_threads(nr_vcpus);
- pr_info("All vCPU threads joined\n");
-
perf_test_destroy_vm(vm);
}
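
Net effect for a test: the per-test thread boilerplate collapses to the pattern below.
This is a sketch of the new flow, not code from this series; nr_vcpus,
guest_percpu_mem_size, and the worker body stand in for each test's specifics.

static void vcpu_worker(struct kvm_vcpu *vcpu)
{
	/* Per-vCPU test body; loops until the test flips its exit flag. */
}

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct kvm_vm *vm;

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
				 VM_MEM_SRC_ANONYMOUS, true);

	perf_test_start_vcpu_threads(vm, vcpu_worker);

	/* ... run the measurement, then signal the workers to exit ... */

	/* perf_test_destroy_vm() now joins the vCPU threads before freeing the VM. */
	perf_test_destroy_vm(vm);
}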