[v2] selftests/kvm: Allow specifying physical cpu list in demand paging test

Message ID 20230601184256.180413-1-peterx@redhat.com
State New
Series [v2] selftests/kvm: Allow specifying physical cpu list in demand paging test

Commit Message

Peter Xu June 1, 2023, 6:42 p.m. UTC
Mimic the dirty log test to allow specifying physical CPU pinning for vCPU threads.
Put the help message into a general helper, as suggested by Sean.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 tools/testing/selftests/kvm/demand_paging_test.c  | 15 +++++++++++++--
 tools/testing/selftests/kvm/dirty_log_perf_test.c | 12 +-----------
 .../testing/selftests/kvm/include/kvm_util_base.h |  1 +
 tools/testing/selftests/kvm/lib/kvm_util.c        | 15 +++++++++++++++
 4 files changed, 30 insertions(+), 13 deletions(-)
  

Comments

Sean Christopherson June 7, 2023, 12:09 a.m. UTC | #1
On Thu, Jun 01, 2023, Peter Xu wrote:
> Mimic the dirty log test to allow specifying physical CPU pinning for vCPU threads.
> Put the help message into a general helper, as suggested by Sean.
> 
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  tools/testing/selftests/kvm/demand_paging_test.c  | 15 +++++++++++++--
>  tools/testing/selftests/kvm/dirty_log_perf_test.c | 12 +-----------
>  .../testing/selftests/kvm/include/kvm_util_base.h |  1 +
>  tools/testing/selftests/kvm/lib/kvm_util.c        | 15 +++++++++++++++
>  4 files changed, 30 insertions(+), 13 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
> index bdb8e0748154..8581478ed4eb 100644
> --- a/tools/testing/selftests/kvm/demand_paging_test.c
> +++ b/tools/testing/selftests/kvm/demand_paging_test.c
> @@ -220,7 +220,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>  static void help(char *name)
>  {
>  	puts("");
> -	printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-a]\n"
> +	printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-a] [-c cpu_list]\n"

This appears to be based on Anish's unmerged series.

https://lore.kernel.org/all/20230602161921.208564-14-amoorthy@google.com

>  		   "          [-d uffd_delay_usec] [-r readers_per_uffd] [-b memory]\n"
>  		   "          [-s type] [-v vcpus] [-o]\n", name);
>  	guest_modes_help();
> @@ -229,6 +229,7 @@ static void help(char *name)
>  	printf(" -a: Use a single userfaultfd for all of guest memory, instead of\n"
>  		   "     creating one for each region paged by a unique vCPU\n"
>  		   "     Set implicitly with -o, and no effect without -u.\n");
> +	kvm_vcpu_pinning_help();

The helper should have a verb, e.g. kvm_print_vcpu_pinning_help().
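
For illustration, the corresponding declaration in kvm_util_base.h would then
look something along the lines of (a sketch of the suggested rename, not the
actual v3 code):

	void kvm_print_vcpu_pinning_help(void);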

>  	printf(" -d: add a delay in usec to the User Fault\n"
>  	       "     FD handler to simulate demand paging\n"
>  	       "     overheads. Ignored without -u.\n");
> @@ -247,6 +248,7 @@ static void help(char *name)
>  int main(int argc, char *argv[])
>  {
>  	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
> +	const char *cpulist = NULL;
>  	struct test_params p = {
>  		.src_type = DEFAULT_VM_MEM_SRC,
>  		.partition_vcpu_memory_access = true,
> @@ -257,7 +259,7 @@ int main(int argc, char *argv[])
>  
>  	guest_modes_append_default();
>  
> -	while ((opt = getopt(argc, argv, "ahom:u:d:b:s:v:r:")) != -1) {
> +	while ((opt = getopt(argc, argv, "ac:hom:u:d:b:s:v:r:")) != -1) {
>  		switch (opt) {
>  		case 'm':
>  			guest_modes_cmdline(optarg);
> @@ -272,6 +274,9 @@ int main(int argc, char *argv[])
>  		case 'a':
>  			p.single_uffd = true;
>  			break;
> +		case 'c':
> +			cpulist = optarg;
> +			break;

I think it makes sense to put this after 'v' so that the vCPU pinning option
directly follows the knobs for defining the number of vCPUs.  This test's
options aren't in alphabetical order, so inserting here doesn't buy anything.
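
For illustration only, a minimal sketch of that ordering (not the actual v3
diff; the surrounding cases are elided):

	while ((opt = getopt(argc, argv, "ahom:u:d:b:s:v:c:r:")) != -1) {
		switch (opt) {
		/* ... other options unchanged ... */
		case 'v':
			/* existing -v handling (number of vCPUs) */
			break;
		case 'c':
			cpulist = optarg;
			break;
		/* ... */
		}
	}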

>  		case 'd':
>  			p.uffd_delay = strtoul(optarg, NULL, 0);
>  			TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
> @@ -309,6 +314,12 @@ int main(int argc, char *argv[])
>  		TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
>  	}
>  
> +	if (cpulist) {
> +		kvm_parse_vcpu_pinning(cpulist, memstress_args.vcpu_to_pcpu,
> +				       nr_vcpus);
> +		memstress_args.pin_vcpus = true;
> +	}
> +
>  	for_each_guest_mode(run_test, &p);
>  
>  	return 0;
> diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> index e9d6d1aecf89..a17d4ebebe55 100644
> --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
> +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
> @@ -402,17 +402,7 @@ static void help(char *name)
>  	       "     so -w X means each page has an X%% chance of writing\n"
>  	       "     and a (100-X)%% chance of reading.\n"
>  	       "     (default: 100 i.e. all pages are written to.)\n");
> -	printf(" -c: Pin tasks to physical CPUs.  Takes a list of comma separated\n"
> -	       "     values (target pCPU), one for each vCPU, plus an optional\n"
> -	       "     entry for the main application task (specified via entry\n"
> -	       "     <nr_vcpus + 1>).  If used, entries must be provided for all\n"
> -	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
> -	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
> -	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
> -	       "         ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
> -	       "     To leave the application task unpinned, drop the final entry:\n\n"
> -	       "         ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"

This should print the actual program name.

I've fixed up all of these and rebased.  I'll post a v3 shortly.
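
For illustration, a hedged sketch of what the shared helper might look like
after those fixups (a guess at the v3 shape, not the actual queued code;
assumes <stdio.h> plus glibc's program_invocation_name, which is declared in
<errno.h> under _GNU_SOURCE):

	void kvm_print_vcpu_pinning_help(void)
	{
		const char *name = program_invocation_name;

		printf(" -c: Pin tasks to physical CPUs.  Takes a list of comma separated\n"
		       "     values (target pCPU), one for each vCPU, plus an optional\n"
		       "     entry for the main application task (specified via entry\n"
		       "     <nr_vcpus + 1>).  If used, entries must be provided for all\n"
		       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
		       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
		       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
		       "         %s -v 3 -c 22,23,24,50\n\n"
		       "     To leave the application task unpinned, drop the final entry:\n\n"
		       "         %s -v 3 -c 22,23,24\n\n"
		       "     (default: no pinning)\n", name, name);
	}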
  

Patch

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index bdb8e0748154..8581478ed4eb 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -220,7 +220,7 @@  static void run_test(enum vm_guest_mode mode, void *arg)
 static void help(char *name)
 {
 	puts("");
-	printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-a]\n"
+	printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-a] [-c cpu_list]\n"
 		   "          [-d uffd_delay_usec] [-r readers_per_uffd] [-b memory]\n"
 		   "          [-s type] [-v vcpus] [-o]\n", name);
 	guest_modes_help();
@@ -229,6 +229,7 @@  static void help(char *name)
 	printf(" -a: Use a single userfaultfd for all of guest memory, instead of\n"
 		   "     creating one for each region paged by a unique vCPU\n"
 		   "     Set implicitly with -o, and no effect without -u.\n");
+	kvm_vcpu_pinning_help();
 	printf(" -d: add a delay in usec to the User Fault\n"
 	       "     FD handler to simulate demand paging\n"
 	       "     overheads. Ignored without -u.\n");
@@ -247,6 +248,7 @@  static void help(char *name)
 int main(int argc, char *argv[])
 {
 	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+	const char *cpulist = NULL;
 	struct test_params p = {
 		.src_type = DEFAULT_VM_MEM_SRC,
 		.partition_vcpu_memory_access = true,
@@ -257,7 +259,7 @@  int main(int argc, char *argv[])
 
 	guest_modes_append_default();
 
-	while ((opt = getopt(argc, argv, "ahom:u:d:b:s:v:r:")) != -1) {
+	while ((opt = getopt(argc, argv, "ac:hom:u:d:b:s:v:r:")) != -1) {
 		switch (opt) {
 		case 'm':
 			guest_modes_cmdline(optarg);
@@ -272,6 +274,9 @@  int main(int argc, char *argv[])
 		case 'a':
 			p.single_uffd = true;
 			break;
+		case 'c':
+			cpulist = optarg;
+			break;
 		case 'd':
 			p.uffd_delay = strtoul(optarg, NULL, 0);
 			TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
@@ -309,6 +314,12 @@  int main(int argc, char *argv[])
 		TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
 	}
 
+	if (cpulist) {
+		kvm_parse_vcpu_pinning(cpulist, memstress_args.vcpu_to_pcpu,
+				       nr_vcpus);
+		memstress_args.pin_vcpus = true;
+	}
+
 	for_each_guest_mode(run_test, &p);
 
 	return 0;
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index e9d6d1aecf89..a17d4ebebe55 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -402,17 +402,7 @@  static void help(char *name)
 	       "     so -w X means each page has an X%% chance of writing\n"
 	       "     and a (100-X)%% chance of reading.\n"
 	       "     (default: 100 i.e. all pages are written to.)\n");
-	printf(" -c: Pin tasks to physical CPUs.  Takes a list of comma separated\n"
-	       "     values (target pCPU), one for each vCPU, plus an optional\n"
-	       "     entry for the main application task (specified via entry\n"
-	       "     <nr_vcpus + 1>).  If used, entries must be provided for all\n"
-	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
-	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
-	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
-	       "         ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
-	       "     To leave the application task unpinned, drop the final entry:\n\n"
-	       "         ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
-	       "     (default: no pinning)\n");
+	kvm_vcpu_pinning_help();
 	puts("");
 	exit(0);
 }
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index fbc2a79369b8..dc8afe64cfb7 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -734,6 +734,7 @@  struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
 			    int nr_vcpus);
+void kvm_vcpu_pinning_help(void);
 
 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 8ec20ac33de0..5c9b9706f56a 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -489,6 +489,21 @@  static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
 	return pcpu;
 }
 
+void kvm_vcpu_pinning_help(void)
+{
+	printf(" -c: Pin tasks to physical CPUs.  Takes a list of comma separated\n"
+	       "     values (target pCPU), one for each vCPU, plus an optional\n"
+	       "     entry for the main application task (specified via entry\n"
+	       "     <nr_vcpus + 1>).  If used, entries must be provided for all\n"
+	       "     vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
+	       "     E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
+	       "     vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
+	       "         ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
+	       "     To leave the application task unpinned, drop the final entry:\n\n"
+	       "         ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
+	       "     (default: no pinning)\n");
+}
+
 void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
 			    int nr_vcpus)
 {