[v3,4/9] KVM: s390: selftest: memop: Replace macros by functions
Commit Message
Replace the DEFAULT_* test helpers with functions, as they don't
need the extra flexibility.
Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
---
tools/testing/selftests/kvm/s390x/memop.c | 82 +++++++++++------------
1 file changed, 39 insertions(+), 43 deletions(-)
Comments
On Thu, 17 Nov 2022 23:17:53 +0100
Janis Schoetterl-Glausch <scgl@linux.ibm.com> wrote:
> Replace the DEFAULT_* test helpers with functions, as they don't
> need the extra flexibility.
>
> Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
> ---
> tools/testing/selftests/kvm/s390x/memop.c | 82 +++++++++++------------
> 1 file changed, 39 insertions(+), 43 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
> index 69869c7e2ab1..286185a59238 100644
> --- a/tools/testing/selftests/kvm/s390x/memop.c
> +++ b/tools/testing/selftests/kvm/s390x/memop.c
> @@ -48,6 +48,8 @@ struct mop_desc {
> uint8_t key;
> };
>
> +const uint8_t NO_KEY = 0xff;
> +
> static struct kvm_s390_mem_op ksmo_from_desc(const struct mop_desc *desc)
> {
> struct kvm_s390_mem_op ksmo = {
> @@ -85,7 +87,7 @@ static struct kvm_s390_mem_op ksmo_from_desc(const struct mop_desc *desc)
> ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
> if (desc->_set_flags)
> ksmo.flags = desc->set_flags;
> - if (desc->f_key) {
> + if (desc->f_key && desc->key != NO_KEY) {
Is this change going to affect the behaviour?
If so, please document it in the patch description.
> ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
> ksmo.key = desc->key;
> }
> @@ -268,34 +270,28 @@ static void prepare_mem12(void)
> #define ASSERT_MEM_EQ(p1, p2, size) \
> TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")
>
> -#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
> -({ \
> - struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
> - enum mop_target __target = (mop_target_p); \
> - uint32_t __size = (size); \
> - \
> - prepare_mem12(); \
> - CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
> - GADDR_V(mem1), ##__VA_ARGS__); \
> - HOST_SYNC(__copy_cpu, STAGE_COPIED); \
> - CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \
> - GADDR_V(mem2), ##__VA_ARGS__); \
> - ASSERT_MEM_EQ(mem1, mem2, __size); \
> -})
> +static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
> + enum mop_target mop_target, uint32_t size, uint8_t key)
> +{
> + prepare_mem12();
> + CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
> + GADDR_V(mem1), KEY(key));
> + HOST_SYNC(copy_cpu, STAGE_COPIED);
> + CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
> + GADDR_V(mem2), KEY(key));
> + ASSERT_MEM_EQ(mem1, mem2, size);
> +}
>
> -#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
> -({ \
> - struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
> - enum mop_target __target = (mop_target_p); \
> - uint32_t __size = (size); \
> - \
> - prepare_mem12(); \
> - CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
> - GADDR_V(mem1)); \
> - HOST_SYNC(__copy_cpu, STAGE_COPIED); \
> - CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
> - ASSERT_MEM_EQ(mem1, mem2, __size); \
> -})
> +static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
> + enum mop_target mop_target, uint32_t size, uint8_t key)
> +{
> + prepare_mem12();
> + CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
> + HOST_SYNC(copy_cpu, STAGE_COPIED);
> + CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
> + GADDR_V(mem2), KEY(key));
> + ASSERT_MEM_EQ(mem1, mem2, size);
> +}
>
> static void guest_copy(void)
> {
> @@ -310,7 +306,7 @@ static void test_copy(void)
>
> HOST_SYNC(t.vcpu, STAGE_INITED);
>
> - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);
> + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);
>
> kvm_vm_free(t.kvm_vm);
> }
> @@ -357,26 +353,26 @@ static void test_copy_key(void)
> HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
>
> /* vm, no key */
> - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);
> + default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);
>
> /* vm/vcpu, machting key or key 0 */
> - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
> - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
> - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
> - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
> + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
> + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
> + default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
> + default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
> /*
> * There used to be different code paths for key handling depending on
> * if the region crossed a page boundary.
> * There currently are not, but the more tests the merrier.
> */
> - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
> - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
> - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
> - DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));
> + default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
> + default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
> + default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
> + default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);
>
> /* vm/vcpu, mismatching keys on read, but no fetch protection */
> - DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
> - DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));
> + default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
> + default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);
>
> kvm_vm_free(t.kvm_vm);
> }
> @@ -409,7 +405,7 @@ static void test_copy_key_storage_prot_override(void)
> HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
>
> /* vcpu, mismatching keys, storage protection override in effect */
> - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));
> + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
>
> kvm_vm_free(t.kvm_vm);
> }
> @@ -422,8 +418,8 @@ static void test_copy_key_fetch_prot(void)
> HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
>
> /* vm/vcpu, matching key, fetch protection in effect */
> - DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
> - DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));
> + default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
> + default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
>
> kvm_vm_free(t.kvm_vm);
> }
On Thu, 2022-12-01 at 17:28 +0100, Claudio Imbrenda wrote:
> On Thu, 17 Nov 2022 23:17:53 +0100
> Janis Schoetterl-Glausch <scgl@linux.ibm.com> wrote:
>
> > Replace the DEFAULT_* test helpers with functions, as they don't
> > need the extra flexibility.
> >
> > Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
> > ---
> > tools/testing/selftests/kvm/s390x/memop.c | 82 +++++++++++------------
> > 1 file changed, 39 insertions(+), 43 deletions(-)
> >
> > diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
> > index 69869c7e2ab1..286185a59238 100644
> > --- a/tools/testing/selftests/kvm/s390x/memop.c
> > +++ b/tools/testing/selftests/kvm/s390x/memop.c
> > @@ -48,6 +48,8 @@ struct mop_desc {
> > uint8_t key;
> > };
> >
> > +const uint8_t NO_KEY = 0xff;
> > +
> > static struct kvm_s390_mem_op ksmo_from_desc(const struct mop_desc *desc)
> > {
> > struct kvm_s390_mem_op ksmo = {
> > @@ -85,7 +87,7 @@ static struct kvm_s390_mem_op ksmo_from_desc(const struct mop_desc *desc)
> > ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
> > if (desc->_set_flags)
> > ksmo.flags = desc->set_flags;
> > - if (desc->f_key) {
> > + if (desc->f_key && desc->key != NO_KEY) {
>
> Is this change going to affect the behaviour?
> If so, please document it in the patch description.
No. Previously, the absence of a KEY() argument in the varargs denoted that
no key was used. Now that a function with a fixed signature is used, there is
an explicit NO_KEY argument, which is checked here to decide whether key
checking is done or not.
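
To illustrate, here is a minimal standalone sketch (not part of the patch)
showing why the "desc->key != NO_KEY" check preserves the old behaviour. It
assumes KEY(k) expands to the designated initializers ".f_key = 1, .key = (k)",
which matches the fields ksmo_from_desc() reads above but is an assumption of
this sketch, not quoted from the selftest:

/*
 * Sketch only: passing KEY(NO_KEY) to the new functions yields the same
 * flags as omitting KEY() entirely did with the old varargs macros.
 */
#include <stdint.h>
#include <stdio.h>

#define F_SKEY_PROTECTION 0x02u  /* stand-in for KVM_S390_MEMOP_F_SKEY_PROTECTION */

static const uint8_t NO_KEY = 0xff;

struct mop_desc {
	uint8_t f_key;  /* "a key argument was supplied" */
	uint8_t key;
};

#define KEY(k) .f_key = 1, .key = (k)  /* assumed expansion of the real KEY() */

static uint32_t flags_from_desc(const struct mop_desc *desc)
{
	uint32_t flags = 0;

	/* the hunk under discussion: only enable key checking for a real key */
	if (desc->f_key && desc->key != NO_KEY)
		flags |= F_SKEY_PROTECTION;
	return flags;
}

int main(void)
{
	struct mop_desc with_key = { KEY(9) };         /* old style: KEY(9) passed */
	struct mop_desc without_key = { KEY(NO_KEY) }; /* new style: "no key" is explicit */

	/* prints 0x2 for KEY(9), 0 for KEY(NO_KEY), matching the old macro behaviour */
	printf("KEY(9):      flags=%#x\n", (unsigned)flags_from_desc(&with_key));
	printf("KEY(NO_KEY): flags=%#x\n", (unsigned)flags_from_desc(&without_key));
	return 0;
}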
>
> > ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
> > ksmo.key = desc->key;
> > }
> > @@ -268,34 +270,28 @@ static void prepare_mem12(void)
> > #define ASSERT_MEM_EQ(p1, p2, size) \
> > TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")
> >
> > -#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
> > -({ \
> > - struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
> > - enum mop_target __target = (mop_target_p); \
> > - uint32_t __size = (size); \
> > - \
> > - prepare_mem12(); \
> > - CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
> > - GADDR_V(mem1), ##__VA_ARGS__); \
> > - HOST_SYNC(__copy_cpu, STAGE_COPIED); \
> > - CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \
> > - GADDR_V(mem2), ##__VA_ARGS__); \
> > - ASSERT_MEM_EQ(mem1, mem2, __size); \
> > -})
> > +static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
> > + enum mop_target mop_target, uint32_t size, uint8_t key)
> > +{
> > + prepare_mem12();
> > + CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
> > + GADDR_V(mem1), KEY(key));
> > + HOST_SYNC(copy_cpu, STAGE_COPIED);
> > + CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
> > + GADDR_V(mem2), KEY(key));
> > + ASSERT_MEM_EQ(mem1, mem2, size);
> > +}
> >
> > -#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
> > -({ \
> > - struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
> > - enum mop_target __target = (mop_target_p); \
> > - uint32_t __size = (size); \
> > - \
> > - prepare_mem12(); \
> > - CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
> > - GADDR_V(mem1)); \
> > - HOST_SYNC(__copy_cpu, STAGE_COPIED); \
> > - CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
> > - ASSERT_MEM_EQ(mem1, mem2, __size); \
> > -})
> > +static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
> > + enum mop_target mop_target, uint32_t size, uint8_t key)
> > +{
> > + prepare_mem12();
> > + CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
> > + HOST_SYNC(copy_cpu, STAGE_COPIED);
> > + CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
> > + GADDR_V(mem2), KEY(key));
> > + ASSERT_MEM_EQ(mem1, mem2, size);
> > +}
> >
> > static void guest_copy(void)
> > {
> > @@ -310,7 +306,7 @@ static void test_copy(void)
> >
> > HOST_SYNC(t.vcpu, STAGE_INITED);
> >
> > - DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);
> > + default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);
> >
> > kvm_vm_free(t.kvm_vm);
> > }
[...]