kunit: memcpy: Split slow memcpy tests into MEMCPY_SLOW_KUNIT_TEST

Message ID: 20230114005408.never.756-kees@kernel.org
State: New
Series: kunit: memcpy: Split slow memcpy tests into MEMCPY_SLOW_KUNIT_TEST

Commit Message

Kees Cook Jan. 14, 2023, 12:54 a.m. UTC
  Since the long memcpy tests may stall a system for tens of seconds
in virtualized architecture environments, split those tests off under
CONFIG_MEMCPY_SLOW_KUNIT_TEST so they can be separately disabled.

Reported-by: Guenter Roeck <linux@roeck-us.net>
Link: https://lore.kernel.org/lkml/20221226195206.GA2626419@roeck-us.net
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Reviewed-and-tested-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: David Gow <davidgow@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: linux-hardening@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
---
v2: fix tristate to bool
v1: https://lore.kernel.org/lkml/20230107040203.never.112-kees@kernel.org
---
 lib/Kconfig.debug  |  9 +++++++++
 lib/memcpy_kunit.c | 15 ++++++++++++---
 2 files changed, 21 insertions(+), 3 deletions(-)
  

Comments

Kees Cook Jan. 14, 2023, 12:57 a.m. UTC | #1
gah. this is actually v2. :(

On Fri, Jan 13, 2023 at 04:54:12PM -0800, Kees Cook wrote:
> Since the long memcpy tests may stall a system for tens of seconds
> in virtualized architecture environments, split those tests off under
> CONFIG_MEMCPY_SLOW_KUNIT_TEST so they can be separately disabled.
> 
> Reported-by: Guenter Roeck <linux@roeck-us.net>
> Link: https://lore.kernel.org/lkml/20221226195206.GA2626419@roeck-us.net
> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
> Reviewed-and-tested-by: Guenter Roeck <linux@roeck-us.net>
> Reviewed-by: David Gow <davidgow@google.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Nathan Chancellor <nathan@kernel.org>
> Cc: linux-hardening@vger.kernel.org
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
> v2: fix tristate to bool
> v1: https://lore.kernel.org/lkml/20230107040203.never.112-kees@kernel.org
> ---
>  lib/Kconfig.debug  |  9 +++++++++
>  lib/memcpy_kunit.c | 15 ++++++++++++---
>  2 files changed, 21 insertions(+), 3 deletions(-)
> 
> diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
> index c2c78d0e761c..f90637171453 100644
> --- a/lib/Kconfig.debug
> +++ b/lib/Kconfig.debug
> @@ -2621,6 +2621,15 @@ config MEMCPY_KUNIT_TEST
>  
>  	  If unsure, say N.
>  
> +config MEMCPY_SLOW_KUNIT_TEST
> +	bool "Include exhaustive memcpy tests" if !KUNIT_ALL_TESTS
> +	depends on MEMCPY_KUNIT_TEST
> +	default KUNIT_ALL_TESTS
> +	help
> +	  Some memcpy tests are quite exhaustive in checking for overlaps
> +	  and bit ranges. These can be very slow, so they are split out
> +	  as a separate config.
> +
>  config IS_SIGNED_TYPE_KUNIT_TEST
>  	tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
>  	depends on KUNIT
> diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
> index 89128551448d..5a545e1b5dbb 100644
> --- a/lib/memcpy_kunit.c
> +++ b/lib/memcpy_kunit.c
> @@ -307,8 +307,12 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
>  	}
>  }
>  
> -static void init_large(struct kunit *test)
> +static int init_large(struct kunit *test)
>  {
> +	if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) {
> +		kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
> +		return -EBUSY;
> +	}
>  
>  	/* Get many bit patterns. */
>  	get_random_bytes(large_src, ARRAY_SIZE(large_src));
> @@ -319,6 +323,8 @@ static void init_large(struct kunit *test)
>  
>  	/* Explicitly zero the entire destination. */
>  	memset(large_dst, 0, ARRAY_SIZE(large_dst));
> +
> +	return 0;
>  }
>  
>  /*
> @@ -327,7 +333,9 @@ static void init_large(struct kunit *test)
>   */
>  static void copy_large_test(struct kunit *test, bool use_memmove)
>  {
> -	init_large(test);
> +
> +	if (init_large(test))
> +		return;
>  
>  	/* Copy a growing number of non-overlapping bytes ... */
>  	for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
> @@ -472,7 +480,8 @@ static void memmove_overlap_test(struct kunit *test)
>  	static const int bytes_start = 1;
>  	static const int bytes_end = ARRAY_SIZE(large_src) + 1;
>  
> -	init_large(test);
> +	if (init_large(test))
> +		return;
>  
>  	/* Copy a growing number of overlapping bytes ... */
>  	for (int bytes = bytes_start; bytes < bytes_end;
> -- 
> 2.34.1
>
  
Daniel Latypov Jan. 14, 2023, 1:38 a.m. UTC | #2
On Fri, Jan 13, 2023 at 4:54 PM Kees Cook <keescook@chromium.org> wrote:
> diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
> index 89128551448d..5a545e1b5dbb 100644
> --- a/lib/memcpy_kunit.c
> +++ b/lib/memcpy_kunit.c
> @@ -307,8 +307,12 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
>         }
>  }
>
> -static void init_large(struct kunit *test)
> +static int init_large(struct kunit *test)
>  {
> +       if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) {
> +               kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
> +               return -EBUSY;

Note: kunit_skip() here means you don't need explicit returns in the test cases.
kunit_skip() is basically
  kunit_mark_skipped(test, "reason");
  kthread_complete_and_exit(...);

So the diff in this file could be reduced down to just these 2 lines
  if (!IS_ENABLED(...))
      kunit_skip(test, "...")

But I can see the appeal of being more explicit about the control flow.
In that case, you could switch to kunit_mark_skipped(), which just sets
the status and doesn't affect control flow at all.

Daniel
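
For reference, a minimal sketch of the reduced form Daniel describes, assuming
kunit_skip() aborts the running test as noted (this is a hypothetical rewrite
for illustration, not what the posted patch does):

static void init_large(struct kunit *test)
{
	/*
	 * kunit_skip() marks the test as skipped and then aborts the test
	 * thread, so no error return or caller-side check is required.
	 */
	if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
		kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");

	/* ... the rest of init_large() and its callers stay as they are today ... */
}

If the explicit control flow is preferred instead, kunit_mark_skipped() only
sets the status, so the int return and the caller-side checks from the posted
patch would still be needed.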
  
David Gow Jan. 14, 2023, 4:31 a.m. UTC | #3
On Sat, 14 Jan 2023 at 08:54, Kees Cook <keescook@chromium.org> wrote:
>
> Since the long memcpy tests may stall a system for tens of seconds
> in virtualized architecture environments, split those tests off under
> CONFIG_MEMCPY_SLOW_KUNIT_TEST so they can be separately disabled.
>
> Reported-by: Guenter Roeck <linux@roeck-us.net>
> Link: https://lore.kernel.org/lkml/20221226195206.GA2626419@roeck-us.net
> Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
> Reviewed-and-tested-by: Guenter Roeck <linux@roeck-us.net>
> Reviewed-by: David Gow <davidgow@google.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Nathan Chancellor <nathan@kernel.org>
> Cc: linux-hardening@vger.kernel.org
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---

Thanks: this is okay as-is, but I left a few suggestions below, having
tried it out a bit more.

Daniel's comment about kunit_skip() aborting the test is the only real
fix, but depending on how easy you'd want to make turning these tests
off, there are a couple of possible tweaks to the Kconfig option.

Cheers,
-- David

> v2: fix tristate to bool
> v1: https://lore.kernel.org/lkml/20230107040203.never.112-kees@kernel.org
> ---
>  lib/Kconfig.debug  |  9 +++++++++
>  lib/memcpy_kunit.c | 15 ++++++++++++---
>  2 files changed, 21 insertions(+), 3 deletions(-)
>
> diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
> index c2c78d0e761c..f90637171453 100644
> --- a/lib/Kconfig.debug
> +++ b/lib/Kconfig.debug
> @@ -2621,6 +2621,15 @@ config MEMCPY_KUNIT_TEST
>
>           If unsure, say N.
>
> +config MEMCPY_SLOW_KUNIT_TEST
> +       bool "Include exhaustive memcpy tests" if !KUNIT_ALL_TESTS

I think it'd be better to not include the "if !KUNIT_ALL_TESTS" here,
because it's very convenient to be able to use:
  ./tools/testing/kunit/kunit.py run memcpy --kconfig_add CONFIG_MEMCPY_SLOW_KUNIT_TEST=n
to override it.

That does undermine the way KUNIT_ALL_TESTS works a bit, though it
depends if you want to consider this a new set of tests, or just an
option for the existing ones.
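
For illustration, the always-prompted variant being suggested here would look
roughly like this (a sketch only; the posted patch keeps the
"if !KUNIT_ALL_TESTS" guard):

config MEMCPY_SLOW_KUNIT_TEST
	bool "Include exhaustive memcpy tests"
	depends on MEMCPY_KUNIT_TEST
	default KUNIT_ALL_TESTS
	help
	  Some memcpy tests are quite exhaustive in checking for overlaps
	  and bit ranges. These can be very slow, so they are split out
	  as a separate config.

With the prompt unconditional, an explicit CONFIG_MEMCPY_SLOW_KUNIT_TEST=n
passed via --kconfig_add takes effect even when KUNIT_ALL_TESTS=y, at the cost
of the option no longer being hidden behind KUNIT_ALL_TESTS.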

> +       depends on MEMCPY_KUNIT_TEST
> +       default KUNIT_ALL_TESTS

Does this default work for everyone? Personally, I think these tests
aren't slow enough that we'd want them disabled from an "all tests"
build by default. So I'd keep it as-is.

> +       help
> +         Some memcpy tests are quite exhaustive in checking for overlaps
> +         and bit ranges. These can be very slow, so they are split out
> +         as a separate config.
> +
>  config IS_SIGNED_TYPE_KUNIT_TEST
>         tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
>         depends on KUNIT
> diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
> index 89128551448d..5a545e1b5dbb 100644
> --- a/lib/memcpy_kunit.c
> +++ b/lib/memcpy_kunit.c
> @@ -307,8 +307,12 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
>         }
>  }
>
> -static void init_large(struct kunit *test)
> +static int init_large(struct kunit *test)
>  {
> +       if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) {
> +               kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
> +               return -EBUSY;

As Daniel notes, it shouldn't be necessary to return here, as
kunit_skip() will abort the test. (It's a macro, so isn't marked
__noreturn itself, but kunit_try_catch_throw(), which it uses, is.)

> +       }
>
>         /* Get many bit patterns. */
>         get_random_bytes(large_src, ARRAY_SIZE(large_src));
> @@ -319,6 +323,8 @@ static void init_large(struct kunit *test)
>
>         /* Explicitly zero the entire destination. */
>         memset(large_dst, 0, ARRAY_SIZE(large_dst));
> +
> +       return 0;
>  }
>
>  /*
> @@ -327,7 +333,9 @@ static void init_large(struct kunit *test)
>   */
>  static void copy_large_test(struct kunit *test, bool use_memmove)
>  {
> -       init_large(test);
> +
> +       if (init_large(test))
> +               return;
>
>         /* Copy a growing number of non-overlapping bytes ... */
>         for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
> @@ -472,7 +480,8 @@ static void memmove_overlap_test(struct kunit *test)
>         static const int bytes_start = 1;
>         static const int bytes_end = ARRAY_SIZE(large_src) + 1;
>
> -       init_large(test);
> +       if (init_large(test))
> +               return;
>
>         /* Copy a growing number of overlapping bytes ... */
>         for (int bytes = bytes_start; bytes < bytes_end;
> --
> 2.34.1
>
  

Patch

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c2c78d0e761c..f90637171453 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2621,6 +2621,15 @@  config MEMCPY_KUNIT_TEST
 
 	  If unsure, say N.
 
+config MEMCPY_SLOW_KUNIT_TEST
+	bool "Include exhaustive memcpy tests" if !KUNIT_ALL_TESTS
+	depends on MEMCPY_KUNIT_TEST
+	default KUNIT_ALL_TESTS
+	help
+	  Some memcpy tests are quite exhaustive in checking for overlaps
+	  and bit ranges. These can be very slow, so they are split out
+	  as a separate config.
+
 config IS_SIGNED_TYPE_KUNIT_TEST
 	tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
 	depends on KUNIT
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 89128551448d..5a545e1b5dbb 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -307,8 +307,12 @@  static void set_random_nonzero(struct kunit *test, u8 *byte)
 	}
 }
 
-static void init_large(struct kunit *test)
+static int init_large(struct kunit *test)
 {
+	if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) {
+		kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
+		return -EBUSY;
+	}
 
 	/* Get many bit patterns. */
 	get_random_bytes(large_src, ARRAY_SIZE(large_src));
@@ -319,6 +323,8 @@  static void init_large(struct kunit *test)
 
 	/* Explicitly zero the entire destination. */
 	memset(large_dst, 0, ARRAY_SIZE(large_dst));
+
+	return 0;
 }
 
 /*
@@ -327,7 +333,9 @@  static void init_large(struct kunit *test)
  */
 static void copy_large_test(struct kunit *test, bool use_memmove)
 {
-	init_large(test);
+
+	if (init_large(test))
+		return;
 
 	/* Copy a growing number of non-overlapping bytes ... */
 	for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
@@ -472,7 +480,8 @@  static void memmove_overlap_test(struct kunit *test)
 	static const int bytes_start = 1;
 	static const int bytes_end = ARRAY_SIZE(large_src) + 1;
 
-	init_large(test);
+	if (init_large(test))
+		return;
 
 	/* Copy a growing number of overlapping bytes ... */
 	for (int bytes = bytes_start; bytes < bytes_end;