[15/18] lib/stacktrace, kasan, kmsan: rework extra_bits interface

Message ID fbe58d38b7d93a9ef8500a72c0c4f103222418e6.1675111415.git.andreyknvl@google.com
State New
Series: lib/stackdepot: fixes and clean-ups

Commit Message

andrey.konovalov@linux.dev Jan. 30, 2023, 8:49 p.m. UTC
  From: Andrey Konovalov <andreyknvl@google.com>

The current implementation of the extra_bits interface is confusing:
passing extra_bits to __stack_depot_save makes it seem that the extra
bits are somehow stored in stack depot. In reality, they are only
embedded into a stack depot handle and are not used within stack depot.

Drop the extra_bits argument from __stack_depot_save and instead provide
a new stack_depot_set_extra_bits function (similar to the existing
stack_depot_get_extra_bits) that saves extra bits into a stack depot
handle.

Update the callers of __stack_depot_save to use the new interface.

This change also fixes a minor issue in the old code: __stack_depot_save
does not return NULL when saving the stack trace fails and extra_bits is
used, because the extra bits are unconditionally embedded into the
otherwise-zero handle, making it non-zero.
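
For reference, the resulting calling pattern (mirroring the mm/kmsan/core.c
hunks below; illustrative sketch only) is:

	handle = __stack_depot_save(entries, nr_entries, flags, true);
	return stack_depot_set_extra_bits(handle, extra);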

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 include/linux/stackdepot.h |  4 +++-
 lib/stackdepot.c           | 38 +++++++++++++++++++++++++++++---------
 mm/kasan/common.c          |  2 +-
 mm/kmsan/core.c            | 10 +++++++---
 4 files changed, 40 insertions(+), 14 deletions(-)
  

Comments

Marco Elver Jan. 31, 2023, 8:53 a.m. UTC | #1
On Mon, 30 Jan 2023 at 21:51, <andrey.konovalov@linux.dev> wrote:
>
> From: Andrey Konovalov <andreyknvl@google.com>
>
> The current implementation of the extra_bits interface is confusing:
> passing extra_bits to __stack_depot_save makes it seem that the extra
> bits are somehow stored in stack depot. In reality, they are only
> embedded into a stack depot handle and are not used within stack depot.
>
> Drop the extra_bits argument from __stack_depot_save and instead provide
> a new stack_depot_set_extra_bits function (similar to the existing
> stack_depot_get_extra_bits) that saves extra bits into a stack depot
> handle.
>
> Update the callers of __stack_depot_save to use the new interface.
>
> This change also fixes a minor issue in the old code: __stack_depot_save
> does not return NULL if saving stack trace fails and extra_bits is used.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> ---
>  include/linux/stackdepot.h |  4 +++-
>  lib/stackdepot.c           | 38 +++++++++++++++++++++++++++++---------
>  mm/kasan/common.c          |  2 +-
>  mm/kmsan/core.c            | 10 +++++++---
>  4 files changed, 40 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
> index c4e3abc16b16..f999811c66d7 100644
> --- a/include/linux/stackdepot.h
> +++ b/include/linux/stackdepot.h
> @@ -57,7 +57,6 @@ static inline int stack_depot_early_init(void)        { return 0; }
>
>  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>                                         unsigned int nr_entries,
> -                                       unsigned int extra_bits,
>                                         gfp_t gfp_flags, bool can_alloc);
>
>  depot_stack_handle_t stack_depot_save(unsigned long *entries,
> @@ -71,6 +70,9 @@ void stack_depot_print(depot_stack_handle_t stack);
>  int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
>                        int spaces);
>
> +depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
> +                                               unsigned int extra_bits);

Can you add __must_check to this function? Either that or making
handle an in/out param, as otherwise it might be easy to think that it
doesn't return anything ("set_foo()" seems like it sets the
information in the handle-associated data but not handle itself ... in
case someone missed the documentation).
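
A minimal sketch of what the annotated declaration could look like
(illustrative only, assuming the kernel's __must_check annotation; not
part of this patch):

	depot_stack_handle_t __must_check
	stack_depot_set_extra_bits(depot_stack_handle_t handle,
				   unsigned int extra_bits);

That way, a caller that ignores the returned handle (and thus silently
drops the extra bits) gets a compiler warning.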

>  unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
>
>  #endif
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index 7282565722f2..f291ad6a4e72 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -346,7 +346,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
>   *
>   * @entries:           Pointer to storage array
>   * @nr_entries:                Size of the storage array
> - * @extra_bits:                Flags to store in unused bits of depot_stack_handle_t
>   * @alloc_flags:       Allocation gfp flags
>   * @can_alloc:         Allocate stack slabs (increased chance of failure if false)
>   *
> @@ -358,10 +357,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
>   * If the stack trace in @entries is from an interrupt, only the portion up to
>   * interrupt entry is saved.
>   *
> - * Additional opaque flags can be passed in @extra_bits, stored in the unused
> - * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
> - * without calling stack_depot_fetch().
> - *
>   * Context: Any context, but setting @can_alloc to %false is required if
>   *          alloc_pages() cannot be used from the current context. Currently
>   *          this is the case from contexts where neither %GFP_ATOMIC nor
> @@ -371,7 +366,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
>   */
>  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>                                         unsigned int nr_entries,
> -                                       unsigned int extra_bits,
>                                         gfp_t alloc_flags, bool can_alloc)
>  {
>         struct stack_record *found = NULL, **bucket;
> @@ -461,8 +455,6 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
>         if (found)
>                 retval.handle = found->handle.handle;
>  fast_exit:
> -       retval.extra = extra_bits;
> -
>         return retval.handle;
>  }
>  EXPORT_SYMBOL_GPL(__stack_depot_save);
> @@ -483,7 +475,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
>                                       unsigned int nr_entries,
>                                       gfp_t alloc_flags)
>  {
> -       return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
> +       return __stack_depot_save(entries, nr_entries, alloc_flags, true);
>  }
>  EXPORT_SYMBOL_GPL(stack_depot_save);
>
> @@ -566,6 +558,34 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
>  }
>  EXPORT_SYMBOL_GPL(stack_depot_snprint);
>
> +/**
> + * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
> + *
> + * @handle:    Stack depot handle
> + * @extra_bits:        Value to set the extra bits
> + *
> + * Return: Stack depot handle with extra bits set
> + *
> + * Stack depot handles have a few unused bits, which can be used for storing
> + * user-specific information. These bits are transparent to the stack depot.
> + */
> +depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
> +                                               unsigned int extra_bits)
> +{
> +       union handle_parts parts = { .handle = handle };
> +
> +       parts.extra = extra_bits;
> +       return parts.handle;
> +}
> +EXPORT_SYMBOL(stack_depot_set_extra_bits);
> +
> +/**
> + * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
> + *
> + * @handle:    Stack depot handle with extra bits saved
> + *
> + * Return: Extra bits retrieved from the stack depot handle
> + */
>  unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
>  {
>         union handle_parts parts = { .handle = handle };
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 833bf2cfd2a3..50f4338b477f 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -43,7 +43,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
>         unsigned int nr_entries;
>
>         nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
> -       return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
> +       return __stack_depot_save(entries, nr_entries, flags, can_alloc);
>  }
>
>  void kasan_set_track(struct kasan_track *track, gfp_t flags)
> diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
> index 112dce135c7f..f710257d6867 100644
> --- a/mm/kmsan/core.c
> +++ b/mm/kmsan/core.c
> @@ -69,13 +69,15 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
>  {
>         unsigned long entries[KMSAN_STACK_DEPTH];
>         unsigned int nr_entries;
> +       depot_stack_handle_t handle;
>
>         nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
>
>         /* Don't sleep (see might_sleep_if() in __alloc_pages_nodemask()). */
>         flags &= ~__GFP_DIRECT_RECLAIM;
>
> -       return __stack_depot_save(entries, nr_entries, extra, flags, true);
> +       handle = __stack_depot_save(entries, nr_entries, flags, true);
> +       return stack_depot_set_extra_bits(handle, extra);
>  }
>
>  /* Copy the metadata following the memmove() behavior. */
> @@ -215,6 +217,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
>         u32 extra_bits;
>         int depth;
>         bool uaf;
> +       depot_stack_handle_t handle;
>
>         if (!id)
>                 return id;
> @@ -250,8 +253,9 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
>          * positives when __stack_depot_save() passes it to instrumented code.
>          */
>         kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
> -       return __stack_depot_save(entries, ARRAY_SIZE(entries), extra_bits,
> -                                 GFP_ATOMIC, true);
> +       handle = __stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC,
> +                                   true);
> +       return stack_depot_set_extra_bits(handle, extra_bits);
>  }
>
>  void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
> --
> 2.25.1
>
  
Andrey Konovalov Jan. 31, 2023, 6:58 p.m. UTC | #2
On Tue, Jan 31, 2023 at 9:54 AM Marco Elver <elver@google.com> wrote:
>
> > +depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
> > +                                               unsigned int extra_bits);
>
> Can you add __must_check to this function? Either that or making
> handle an in/out param, as otherwise it might be easy to think that it
> doesn't return anything ("set_foo()" seems like it sets the
> information in the handle-associated data but not handle itself ... in
> case someone missed the documentation).

Makes sense, will do in v2 if Alexander doesn't object to the
interface change. Thanks!
  
Alexander Potapenko Feb. 2, 2023, 10:03 a.m. UTC | #3
> This change also fixes a minor issue in the old code: __stack_depot_save
> does not return NULL if saving stack trace fails and extra_bits is used.

Good catch!


> + *
> + * Stack depot handles have a few unused bits, which can be used for storing
> + * user-specific information. These bits are transparent to the stack depot.
> + */
> +depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
> +                                               unsigned int extra_bits)
> +{
> +       union handle_parts parts = { .handle = handle };
> +
> +       parts.extra = extra_bits;
> +       return parts.handle;
> +}
> +EXPORT_SYMBOL(stack_depot_set_extra_bits);

You'd need to check for handle == NULL here, otherwise we're in the same
situation as when __stack_depot_save returns NULL and we happily apply
extra bits on top of it.
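
For illustration, one possible shape for v2 with that guard (a sketch
only, not the actual follow-up patch):

	depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
							unsigned int extra_bits)
	{
		union handle_parts parts = { .handle = handle };

		/* Keep a failed (zero) handle as-is rather than encoding extra bits into it. */
		if (!handle)
			return handle;

		parts.extra = extra_bits;
		return parts.handle;
	}
	EXPORT_SYMBOL(stack_depot_set_extra_bits);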
  
Alexander Potapenko Feb. 2, 2023, 10:04 a.m. UTC | #4
On Tue, Jan 31, 2023 at 7:58 PM Andrey Konovalov <andreyknvl@gmail.com> wrote:
>
> On Tue, Jan 31, 2023 at 9:54 AM Marco Elver <elver@google.com> wrote:
> >
> > > +depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
> > > +                                               unsigned int extra_bits);
> >
> > Can you add __must_check to this function? Either that or making
> > handle an in/out param, as otherwise it might be easy to think that it
> > doesn't return anything ("set_foo()" seems like it sets the
> > information in the handle-associated data but not handle itself ... in
> > case someone missed the documentation).
>
> Makes sense, will do in v2 if Alexander doesn't object to the
> interface change. Thanks!

I do not object. Thanks for doing this!
  

Patch

diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index c4e3abc16b16..f999811c66d7 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -57,7 +57,6 @@  static inline int stack_depot_early_init(void)	{ return 0; }
 
 depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 					unsigned int nr_entries,
-					unsigned int extra_bits,
 					gfp_t gfp_flags, bool can_alloc);
 
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
@@ -71,6 +70,9 @@  void stack_depot_print(depot_stack_handle_t stack);
 int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
 		       int spaces);
 
+depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
+						unsigned int extra_bits);
+
 unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
 
 #endif
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 7282565722f2..f291ad6a4e72 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -346,7 +346,6 @@  static inline struct stack_record *find_stack(struct stack_record *bucket,
  *
  * @entries:		Pointer to storage array
  * @nr_entries:		Size of the storage array
- * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
  * @alloc_flags:	Allocation gfp flags
  * @can_alloc:		Allocate stack slabs (increased chance of failure if false)
  *
@@ -358,10 +357,6 @@  static inline struct stack_record *find_stack(struct stack_record *bucket,
  * If the stack trace in @entries is from an interrupt, only the portion up to
  * interrupt entry is saved.
  *
- * Additional opaque flags can be passed in @extra_bits, stored in the unused
- * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
- * without calling stack_depot_fetch().
- *
  * Context: Any context, but setting @can_alloc to %false is required if
  *          alloc_pages() cannot be used from the current context. Currently
  *          this is the case from contexts where neither %GFP_ATOMIC nor
@@ -371,7 +366,6 @@  static inline struct stack_record *find_stack(struct stack_record *bucket,
  */
 depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 					unsigned int nr_entries,
-					unsigned int extra_bits,
 					gfp_t alloc_flags, bool can_alloc)
 {
 	struct stack_record *found = NULL, **bucket;
@@ -461,8 +455,6 @@  depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	if (found)
 		retval.handle = found->handle.handle;
 fast_exit:
-	retval.extra = extra_bits;
-
 	return retval.handle;
 }
 EXPORT_SYMBOL_GPL(__stack_depot_save);
@@ -483,7 +475,7 @@  depot_stack_handle_t stack_depot_save(unsigned long *entries,
 				      unsigned int nr_entries,
 				      gfp_t alloc_flags)
 {
-	return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
+	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
 }
 EXPORT_SYMBOL_GPL(stack_depot_save);
 
@@ -566,6 +558,34 @@  int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
 }
 EXPORT_SYMBOL_GPL(stack_depot_snprint);
 
+/**
+ * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
+ *
+ * @handle:	Stack depot handle
+ * @extra_bits:	Value to set the extra bits
+ *
+ * Return: Stack depot handle with extra bits set
+ *
+ * Stack depot handles have a few unused bits, which can be used for storing
+ * user-specific information. These bits are transparent to the stack depot.
+ */
+depot_stack_handle_t stack_depot_set_extra_bits(depot_stack_handle_t handle,
+						unsigned int extra_bits)
+{
+	union handle_parts parts = { .handle = handle };
+
+	parts.extra = extra_bits;
+	return parts.handle;
+}
+EXPORT_SYMBOL(stack_depot_set_extra_bits);
+
+/**
+ * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
+ *
+ * @handle:	Stack depot handle with extra bits saved
+ *
+ * Return: Extra bits retrieved from the stack depot handle
+ */
 unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
 {
 	union handle_parts parts = { .handle = handle };
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 833bf2cfd2a3..50f4338b477f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -43,7 +43,7 @@  depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
-	return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
+	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
 }
 
 void kasan_set_track(struct kasan_track *track, gfp_t flags)
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 112dce135c7f..f710257d6867 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -69,13 +69,15 @@  depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
 {
 	unsigned long entries[KMSAN_STACK_DEPTH];
 	unsigned int nr_entries;
+	depot_stack_handle_t handle;
 
 	nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
 
 	/* Don't sleep (see might_sleep_if() in __alloc_pages_nodemask()). */
 	flags &= ~__GFP_DIRECT_RECLAIM;
 
-	return __stack_depot_save(entries, nr_entries, extra, flags, true);
+	handle = __stack_depot_save(entries, nr_entries, flags, true);
+	return stack_depot_set_extra_bits(handle, extra);
 }
 
 /* Copy the metadata following the memmove() behavior. */
@@ -215,6 +217,7 @@  depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
 	u32 extra_bits;
 	int depth;
 	bool uaf;
+	depot_stack_handle_t handle;
 
 	if (!id)
 		return id;
@@ -250,8 +253,9 @@  depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
 	 * positives when __stack_depot_save() passes it to instrumented code.
 	 */
 	kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
-	return __stack_depot_save(entries, ARRAY_SIZE(entries), extra_bits,
-				  GFP_ATOMIC, true);
+	handle = __stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC,
+				    true);
+	return stack_depot_set_extra_bits(handle, extra_bits);
 }
 
 void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,