[bpf-next,2/6] tracing: Add generic test_recursion_try_acquire()
Commit Message
From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
The ftrace_test_recursion_trylock() also disables preemption. This is not
required, but was done as a cleanup: every place that called it also disabled
preemption, and tightly coupling the two appeared to make the code simpler.
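For reference, a typical ftrace callback today pairs the calls roughly like
this (an illustrative sketch, not part of the patch; the callback name is
made up):

#include <linux/ftrace.h>
#include <linux/trace_recursion.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        int bit;

        /* Returns < 0 on recursion; on success preemption is disabled. */
        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        /* ... tracing work runs with preemption disabled ... */

        /* Re-enables preemption and clears the recursion bit. */
        ftrace_test_recursion_unlock(bit);
}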
But the recursion protection can be used for other purposes that do not
require disabling preemption. As the recursion bits are attached to the
task_struct, they follow the task, so there is no need for preemption to be
disabled.
Add test_recursion_try_acquire/release() functions to be used generically,
and separate them from being associated with ftrace. The "lock" name is also
dropped, as no lock is actually taken. Keeping "lock" for the ftrace version
is better, as it at least indicates that preemption is being disabled (hence,
"locking the CPU").
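As an illustration of the new interface (again not part of the patch; the
function name here is hypothetical), a generic user that does not need
preemption disabled would pair the calls like this:

#include <linux/trace_recursion.h>

static void my_generic_user(void)
{
        int bit;

        /*
         * The recursion bits live in current->trace_recursion, so the
         * protection follows the task and preemption is left alone.
         */
        bit = test_recursion_try_acquire(_THIS_IP_, _RET_IP_);
        if (bit < 0)
                return;         /* recursed in the same context */

        /* ... work that may run with preemption enabled ... */

        test_recursion_release(bit);
}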
Link: https://lore.kernel.org/linux-trace-kernel/20230321020103.13494-1-laoar.shao@gmail.com/
Acked-by: Yafang Shao <laoar.shao@gmail.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
include/linux/trace_recursion.h | 47 ++++++++++++++++++++++++++++++-----------
kernel/trace/ftrace.c | 2 ++
2 files changed, 37 insertions(+), 12 deletions(-)
Comments
On Mon, 17 Apr 2023 15:47:33 +0000
Yafang Shao <laoar.shao@gmail.com> wrote:
> From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
>
> The ftrace_test_recursion_trylock() also disables preemption. This is not
> required, but was done as a cleanup: every place that called it also disabled
> preemption, and tightly coupling the two appeared to make the code simpler.
>
> But the recursion protection can be used for other purposes that do not
> require disabling preemption. As the recursion bits are attached to the
> task_struct, they follow the task, so there is no need for preemption to be
> disabled.
>
> Add test_recursion_try_acquire/release() functions to be used generically,
> and separate them from being associated with ftrace. The "lock" name is also
> dropped, as no lock is actually taken. Keeping "lock" for the ftrace version
> is better, as it at least indicates that preemption is being disabled (hence,
> "locking the CPU").
>
> Link: https://lore.kernel.org/linux-trace-kernel/20230321020103.13494-1-laoar.shao@gmail.com/
>
> Acked-by: Yafang Shao <laoar.shao@gmail.com>
> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
> Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
This looks good to me.
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
> ---
> include/linux/trace_recursion.h | 47 ++++++++++++++++++++++++++++++-----------
> kernel/trace/ftrace.c | 2 ++
> 2 files changed, 37 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
> index d48cd92..80de2ee 100644
> --- a/include/linux/trace_recursion.h
> +++ b/include/linux/trace_recursion.h
> @@ -150,9 +150,6 @@ static __always_inline int trace_get_context_bit(void)
> # define trace_warn_on_no_rcu(ip) false
> #endif
>
> -/*
> - * Preemption is promised to be disabled when return bit >= 0.
> - */
> static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
> int start)
> {
> @@ -182,18 +179,11 @@ static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsign
> val |= 1 << bit;
> current->trace_recursion = val;
> barrier();
> -
> - preempt_disable_notrace();
> -
> return bit;
> }
>
> -/*
> - * Preemption will be enabled (if it was previously enabled).
> - */
> static __always_inline void trace_clear_recursion(int bit)
> {
> - preempt_enable_notrace();
> barrier();
> trace_recursion_clear(bit);
> }
> @@ -205,12 +195,18 @@ static __always_inline void trace_clear_recursion(int bit)
> * tracing recursed in the same context (normal vs interrupt),
> *
> * Returns: -1 if a recursion happened.
> - * >= 0 if no recursion.
> + * >= 0 if no recursion and preemption will be disabled.
> */
> static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
> unsigned long parent_ip)
> {
> - return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
> + int bit;
> +
> + bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
> + if (unlikely(bit < 0))
> + return bit;
> + preempt_disable_notrace();
> + return bit;
> }
>
> /**
> @@ -221,6 +217,33 @@ static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
> */
> static __always_inline void ftrace_test_recursion_unlock(int bit)
> {
> + preempt_enable_notrace();
> + trace_clear_recursion(bit);
> +}
> +
> +/**
> + * test_recursion_try_acquire - tests for recursion in same context
> + *
> + * This will detect recursion of a function.
> + *
> + * Returns: -1 if a recursion happened.
> + * >= 0 if no recursion
> + */
> +static __always_inline int test_recursion_try_acquire(unsigned long ip,
> + unsigned long parent_ip)
> +{
> + return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
> +}
> +
> +/**
> + * test_recursion_release - called after a success of test_recursion_try_acquire()
> + * @bit: The return of a successful test_recursion_try_acquire()
> + *
> + * This releases the recursion lock taken by a non-negative return call
> + * by test_recursion_try_acquire().
> + */
> +static __always_inline void test_recursion_release(int bit)
> +{
> trace_clear_recursion(bit);
> }
>
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index c67bcc8..8ad3ab4 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -7647,6 +7647,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
> if (bit < 0)
> return;
>
> + preempt_disable();
> do_for_each_ftrace_op(op, ftrace_ops_list) {
> /* Stub functions don't need to be called nor tested */
> if (op->flags & FTRACE_OPS_FL_STUB)
> @@ -7668,6 +7669,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
> }
> } while_for_each_ftrace_op(op);
> out:
> + preempt_enable();
> trace_clear_recursion(bit);
> }
>
> --
> 1.8.3.1
>