[1/1] rcu: Refactor kvfree_call_rcu() and high-level helpers
Commit Message
Currently kvfree_call_rcu() takes an offset within a
structure as its second parameter, so a helper such as
kvfree_rcu_arg_2() has to convert an rcu_head and the
pointer being freed into an offset in order to pass it.
That leads to an extra conversion on macro entry.
Instead of converting, refactor the code in such a way
that the pointer to be freed is passed directly to
kvfree_call_rcu().
This patch does not make any functional change and is
transparent to all kvfree_rcu() users.
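As an illustration, the two conventions carry the same
information, since offset == (void *) head - ptr. A minimal
standalone sketch (hypothetical struct names, not the kernel
code):

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel types. */
struct rcu_head { struct rcu_head *next; void *func; };
struct foo { int data; struct rcu_head rhf; };

int main(void)
{
	struct foo f;
	struct rcu_head *head = &f.rhf;
	void *ptr = &f;

	/* Old convention: the macro passed the offset and the
	 * callee reconstructed the object as head - offset. */
	unsigned long offset = offsetof(struct foo, rhf);
	assert((void *)((char *)head - offset) == ptr);

	/* New convention: the macro passes ptr directly; the
	 * offset, where still needed, is derived as head - ptr. */
	assert((unsigned long)((char *)head - (char *)ptr) == offset);
	return 0;
}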
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
include/linux/rcupdate.h | 5 ++---
include/linux/rcutiny.h | 12 ++++++------
include/linux/rcutree.h | 2 +-
kernel/rcu/tiny.c | 9 +++------
kernel/rcu/tree.c | 29 ++++++++++++-----------------
5 files changed, 24 insertions(+), 33 deletions(-)
Comments
On Tue, Oct 25, 2022 at 04:46:12PM +0200, Uladzislau Rezki (Sony) wrote:
> Currently kvfree_call_rcu() takes an offset within a
> structure as its second parameter, so a helper such as
> kvfree_rcu_arg_2() has to convert an rcu_head and the
> pointer being freed into an offset in order to pass it.
> That leads to an extra conversion on macro entry.
>
> Instead of converting, refactor the code in such a way
> that the pointer to be freed is passed directly to
> kvfree_call_rcu().
>
> This patch does not make any functional change and is
> transparent to all kvfree_rcu() users.
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Nice simplification!
Hearing no immediate protests, I have pulled this in for testing and
for further review.
Thanx, Paul
> ---
> include/linux/rcupdate.h | 5 ++---
> include/linux/rcutiny.h | 12 ++++++------
> include/linux/rcutree.h | 2 +-
> kernel/rcu/tiny.c | 9 +++------
> kernel/rcu/tree.c | 29 ++++++++++++-----------------
> 5 files changed, 24 insertions(+), 33 deletions(-)
>
> diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
> index 08605ce7379d..cbd34058fff1 100644
> --- a/include/linux/rcupdate.h
> +++ b/include/linux/rcupdate.h
> @@ -985,8 +985,7 @@ do { \
> \
> if (___p) { \
> BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
> - kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \
> - (offsetof(typeof(*(ptr)), rhf))); \
> + kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
> } \
> } while (0)
>
> @@ -995,7 +994,7 @@ do { \
> typeof(ptr) ___p = (ptr); \
> \
> if (___p) \
> - kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
> + kvfree_call_rcu(NULL, (void *) (___p)); \
> } while (0)
>
> /*
> diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
> index 768196a5f39d..9623c039964c 100644
> --- a/include/linux/rcutiny.h
> +++ b/include/linux/rcutiny.h
> @@ -98,25 +98,25 @@ static inline void synchronize_rcu_expedited(void)
> */
> extern void kvfree(const void *addr);
>
> -static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> +static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
> {
> if (head) {
> - call_rcu(head, func);
> + call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
> return;
> }
>
> // kvfree_rcu(one_arg) call.
> might_sleep();
> synchronize_rcu();
> - kvfree((void *) func);
> + kvfree(ptr);
> }
>
> #ifdef CONFIG_KASAN_GENERIC
> -void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
> +void kvfree_call_rcu(struct rcu_head *head, void *ptr);
> #else
> -static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> +static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
> {
> - __kvfree_call_rcu(head, func);
> + __kvfree_call_rcu(head, ptr);
> }
> #endif
>
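Note that on Tiny RCU the callback argument of call_rcu() still
doubles as the encoded offset, which is why __kvfree_call_rcu()
above re-derives it as head - ptr. A hedged standalone sketch of
that round trip (fake_call_rcu(), struct widget, and the typedefs
are hypothetical stand-ins, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct rcu_head { struct rcu_head *next; void *func; };
typedef void (*rcu_callback_t)(struct rcu_head *);
struct widget { int id; struct rcu_head rh; };

/* Fake "grace period": decode and free immediately. The callback
 * argument is really an encoded offset, so it is never called as
 * a function; the object is recovered as head - offset. */
static void fake_call_rcu(struct rcu_head *head, rcu_callback_t cb)
{
	unsigned long offset = (unsigned long) cb;
	struct widget *w = (void *) ((char *) head - offset);

	printf("freeing widget %d\n", w->id);
	free(w);
}

int main(void)
{
	struct widget *w = malloc(sizeof(*w));
	struct rcu_head *head;
	void *ptr;

	if (!w)
		return 1;
	w->id = 42;

	/* What the refactored __kvfree_call_rcu() does: it receives
	 * ptr directly and re-encodes the offset for call_rcu(). */
	head = &w->rh;
	ptr = w;
	fake_call_rcu(head, (rcu_callback_t) (unsigned long)
		      ((char *) head - (char *) ptr));
	return 0;
}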
> diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
> index 5efb51486e8a..e37a0747a8b2 100644
> --- a/include/linux/rcutree.h
> +++ b/include/linux/rcutree.h
> @@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(int cpu)
> }
>
> void synchronize_rcu_expedited(void);
> -void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
> +void kvfree_call_rcu(struct rcu_head *head, void *ptr);
>
> void rcu_barrier(void);
> bool rcu_eqs_special_set(int cpu);
> diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
> index a33a8d4942c3..6c9496d08669 100644
> --- a/kernel/rcu/tiny.c
> +++ b/kernel/rcu/tiny.c
> @@ -246,15 +246,12 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
> EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
>
> #ifdef CONFIG_KASAN_GENERIC
> -void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> +void kvfree_call_rcu(struct rcu_head *head, void *ptr)
> {
> - if (head) {
> - void *ptr = (void *) head - (unsigned long) func;
> -
> + if (head)
> kasan_record_aux_stack_noalloc(ptr);
> - }
>
> - __kvfree_call_rcu(head, func);
> + __kvfree_call_rcu(head, ptr);
> }
> EXPORT_SYMBOL_GPL(kvfree_call_rcu);
> #endif
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 0ca21ac0f064..9fea2aff87a1 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3068,8 +3068,8 @@ static void kfree_rcu_work(struct work_struct *work)
> * This list is named "Channel 3".
> */
> for (; head; head = next) {
> - unsigned long offset = (unsigned long)head->func;
> - void *ptr = (void *)head - offset;
> + void *ptr = (void *) head->func;
> + unsigned long offset = (void *) head - ptr;
>
> next = head->next;
> debug_rcu_head_unqueue((struct rcu_head *)ptr);
> @@ -3307,26 +3307,21 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
> * be free'd in workqueue context. This allows us to: batch requests together to
> * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
> */
> -void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> +void kvfree_call_rcu(struct rcu_head *head, void *ptr)
> {
> unsigned long flags;
> struct kfree_rcu_cpu *krcp;
> bool success;
> - void *ptr;
>
> - if (head) {
> - ptr = (void *) head - (unsigned long) func;
> - } else {
> - /*
> - * Please note there is a limitation for the head-less
> - * variant, that is why there is a clear rule for such
> - * objects: it can be used from might_sleep() context
> - * only. For other places please embed an rcu_head to
> - * your data.
> - */
> + /*
> + * Please note there is a limitation for the head-less
> + * variant, that is why there is a clear rule for such
> + * objects: it can be used from might_sleep() context
> + * only. For other places please embed an rcu_head to
> + * your data.
> + */
> + if (!head)
> might_sleep();
> - ptr = (unsigned long *) func;
> - }
>
> // Queue the object but don't yet schedule the batch.
> if (debug_rcu_head_queue(ptr)) {
> @@ -3347,7 +3342,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> // Inline if kvfree_rcu(one_arg) call.
> goto unlock_return;
>
> - head->func = func;
> + head->func = ptr;
> head->next = krcp->head;
> krcp->head = head;
> success = true;
> --
> 2.30.2
>
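For completeness, the caller-visible API is untouched by this
refactor. A sketch of the two usage patterns the reworked macros
serve (struct foo and the calling context are hypothetical):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

static void example(struct foo *p, char *buf)
{
	/* Double-argument form: usable from any context; the macro
	 * now hands kvfree_call_rcu() both &p->rcu and p itself. */
	kvfree_rcu(p, rcu);

	/* Single-argument (head-less) form: may block, so it is only
	 * valid in sleepable context, as enforced by the might_sleep()
	 * in kvfree_call_rcu(). */
	kvfree_rcu(buf);
}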