[next,1/5] locking/osq_lock: Move the definition of optimistic_spin_node into osf_lock.c

Message ID 6eb23a47953b445281e04f5573aead65@AcuMS.aculab.com
State New
Series locking/osq_lock: Optimisations to osq_lock code

Commit Message

David Laight Dec. 29, 2023, 8:53 p.m. UTC
  struct optimistic_spin_node is private to the implementation.
Move it into the C file to ensure nothing is accessing it.

Signed-off-by: David Laight <david.laight@aculab.com>
---
 include/linux/osq_lock.h  | 5 -----
 kernel/locking/osq_lock.c | 7 +++++++
 2 files changed, 7 insertions(+), 5 deletions(-)
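For context, a minimal self-contained sketch of the pattern the patch applies (hypothetical names, not the kernel code): the header exposes only the fixed-layout queue type, while the node type is defined solely in the implementation file, so any out-of-tree code that tried to touch node fields would no longer compile.

	#include <stdio.h>

	/* what would live in the header: no node definition at all */
	struct example_queue {
		int tail;			/* stands in for atomic_t tail */
	};

	/* what would live in the .c file: the node type is private here */
	struct example_node {
		struct example_node *next, *prev;
		int locked;			/* 1 if lock acquired */
		int cpu;			/* encoded CPU # + 1 value */
	};

	static struct example_node nodes[4];	/* stands in for the per-CPU nodes */

	int main(void)
	{
		struct example_queue q = { .tail = 0 };

		/* only this file can reach into the node fields */
		nodes[0].locked = 1;
		printf("tail=%d locked=%d\n", q.tail, nodes[0].locked);
		return 0;
	}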
  

Comments

Waiman Long Dec. 30, 2023, 1:59 a.m. UTC | #1
On 12/29/23 15:53, David Laight wrote:
> struct optimistic_spin_node is private to the implementation.
> Move it into the C file to ensure nothing is accessing it.
>
> Signed-off-by: David Laight <david.laight@aculab.com>
> ---
>   include/linux/osq_lock.h  | 5 -----
>   kernel/locking/osq_lock.c | 7 +++++++
>   2 files changed, 7 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
> index 5581dbd3bd34..ea8fb31379e3 100644
> --- a/include/linux/osq_lock.h
> +++ b/include/linux/osq_lock.h
> @@ -6,11 +6,6 @@
>    * An MCS like lock especially tailored for optimistic spinning for sleeping
>    * lock implementations (mutex, rwsem, etc).
>    */
> -struct optimistic_spin_node {
> -	struct optimistic_spin_node *next, *prev;
> -	int locked; /* 1 if lock acquired */
> -	int cpu; /* encoded CPU # + 1 value */
> -};
>   
>   struct optimistic_spin_queue {
>   	/*
> diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
> index d5610ad52b92..d414eef4bec6 100644
> --- a/kernel/locking/osq_lock.c
> +++ b/kernel/locking/osq_lock.c
> @@ -11,6 +11,13 @@
>    * called from interrupt context and we have preemption disabled while
>    * spinning.
>    */
> +
> +struct optimistic_spin_node {
> +	struct optimistic_spin_node *next, *prev;
> +	int locked; /* 1 if lock acquired */
> +	int cpu; /* encoded CPU # + 1 value */
> +};
> +
>   static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
>   
>   /*

Please correct the patch title "osf_lock.c" => "osq_lock.c".

After the fix, you can add

Acked-by: Waiman Long <longman@redhat.com>
  

Patch

diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 5581dbd3bd34..ea8fb31379e3 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -6,11 +6,6 @@ 
  * An MCS like lock especially tailored for optimistic spinning for sleeping
  * lock implementations (mutex, rwsem, etc).
  */
-struct optimistic_spin_node {
-	struct optimistic_spin_node *next, *prev;
-	int locked; /* 1 if lock acquired */
-	int cpu; /* encoded CPU # + 1 value */
-};
 
 struct optimistic_spin_queue {
 	/*
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index d5610ad52b92..d414eef4bec6 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -11,6 +11,13 @@ 
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
+
+struct optimistic_spin_node {
+	struct optimistic_spin_node *next, *prev;
+	int locked; /* 1 if lock acquired */
+	int cpu; /* encoded CPU # + 1 value */
+};
+
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
 
 /*