[next,5/5] locking/osq_lock: Optimise vcpu_is_preempted() check.

Message ID 23cef5ac49494b9087953f529ae5df16@AcuMS.aculab.com
State New
Series locking/osq_lock: Optimisations to osq_lock code

Commit Message

David Laight Dec. 29, 2023, 8:58 p.m. UTC
The vcpu_is_preempted() test stops osq_lock() spinning if a virtual
cpu is no longer running.
Although patched out for bare-metal, the code still needs the cpu number.
Reading this from 'prev->cpu' is pretty much guaranteed to cause a cache miss
when osq_unlock() is waking up the next cpu.

Instead, save 'prev->cpu' in 'node->prev_cpu' and use that value.
Update it in the osq_lock() 'unqueue' path when 'node->prev' is changed.

This is simpler than checking for 'node->prev' changing and caching
'prev->cpu'.

Signed-off-by: David Laight <david.laight@aculab.com>
---
 kernel/locking/osq_lock.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
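
In access-pattern terms the change boils down to the following (an
illustrative sketch of the polled expression before and after, not the
literal diff):

	/* before: chase node->prev and read prev->cpu from the previous
	 * waiter's node - per the commit message, almost guaranteed to
	 * miss while osq_unlock() is waking up the next cpu */
	vcpu_is_preempted(node_cpu(node->prev));

	/* after: a plain int in the spinner's own node, written at
	 * enqueue and updated only on the unqueue path */
	vcpu_is_preempted(node->prev_cpu);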
  

Comments

Waiman Long Dec. 30, 2023, 3:13 a.m. UTC | #1
On 12/29/23 15:58, David Laight wrote:
> The vcpu_is_preempted() test stops osq_lock() spinning if a virtual
> cpu is no longer running.
> Although patched out for bare-metal, the code still needs the cpu number.
> Reading this from 'prev->cpu' is pretty much guaranteed to cause a cache miss
> when osq_unlock() is waking up the next cpu.
>
> Instead, save 'prev->cpu' in 'node->prev_cpu' and use that value.
> Update it in the osq_lock() 'unqueue' path when 'node->prev' is changed.
>
> This is simpler than checking for 'node->prev' changing and caching
> 'prev->cpu'.
>
> Signed-off-by: David Laight <david.laight@aculab.com>
> ---
>   kernel/locking/osq_lock.c | 14 ++++++--------
>   1 file changed, 6 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
> index b60b0add0161..89be63627434 100644
> --- a/kernel/locking/osq_lock.c
> +++ b/kernel/locking/osq_lock.c
> @@ -14,8 +14,9 @@
>   
>   struct optimistic_spin_node {
>   	struct optimistic_spin_node *self, *next, *prev;
> -	int locked; /* 1 if lock acquired */
> -	int cpu; /* encoded CPU # + 1 value */
> +	int locked;    /* 1 if lock acquired */
> +	int cpu;       /* encoded CPU # + 1 value */
> +	int prev_cpu;  /* actual CPU # for vcpu_is_preempted() */
>   };
>   
>   static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
> @@ -29,11 +30,6 @@ static inline int encode_cpu(int cpu_nr)
>   	return cpu_nr + 1;
>   }
>   
> -static inline int node_cpu(struct optimistic_spin_node *node)
> -{
> -	return node->cpu - 1;
> -}
> -
>   static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
>   {
>   	int cpu_nr = encoded_cpu_val - 1;
> @@ -114,6 +110,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
>   	if (old == OSQ_UNLOCKED_VAL)
>   		return true;
>   
> +	node->prev_cpu = old - 1;
>   	prev = decode_cpu(old);
>   	node->prev = prev;
>   	node->locked = 0;
> @@ -148,7 +145,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
>   	 * polling, be careful.
>   	 */
>   	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
> -				  vcpu_is_preempted(node_cpu(node->prev))))
> +				  vcpu_is_preempted(node->prev_cpu)))
>   		return true;
>   
>   	/* unqueue */
> @@ -205,6 +202,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
>   	 * it will wait in Step-A.
>   	 */
>   
> +	WRITE_ONCE(next->prev_cpu, prev->cpu - 1);
>   	WRITE_ONCE(next->prev, prev);
>   	WRITE_ONCE(prev->next, next);
Reviewed-by: Waiman Long <longman@redhat.com>

  
Waiman Long Dec. 30, 2023, 3:57 p.m. UTC | #2
On 12/29/23 22:13, Waiman Long wrote:
>
> On 12/29/23 15:58, David Laight wrote:
>> [...]
>> @@ -148,7 +145,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
>>        * polling, be careful.
>>        */
>>       if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
>> -                  vcpu_is_preempted(node_cpu(node->prev))))
>> +                  vcpu_is_preempted(node->prev_cpu)))

On second thought, I believe it is more correct to use READ_ONCE() to 
access "node->prev_cpu" as this field is subject to change by a
WRITE_ONCE().
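
I.e. something like this (just wrapping that one read; untested):

	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
				  vcpu_is_preempted(READ_ONCE(node->prev_cpu))))
		return true;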

Cheers,
Longman
  
David Laight Dec. 30, 2023, 10:37 p.m. UTC | #3
From: Waiman Long
> Sent: 30 December 2023 15:57
> 
> On 12/29/23 22:13, Waiman Long wrote:
> >
> > On 12/29/23 15:58, David Laight wrote:
> >> [...]
> >> @@ -148,7 +145,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
> >>        * polling, be careful.
> >>        */
> >>       if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
> >> -                  vcpu_is_preempted(node_cpu(node->prev))))
> >> +                  vcpu_is_preempted(node->prev_cpu)))
> 
> On second thought, I believe it is more correct to use READ_ONCE() to
> access "node->prev_cpu" as this field is subject to change by a
> WRITE_ONCE().

It can be done...

Aren't pretty much all the 'node' members accessed like that?
There is a sprinkling of READ_ONCE() and WRITE_ONCE(), but they
are not always used.

Maybe the structure member(s) should just be marked 'volatile' :-)
That should have exactly the same effect as the volatile cast
inside READ/WRITE_ONCE().
(I know there is a document about not using volatile...)
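
For the scalar case those macros are essentially just (simplified - the
real kernel definitions also handle non-scalar types and add
instrumentation hooks):

	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))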

I've not actually checked whether the two existing WRITE_ONCE()
in 'Step C' need to be ordered - and whether that is guaranteed
by the code, especially on our good old friend 'Alpha' (is that
horrid cache system still supported?).
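
If they did turn out to need ordering, the usual fix would be an
explicit write barrier between the two stores - an untested sketch, not
a claim that it is needed:

	WRITE_ONCE(next->prev_cpu, prev->cpu - 1);
	smp_wmb();	/* publish prev_cpu before the prev pointer */
	WRITE_ONCE(next->prev, prev);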

	David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)
  

Patch

diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index b60b0add0161..89be63627434 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -14,8 +14,9 @@
 
 struct optimistic_spin_node {
 	struct optimistic_spin_node *self, *next, *prev;
-	int locked; /* 1 if lock acquired */
-	int cpu; /* encoded CPU # + 1 value */
+	int locked;    /* 1 if lock acquired */
+	int cpu;       /* encoded CPU # + 1 value */
+	int prev_cpu;  /* actual CPU # for vcpu_is_preempted() */
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
@@ -29,11 +30,6 @@ static inline int encode_cpu(int cpu_nr)
 	return cpu_nr + 1;
 }
 
-static inline int node_cpu(struct optimistic_spin_node *node)
-{
-	return node->cpu - 1;
-}
-
 static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
 {
 	int cpu_nr = encoded_cpu_val - 1;
@@ -114,6 +110,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
+	node->prev_cpu = old - 1;
 	prev = decode_cpu(old);
 	node->prev = prev;
 	node->locked = 0;
@@ -148,7 +145,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * polling, be careful.
 	 */
 	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
-				  vcpu_is_preempted(node_cpu(node->prev))))
+				  vcpu_is_preempted(node->prev_cpu)))
 		return true;
 
 	/* unqueue */
@@ -205,6 +202,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * it will wait in Step-A.
 	 */
 
+	WRITE_ONCE(next->prev_cpu, prev->cpu - 1);
 	WRITE_ONCE(next->prev, prev);
 	WRITE_ONCE(prev->next, next);