[3/5,RESEND] x86: qspinlock-paravirt: fix missing-prototype warnings
Commit Message
From: Arnd Bergmann <arnd@arndb.de>
__pv_queued_spin_unlock_slowpath() is defined in a header file as a
global function and is designed to be called from inline asm, but no
prototype is visible at the point of its definition:
kernel/locking/qspinlock_paravirt.h:493:1: error: no previous prototype for '__pv_queued_spin_unlock_slowpath' [-Werror=missing-prototypes]
Add a prototype to the x86 header that contains the inline asm calling
it, and ensure that this header gets included before the definition
rather than after it.
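As a minimal sketch of the warning and the fix (hypothetical foo names,
not the kernel code itself):

/* foo.h: the header that also contains the inline asm caller */
void foo_slowpath(int x);

/* foo.c: include the header before defining the function */
#include "foo.h"

void foo_slowpath(int x)	/* prototype was in scope: no warning */
{
}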
The native_pv_lock_init() function, in turn, is only declared in SMP
builds; its definition can be left out of UP builds to avoid another
warning:
arch/x86/kernel/paravirt.c:76:13: error: no previous prototype for 'native_pv_lock_init' [-Werror=missing-prototypes]
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
arch/x86/include/asm/qspinlock_paravirt.h | 2 ++
arch/x86/kernel/paravirt.c | 2 ++
kernel/locking/qspinlock_paravirt.h | 20 ++++++++++----------
3 files changed, 14 insertions(+), 10 deletions(-)
Comments
On Tue, Jul 25, 2023 at 03:48:35PM +0200, Arnd Bergmann wrote:
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index 89842bb7ec9cc..64a6bba70d183 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -73,11 +73,13 @@ DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
>
> DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
>
> +#ifdef CONFIG_SMP
> void __init native_pv_lock_init(void)
> {
> if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
> static_branch_disable(&virt_spin_lock_key);
> }
> +#endif
Can you add an empty UP stub instead?
We all have a great aversion to ifdeffery...
Thx.
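For reference, the stub pattern being asked for looks roughly like this
(a sketch with a hypothetical foo_init(), not the actual fix that was
settled on below):

#ifdef CONFIG_SMP
void foo_init(void);
#else
static inline void foo_init(void) { }	/* UP: the stub compiles away */
#endif

Callers can then invoke foo_init() unconditionally, and the single
#ifdef stays in the header.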
On Tue, Aug 1, 2023, at 21:22, Borislav Petkov wrote:
> On Tue, Jul 25, 2023 at 03:48:35PM +0200, Arnd Bergmann wrote:
>> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
>> index 89842bb7ec9cc..64a6bba70d183 100644
>> --- a/arch/x86/kernel/paravirt.c
>> +++ b/arch/x86/kernel/paravirt.c
>> @@ -73,11 +73,13 @@ DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
>>
>> DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
>>
>> +#ifdef CONFIG_SMP
>> void __init native_pv_lock_init(void)
>> {
>> if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
>> static_branch_disable(&virt_spin_lock_key);
>> }
>> +#endif
>
> Can you add an empty UP stub instead?
>
> We all have a great aversion to ifdeffery...
There is already a stub for !CONFIG_PARAVIRT in asm/qspinlock.h,
but the problem is that this header does not get included
anywhere in UP configurations.
The variant below would avoid adding more #ifdefs by moving the
declaration into asm/paravirt.h, ensuring that the function is
declared even when there is no caller.
Does this look better to you?
Arnd
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b49778664d2be..fc3a377bb9b79 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -739,6 +739,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
".popsection")
extern void default_banner(void);
+void native_pv_lock_init(void) __init;
#else /* __ASSEMBLY__ */
@@ -776,8 +777,13 @@ extern void default_banner(void);
#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
+
+static inline void native_pv_lock_init(void)
+{
+}
#endif /* !CONFIG_PARAVIRT */
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index d87451df480bd..cde8357bb226d 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -74,8 +74,6 @@ static inline bool vcpu_is_preempted(long cpu)
*/
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
-void native_pv_lock_init(void) __init;
-
/*
* Shortcut for the queued_spin_lock_slowpath() function that allows
* virt to hijack it.
@@ -103,10 +101,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
return true;
}
-#else
-static inline void native_pv_lock_init(void)
-{
-}
+
#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 89842bb7ec9cc..066fc19d2568e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -75,7 +75,8 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
void __init native_pv_lock_init(void)
{
- if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
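The IS_ENABLED() test in this last hunk is the idiomatic alternative to
an #ifdef: the branch stays visible to the compiler in every
configuration, so it is always type-checked, and constant folding
removes it when the option is off. A minimal sketch of the idiom, using
a hypothetical CONFIG_FOO option and foo_*() helpers:

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>	/* bool */

static bool foo_hw_present(void) { return true; }	/* hypothetical */
static void foo_enable(void) { }			/* hypothetical */

void foo_setup(void)
{
	/*
	 * IS_ENABLED(CONFIG_FOO) folds to 0 or 1 at compile time; when
	 * CONFIG_FOO is off, the whole branch is discarded as dead code,
	 * but it is still parsed and type-checked in every build.
	 */
	if (IS_ENABLED(CONFIG_FOO) && foo_hw_present())
		foo_enable();
}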
On Tue, Aug 01, 2023 at 10:26:36PM +0200, Arnd Bergmann wrote:
> The variant below would avoid adding more #ifdefs, by moving
> the declaration into asm/paravirt.h to ensure that it's
> declared even if there is no caller.
>
> Does this look better to you?
Yes, thanks. I don't mind empty stubs in the header which the compiler
discards/ignores.
Now lemme look at the rest.
Thx.
On Wed, Aug 02, 2023 at 07:26:12PM +0200, Borislav Petkov wrote:
> Now lemme look at the rest.
Yap, they look good. I'll queue your next version.
Thx.
Patch
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -4,6 +4,8 @@
#include <asm/ibt.h>
+void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
+
/*
* For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
* registers. For i386, however, only 1 32-bit register needs to be saved
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -73,11 +73,13 @@ DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+#ifdef CONFIG_SMP
void __init native_pv_lock_init(void)
{
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
+#endif
unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
unsigned int len)
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -485,6 +485,16 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
+ * function close to each other sharing consecutive instruction cachelines.
+ * Alternatively, architecture specific version of __pv_queued_spin_unlock()
+ * can be defined.
+ */
+#include <asm/qspinlock_paravirt.h>
+
/*
* PV versions of the unlock fastpath and slowpath functions to be used
* instead of queued_spin_unlock().
@@ -533,16 +543,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
pv_kick(node->cpu);
}
-/*
- * Include the architecture specific callee-save thunk of the
- * __pv_queued_spin_unlock(). This thunk is put together with
- * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
- * function close to each other sharing consecutive instruction cachelines.
- * Alternatively, architecture specific version of __pv_queued_spin_unlock()
- * can be defined.
- */
-#include <asm/qspinlock_paravirt.h>
-
#ifndef __pv_queued_spin_unlock
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{