[v2] x86/paravirt: use common macro for creating simple asm paravirt functions
Commit Message
There are several paravirt assembler functions which share a common
pattern. Introduce a macro DEFINE_PARAVIRT_ASM() for creating them.

Note that this macro includes explicit alignment of the generated
functions, so __raw_callee_save___kvm_vcpu_is_preempted(),
_paravirt_nop() and paravirt_ret0() are now aligned at 4-byte
boundaries.

The explicit _paravirt_nop() prototype in paravirt.c isn't needed, as
it is already declared in paravirt_types.h.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
---
V2:
- expanded commit message (Srivatsa S. Bhat)
---
arch/x86/include/asm/paravirt.h | 12 ++++++
arch/x86/include/asm/qspinlock_paravirt.h | 46 ++++++++++-------------
arch/x86/kernel/kvm.c | 19 +++-------
arch/x86/kernel/paravirt.c | 22 ++---------
4 files changed, 40 insertions(+), 59 deletions(-)
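
For illustration only (not part of the patch): given the DEFINE_PARAVIRT_ASM()
definition added in the paravirt.h hunk below, the paravirt_ret0() instance
expands roughly to the open-coded asm it replaces:

DEFINE_PARAVIRT_ASM(paravirt_ret0, PV_RET0_ASM, .entry.text);

/* ... expands to roughly: */
asm (".pushsection .entry.text, \"ax\"\n"
     ".global paravirt_ret0\n\t"
     ".type paravirt_ret0, @function\n\t"
     __ALIGN_STR "\n"
     "paravirt_ret0:\n\t"
     ASM_ENDBR
     "xor %" _ASM_AX ", %" _ASM_AX "\n\t"	/* PV_RET0_ASM */
     ASM_RET
     ".size paravirt_ret0, . - paravirt_ret0\n\t"
     ".popsection");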
Comments
On Wed, Nov 09, 2022 at 02:44:18PM +0100, Juergen Gross wrote:
> There are some paravirt assembler functions which are sharing a common
> pattern. Introduce a macro DEFINE_PARAVIRT_ASM() for creating them.
>
> Note that this macro is including explicit alignment of the generated
> functions, leading to __raw_callee_save___kvm_vcpu_is_preempted(),
> _paravirt_nop() and paravirt_ret0() to be aligned at 4 byte boundaries
> now.
>
> The explicit _paravirt_nop() prototype in paravirt.c isn't needed, as
> it is included in paravirt_types.h already.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
> Reviewed-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
> ---
Seems nice; I've made the below little edits, but this is certainly a
bit large for /urgent at this point in time. So how about I merge
locking/urgent into x86/paravirt and munge this on top?
---
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -737,7 +737,7 @@ static __always_inline unsigned long arc
__ALIGN_STR "\n" \
#func ":\n\t" \
ASM_ENDBR \
- instr \
+ instr "\n\t" \
ASM_RET \
".size " #func ", . - " #func "\n\t" \
".popsection")
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -54,8 +54,8 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_
"pop %rdx\n\t" \
FRAME_END
-DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock, PV_UNLOCK_ASM,
- .spinlock.text);
+DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
+ PV_UNLOCK_ASM, .spinlock.text);
#else /* CONFIG_64BIT */
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -802,6 +802,7 @@ extern bool __raw_callee_save___kvm_vcpu
"movq __per_cpu_offset(,%rdi,8), %rax\n\t" \
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
"setne %al\n\t"
+
DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
PV_VCPU_PREEMPTED_ASM, .text);
#endif
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -40,8 +40,7 @@
DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);
/* stub always returning 0. */
-#define PV_RET0_ASM "xor %" _ASM_AX ", %" _ASM_AX "\n\t"
-DEFINE_PARAVIRT_ASM(paravirt_ret0, PV_RET0_ASM, .entry.text);
+DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text);
void __init default_banner(void)
{
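
A likely reason for the instr "\n\t" edit above (an inference, not spelled out
in the thread): ASM_RET is itself a string constant (roughly "ret; int3\n\t",
or a jump to the return thunk depending on configuration), so an instr string
without a trailing newline would land on the same assembler line as the
return. With the macro supplying the newline, callers can drop it, as the
simplified paravirt_ret0 line above does:

/* Without a macro-supplied newline, string pasting yields one line:  */
/*   "xor %eax,%eax" ASM_RET  ->  "xor %eax,%eaxret; int3\n\t"        */
/* With instr "\n\t" in the macro, the caller can simply write:       */
DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text);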
On 16.11.22 12:04, Peter Zijlstra wrote:
> On Wed, Nov 09, 2022 at 02:44:18PM +0100, Juergen Gross wrote:
>> There are some paravirt assembler functions which are sharing a common
>> pattern. Introduce a macro DEFINE_PARAVIRT_ASM() for creating them.
>>
>> Note that this macro is including explicit alignment of the generated
>> functions, leading to __raw_callee_save___kvm_vcpu_is_preempted(),
>> _paravirt_nop() and paravirt_ret0() to be aligned at 4 byte boundaries
>> now.
>>
>> The explicit _paravirt_nop() prototype in paravirt.c isn't needed, as
>> it is included in paravirt_types.h already.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> Reviewed-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
>> ---
>
> Seems nice; I've made the below little edits, but this is certainly a
> bit large for /urgent at this point in time. So how about I merge
> locking/urgent into x86/paravirt and munge this on top?
Fine with me.
Thanks for looking at the patch,
Juergen
Patch
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -730,6 +730,18 @@ static __always_inline unsigned long arch_local_irq_save(void)
#undef PVOP_VCALL4
#undef PVOP_CALL4
+#define DEFINE_PARAVIRT_ASM(func, instr, sec) \
+ asm (".pushsection " #sec ", \"ax\"\n" \
+ ".global " #func "\n\t" \
+ ".type " #func ", @function\n\t" \
+ __ALIGN_STR "\n" \
+ #func ":\n\t" \
+ ASM_ENDBR \
+ instr \
+ ASM_RET \
+ ".size " #func ", . - " #func "\n\t" \
+ ".popsection")
+
extern void default_banner(void);
#else /* __ASSEMBLY__ */
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -14,8 +14,6 @@
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock __pv_queued_spin_unlock
-#define PV_UNLOCK "__raw_callee_save___pv_queued_spin_unlock"
-#define PV_UNLOCK_SLOWPATH "__raw_callee_save___pv_queued_spin_unlock_slowpath"
/*
* Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
@@ -37,32 +35,26 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
* rsi = lockval (second argument)
* rdx = internal variable (set to 0)
*/
-asm (".pushsection .spinlock.text;"
- ".globl " PV_UNLOCK ";"
- ".type " PV_UNLOCK ", @function;"
- ".align 4,0x90;"
- PV_UNLOCK ": "
- ASM_ENDBR
- FRAME_BEGIN
- "push %rdx;"
- "mov $0x1,%eax;"
- "xor %edx,%edx;"
- LOCK_PREFIX "cmpxchg %dl,(%rdi);"
- "cmp $0x1,%al;"
- "jne .slowpath;"
- "pop %rdx;"
+#define PV_UNLOCK_ASM \
+ FRAME_BEGIN \
+ "push %rdx\n\t" \
+ "mov $0x1,%eax\n\t" \
+ "xor %edx,%edx\n\t" \
+ LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t" \
+ "cmp $0x1,%al\n\t" \
+ "jne .slowpath\n\t" \
+ "pop %rdx\n\t" \
+ FRAME_END \
+ ASM_RET \
+ ".slowpath:\n\t" \
+ "push %rsi\n\t" \
+ "movzbl %al,%esi\n\t" \
+ "call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t" \
+ "pop %rsi\n\t" \
+ "pop %rdx\n\t" \
FRAME_END
- ASM_RET
- ".slowpath: "
- "push %rsi;"
- "movzbl %al,%esi;"
- "call " PV_UNLOCK_SLOWPATH ";"
- "pop %rsi;"
- "pop %rdx;"
- FRAME_END
- ASM_RET
- ".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
- ".popsection");
+DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock, PV_UNLOCK_ASM,
+ .spinlock.text);
#else /* CONFIG_64BIT */
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -798,19 +798,12 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
* Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
* restoring to/from the stack.
*/
-asm(
-".pushsection .text;"
-".global __raw_callee_save___kvm_vcpu_is_preempted;"
-".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
-"__raw_callee_save___kvm_vcpu_is_preempted:"
-ASM_ENDBR
-"movq __per_cpu_offset(,%rdi,8), %rax;"
-"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
-"setne %al;"
-ASM_RET
-".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
-".popsection");
-
+#define PV_VCPU_PREEMPTED_ASM \
+ "movq __per_cpu_offset(,%rdi,8), %rax\n\t" \
+ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
+ "setne %al\n\t"
+DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
+ PV_VCPU_PREEMPTED_ASM, .text);
#endif
static void __init kvm_guest_init(void)
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -37,27 +37,11 @@
* nop stub, which must not clobber anything *including the stack* to
* avoid confusing the entry prologues.
*/
-extern void _paravirt_nop(void);
-asm (".pushsection .entry.text, \"ax\"\n"
- ".global _paravirt_nop\n"
- "_paravirt_nop:\n\t"
- ASM_ENDBR
- ASM_RET
- ".size _paravirt_nop, . - _paravirt_nop\n\t"
- ".type _paravirt_nop, @function\n\t"
- ".popsection");
+DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);
/* stub always returning 0. */
-asm (".pushsection .entry.text, \"ax\"\n"
- ".global paravirt_ret0\n"
- "paravirt_ret0:\n\t"
- ASM_ENDBR
- "xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
- ASM_RET
- ".size paravirt_ret0, . - paravirt_ret0\n\t"
- ".type paravirt_ret0, @function\n\t"
- ".popsection");
-
+#define PV_RET0_ASM "xor %" _ASM_AX ", %" _ASM_AX "\n\t"
+DEFINE_PARAVIRT_ASM(paravirt_ret0, PV_RET0_ASM, .entry.text);
void __init default_banner(void)
{