[v3,6/6] x86/gsseg: use the LKGS instruction if available for load_gs_index()
Commit Message
From: "H. Peter Anvin (Intel)" <hpa@zytor.com>
The LKGS instruction atomically loads a segment descriptor into the
%gs descriptor registers, *except* that %gs.base is unchanged, and the
base is instead loaded into MSR_IA32_KERNEL_GS_BASE, which is exactly
what we want this function to do.
Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Xin Li <xin3.li@intel.com>
Link: https://lkml.org/lkml/2022/10/7/352
Link: https://lkml.org/lkml/2022/10/7/373
Link: https://lkml.org/lkml/2022/10/10/1286
---
arch/x86/include/asm/gsseg.h | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
@@ -3,15 +3,39 @@
#define _ASM_X86_GSSEG_H
#include <linux/types.h>
+
+#include <asm/asm.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
#include <asm/processor.h>
+#include <asm/nops.h>
#ifdef CONFIG_X86_64
extern asmlinkage void asm_load_gs_index(u16 selector);
+/* Replace with "lkgs %di" once binutils supports the LKGS instruction */
+#define LKGS_DI _ASM_BYTES(0xf2,0x0f,0x00,0xf7)
+
static inline void native_load_gs_index(unsigned int selector)
{
- asm_load_gs_index(selector);
+ u16 sel = selector;
+
+ /*
+ * Note: the fixup is used for the LKGS instruction, but
+ * it needs to be attached to the primary instruction sequence
+ * as it isn't something that gets patched.
+ *
+ * %rax is provided to the assembly routine as a scratch
+ * register.
+ */
+ asm_inline volatile("1:\n"
+ ALTERNATIVE("call asm_load_gs_index\n",
+ _ASM_BYTES(0x3e) LKGS_DI,
+ X86_FEATURE_LKGS)
+ _ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k[sel])
+ : ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT)
+ : : _ASM_AX);
}
#endif /* CONFIG_X86_64 */