[19/22] x86/srso: Improve i-cache locality for alias mitigation

Message ID 61b4147837d1e0273d094f3d11384fcdcf9b637f.1692580085.git.jpoimboe@kernel.org
State New
Series SRSO fixes/cleanups

Commit Message

Josh Poimboeuf Aug. 21, 2023, 1:19 a.m. UTC
  Move srso_alias_return_thunk() to the same section as
srso_alias_safe_ret() so they can share a cache line.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
 arch/x86/lib/retpoline.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
  

Patch

diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index af3c1f0e4fb8..415521dbe15e 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -166,14 +166,14 @@  SYM_CODE_START_NOALIGN(srso_alias_safe_ret)
 	ret
 	int3
 SYM_FUNC_END(srso_alias_safe_ret)
-	.popsection
 
-SYM_CODE_START(srso_alias_return_thunk)
+SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	call srso_alias_safe_ret
 	ud2
 SYM_CODE_END(srso_alias_return_thunk)
+	.popsection
 
 /*
  * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
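
For illustration only, a minimal user-space sketch of the i-cache locality idea this patch relies on; it is not the kernel's actual code, and the "hot_thunks" section name and both function names are made up. Placing two small, mutually-calling functions in the same named section asks the toolchain to lay them out adjacently, which is the same reason srso_alias_return_thunk() is moved into the same section as srso_alias_safe_ret() so the two can share a cache line.

/*
 * Illustrative sketch: co-locate two hot, mutually-calling functions in
 * one named section so they end up adjacent in memory and can share an
 * instruction cache line. Section and function names are invented; they
 * are not taken from retpoline.S.
 */
#include <stdio.h>

__attribute__((section("hot_thunks"), noinline))
static int safe_ret_like(int x)
{
	return x + 1;
}

__attribute__((section("hot_thunks"), noinline))
static int return_thunk_like(int x)
{
	/* Calls its section neighbor, so both bodies are hot together. */
	return safe_ret_like(x) * 2;
}

int main(void)
{
	/* Build with e.g. "gcc -O2 sketch.c"; prints 42. */
	printf("%d\n", return_thunk_like(20));
	return 0;
}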