public inbox for gentoo-commits@lists.gentoo.org
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.1 commit in: /
Date: Sat, 26 Aug 2023 15:00:38 +0000 (UTC)
Message-ID: <1693062024.597ad722c61ee3671b7cd0dcefa8ce523a757ed3.mpagano@gentoo>

commit:     597ad722c61ee3671b7cd0dcefa8ce523a757ed3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Aug 26 15:00:24 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Aug 26 15:00:24 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=597ad722

Linux patch 6.1.48

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |   4 +
 1047_linux-6.1.48.patch | 604 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 608 insertions(+)

diff --git a/0000_README b/0000_README
index 0ad2cca2..40eb5947 100644
--- a/0000_README
+++ b/0000_README
@@ -231,6 +231,10 @@ Patch:  1046_linux-6.1.47.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.1.47
 
+Patch:  1047_linux-6.1.48.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.1.48
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1047_linux-6.1.48.patch b/1047_linux-6.1.48.patch
new file mode 100644
index 00000000..eb7ca312
--- /dev/null
+++ b/1047_linux-6.1.48.patch
@@ -0,0 +1,604 @@
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+index 2f923c805802f..f79cb11b080f6 100644
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -124,8 +124,8 @@ sequence.
+ To ensure the safety of this mitigation, the kernel must ensure that the
+ safe return sequence is itself free from attacker interference.  In Zen3
+ and Zen4, this is accomplished by creating a BTB alias between the
+-untraining function srso_untrain_ret_alias() and the safe return
+-function srso_safe_ret_alias() which results in evicting a potentially
++untraining function srso_alias_untrain_ret() and the safe return
++function srso_alias_safe_ret() which results in evicting a potentially
+ poisoned BTB entry and using that safe one for all function returns.
+ 
+ In older Zen1 and Zen2, this is accomplished using a reinterpretation
+diff --git a/Makefile b/Makefile
+index 375efcfb91f8f..8bb8dd199c552 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = Curry Ramen
+ 
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index 674ed46d3ceda..11203a9fe0a87 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ static __always_inline void arch_exit_to_user_mode(void)
+ {
+ 	mds_user_clear_cpu_buffers();
++	amd_clear_divider();
+ }
+ #define arch_exit_to_user_mode arch_exit_to_user_mode
+ 
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 31fa631c8587c..2f123d4fb85b5 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -168,9 +168,9 @@
+ .endm
+ 
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
++#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
+ #else
+-#define CALL_ZEN_UNTRAIN_RET	""
++#define CALL_UNTRAIN_RET	""
+ #endif
+ 
+ /*
+@@ -178,7 +178,7 @@
+  * return thunk isn't mapped into the userspace tables (then again, AMD
+  * typically has NO_MELTDOWN).
+  *
+- * While zen_untrain_ret() doesn't clobber anything but requires stack,
++ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
+  * entry_ibpb() will clobber AX, CX, DX.
+  *
+  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+@@ -189,14 +189,9 @@
+ 	defined(CONFIG_CPU_SRSO)
+ 	ANNOTATE_UNRET_END
+ 	ALTERNATIVE_2 "",						\
+-	              CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
++		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+ 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+ #endif
+-
+-#ifdef CONFIG_CPU_SRSO
+-	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+-			  "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+-#endif
+ .endm
+ 
+ #else /* __ASSEMBLY__ */
+@@ -210,10 +205,21 @@
+ typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+ extern retpoline_thunk_t __x86_indirect_thunk_array[];
+ 
++#ifdef CONFIG_RETHUNK
+ extern void __x86_return_thunk(void);
+-extern void zen_untrain_ret(void);
++#else
++static inline void __x86_return_thunk(void) {}
++#endif
++
++extern void retbleed_return_thunk(void);
++extern void srso_return_thunk(void);
++extern void srso_alias_return_thunk(void);
++
++extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+-extern void srso_untrain_ret_alias(void);
++extern void srso_alias_untrain_ret(void);
++
++extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+ 
+ #ifdef CONFIG_RETPOLINE
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 239b302973d7a..f240c978d85e4 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1295,3 +1295,4 @@ void noinstr amd_clear_divider(void)
+ 	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+ 		     :: "a" (0), "d" (0), "r" (1));
+ }
++EXPORT_SYMBOL_GPL(amd_clear_divider);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index d98f33ea57e47..3a893ab398a01 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+ 
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+ 
++void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
++
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+ {
+@@ -164,8 +166,13 @@ void __init cpu_select_mitigations(void)
+ 	md_clear_select_mitigation();
+ 	srbds_select_mitigation();
+ 	l1d_flush_select_mitigation();
+-	gds_select_mitigation();
++
++	/*
++	 * srso_select_mitigation() depends and must run after
++	 * retbleed_select_mitigation().
++	 */
+ 	srso_select_mitigation();
++	gds_select_mitigation();
+ }
+ 
+ /*
+@@ -1013,6 +1020,9 @@ do_cmd_auto:
+ 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ 		setup_force_cpu_cap(X86_FEATURE_UNRET);
+ 
++		if (IS_ENABLED(CONFIG_RETHUNK))
++			x86_return_thunk = retbleed_return_thunk;
++
+ 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+ 			pr_err(RETBLEED_UNTRAIN_MSG);
+@@ -2388,9 +2398,10 @@ static void __init srso_select_mitigation(void)
+ 		 * Zen1/2 with SMT off aren't vulnerable after the right
+ 		 * IBPB microcode has been applied.
+ 		 */
+-		if ((boot_cpu_data.x86 < 0x19) &&
+-		    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
++		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+ 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
++			return;
++		}
+ 	}
+ 
+ 	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+@@ -2419,11 +2430,15 @@ static void __init srso_select_mitigation(void)
+ 			 * like ftrace, static_call, etc.
+ 			 */
+ 			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++			setup_force_cpu_cap(X86_FEATURE_UNRET);
+ 
+-			if (boot_cpu_data.x86 == 0x19)
++			if (boot_cpu_data.x86 == 0x19) {
+ 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+-			else
++				x86_return_thunk = srso_alias_return_thunk;
++			} else {
+ 				setup_force_cpu_cap(X86_FEATURE_SRSO);
++				x86_return_thunk = srso_return_thunk;
++			}
+ 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+ 		} else {
+ 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+@@ -2672,6 +2687,9 @@ static ssize_t gds_show_state(char *buf)
+ 
+ static ssize_t srso_show_state(char *buf)
+ {
++	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
++		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
++
+ 	return sysfs_emit(buf, "%s%s\n",
+ 			  srso_strings[srso_mitigation],
+ 			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index a9b54b795ebff..3fbb491688275 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -184,6 +184,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
+  */
+ bool __static_call_fixup(void *tramp, u8 op, void *dest)
+ {
++	unsigned long addr = (unsigned long)tramp;
++	/*
++	 * Not all .return_sites are a static_call trampoline (most are not).
++	 * Check if the 3 bytes after the return are still kernel text, if not,
++	 * then this definitely is not a trampoline and we need not worry
++	 * further.
++	 *
++	 * This avoids the memcmp() below tripping over pagefaults etc..
++	 */
++	if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
++	    !kernel_text_address(addr + 7))
++		return false;
++
+ 	if (memcmp(tramp+5, tramp_ud, 3)) {
+ 		/* Not a trampoline site, not our problem. */
+ 		return false;
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 7e8795d8b0f17..c0a5a4f225d9a 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error)
+ {
+ 	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+ 		      FPE_INTDIV, error_get_trap_addr(regs));
+-
+-	amd_clear_divider();
+ }
+ 
+ DEFINE_IDTENTRY(exc_overflow)
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index e6939ebb606ab..78ccb5ec3c0e7 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -134,18 +134,18 @@ SECTIONS
+ 		KPROBES_TEXT
+ 		ALIGN_ENTRY_TEXT_BEGIN
+ #ifdef CONFIG_CPU_SRSO
+-		*(.text.__x86.rethunk_untrain)
++		*(.text..__x86.rethunk_untrain)
+ #endif
+ 
+ 		ENTRY_TEXT
+ 
+ #ifdef CONFIG_CPU_SRSO
+ 		/*
+-		 * See the comment above srso_untrain_ret_alias()'s
++		 * See the comment above srso_alias_untrain_ret()'s
+ 		 * definition.
+ 		 */
+-		. = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+-		*(.text.__x86.rethunk_safe)
++		. = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
++		*(.text..__x86.rethunk_safe)
+ #endif
+ 		ALIGN_ENTRY_TEXT_END
+ 		SOFTIRQENTRY_TEXT
+@@ -154,8 +154,8 @@ SECTIONS
+ 
+ #ifdef CONFIG_RETPOLINE
+ 		__indirect_thunk_start = .;
+-		*(.text.__x86.indirect_thunk)
+-		*(.text.__x86.return_thunk)
++		*(.text..__x86.indirect_thunk)
++		*(.text..__x86.return_thunk)
+ 		__indirect_thunk_end = .;
+ #endif
+ 	} :text =0xcccc
+@@ -507,8 +507,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+            "fixed_percpu_data is not at start of per-cpu area");
+ #endif
+ 
+- #ifdef CONFIG_RETHUNK
+-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
++#ifdef CONFIG_RETHUNK
++. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+ . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+ 
+@@ -523,8 +523,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+  * Instead do: (A | B) - (A & B) in order to compute the XOR
+  * of the two function addresses:
+  */
+-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+-		(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
++		(ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+ 		"SRSO function pair won't alias");
+ #endif
+ 
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index fdb6007f2eb86..a96f9a17e8b5d 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3947,6 +3947,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
+ 
+ 	guest_state_enter_irqoff();
+ 
++	amd_clear_divider();
++
+ 	if (sev_es_guest(vcpu->kvm))
+ 		__svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+ 	else
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 30e76fab678a5..65c5c44f006bc 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -11,7 +11,7 @@
+ #include <asm/frame.h>
+ #include <asm/nops.h>
+ 
+-	.section .text.__x86.indirect_thunk
++	.section .text..__x86.indirect_thunk
+ 
+ .macro RETPOLINE reg
+ 	ANNOTATE_INTRA_FUNCTION_CALL
+@@ -76,75 +76,106 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+ #ifdef CONFIG_RETHUNK
+ 
+ /*
+- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
++ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+  * special addresses:
+  *
+- * - srso_untrain_ret_alias() is 2M aligned
+- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
++ * - srso_alias_untrain_ret() is 2M aligned
++ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
+  * and 20 in its virtual address are set (while those bits in the
+- * srso_untrain_ret_alias() function are cleared).
++ * srso_alias_untrain_ret() function are cleared).
+  *
+  * This guarantees that those two addresses will alias in the branch
+  * target buffer of Zen3/4 generations, leading to any potential
+  * poisoned entries at that BTB slot to get evicted.
+  *
+- * As a result, srso_safe_ret_alias() becomes a safe return.
++ * As a result, srso_alias_safe_ret() becomes a safe return.
+  */
+ #ifdef CONFIG_CPU_SRSO
+-	.section .text.__x86.rethunk_untrain
++	.section .text..__x86.rethunk_untrain
+ 
+-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	UNWIND_HINT_FUNC
+ 	ANNOTATE_NOENDBR
+ 	ASM_NOP2
+ 	lfence
+-	jmp __x86_return_thunk
+-SYM_FUNC_END(srso_untrain_ret_alias)
+-__EXPORT_THUNK(srso_untrain_ret_alias)
++	jmp srso_alias_return_thunk
++SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
+ 
+-	.section .text.__x86.rethunk_safe
++	.section .text..__x86.rethunk_safe
++#else
++/* dummy definition for alternatives */
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	ANNOTATE_UNRET_SAFE
++	ret
++	int3
++SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif
+ 
+-/* Needs a definition for the __x86_return_thunk alternative below. */
+-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+-#ifdef CONFIG_CPU_SRSO
+-	add $8, %_ASM_SP
++SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	lea 8(%_ASM_SP), %_ASM_SP
+ 	UNWIND_HINT_FUNC
+-#endif
+ 	ANNOTATE_UNRET_SAFE
+ 	ret
+ 	int3
+-SYM_FUNC_END(srso_safe_ret_alias)
++SYM_FUNC_END(srso_alias_safe_ret)
++
++	.section .text..__x86.return_thunk
+ 
+-	.section .text.__x86.return_thunk
++SYM_CODE_START(srso_alias_return_thunk)
++	UNWIND_HINT_FUNC
++	ANNOTATE_NOENDBR
++	call srso_alias_safe_ret
++	ud2
++SYM_CODE_END(srso_alias_return_thunk)
++
++/*
++ * Some generic notes on the untraining sequences:
++ *
++ * They are interchangeable when it comes to flushing potentially wrong
++ * RET predictions from the BTB.
++ *
++ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
++ * Retbleed sequence because the return sequence done there
++ * (srso_safe_ret()) is longer and the return sequence must fully nest
++ * (end before) the untraining sequence. Therefore, the untraining
++ * sequence must fully overlap the return sequence.
++ *
++ * Regarding alignment - the instructions which need to be untrained,
++ * must all start at a cacheline boundary for Zen1/2 generations. That
++ * is, instruction sequences starting at srso_safe_ret() and
++ * the respective instruction sequences at retbleed_return_thunk()
++ * must start at a cacheline boundary.
++ */
+ 
+ /*
+  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
++ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
+  *    alignment within the BTB.
+- * 2) The instruction at zen_untrain_ret must contain, and not
++ * 2) The instruction at retbleed_untrain_ret must contain, and not
+  *    end with, the 0xc3 byte of the RET.
+  * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
+  *    from re-poisioning the BTB prediction.
+  */
+ 	.align 64
+-	.skip 64 - (__ret - zen_untrain_ret), 0xcc
+-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
++SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	ANNOTATE_NOENDBR
+ 	/*
+-	 * As executed from zen_untrain_ret, this is:
++	 * As executed from retbleed_untrain_ret, this is:
+ 	 *
+ 	 *   TEST $0xcc, %bl
+ 	 *   LFENCE
+-	 *   JMP __x86_return_thunk
++	 *   JMP retbleed_return_thunk
+ 	 *
+ 	 * Executing the TEST instruction has a side effect of evicting any BTB
+ 	 * prediction (potentially attacker controlled) attached to the RET, as
+-	 * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
++	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
+ 	 */
+ 	.byte	0xf6
+ 
+ 	/*
+-	 * As executed from __x86_return_thunk, this is a plain RET.
++	 * As executed from retbleed_return_thunk, this is a plain RET.
+ 	 *
+ 	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+ 	 *
+@@ -156,13 +187,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	 * With SMT enabled and STIBP active, a sibling thread cannot poison
+ 	 * RET's prediction to a type of its choice, but can evict the
+ 	 * prediction due to competitive sharing. If the prediction is
+-	 * evicted, __x86_return_thunk will suffer Straight Line Speculation
++	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
+ 	 * which will be contained safely by the INT3.
+ 	 */
+-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
++SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
+ 	ret
+ 	int3
+-SYM_CODE_END(__ret)
++SYM_CODE_END(retbleed_return_thunk)
+ 
+ 	/*
+ 	 * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -173,16 +204,16 @@ SYM_CODE_END(__ret)
+ 	 * Jump back and execute the RET in the middle of the TEST instruction.
+ 	 * INT3 is for SLS protection.
+ 	 */
+-	jmp __ret
++	jmp retbleed_return_thunk
+ 	int3
+-SYM_FUNC_END(zen_untrain_ret)
+-__EXPORT_THUNK(zen_untrain_ret)
++SYM_FUNC_END(retbleed_untrain_ret)
++__EXPORT_THUNK(retbleed_untrain_ret)
+ 
+ /*
+- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
++ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+  * above. On kernel entry, srso_untrain_ret() is executed which is a
+  *
+- * movabs $0xccccccc308c48348,%rax
++ * movabs $0xccccc30824648d48,%rax
+  *
+  * and when the return thunk executes the inner label srso_safe_ret()
+  * later, it is a stack manipulation and a RET which is mispredicted and
+@@ -194,22 +225,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ 	ANNOTATE_NOENDBR
+ 	.byte 0x48, 0xb8
+ 
++/*
++ * This forces the function return instruction to speculate into a trap
++ * (UD2 in srso_return_thunk() below).  This RET will then mispredict
++ * and execution will continue at the return site read from the top of
++ * the stack.
++ */
+ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+-	add $8, %_ASM_SP
++	lea 8(%_ASM_SP), %_ASM_SP
+ 	ret
+ 	int3
+ 	int3
+-	int3
++	/* end of movabs */
+ 	lfence
+ 	call srso_safe_ret
+-	int3
++	ud2
+ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+ 
+-SYM_FUNC_START(__x86_return_thunk)
+-	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+-			"call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
++SYM_CODE_START(srso_return_thunk)
++	UNWIND_HINT_FUNC
++	ANNOTATE_NOENDBR
++	call srso_safe_ret
++	ud2
++SYM_CODE_END(srso_return_thunk)
++
++SYM_FUNC_START(entry_untrain_ret)
++	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
++		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
++		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++SYM_FUNC_END(entry_untrain_ret)
++__EXPORT_THUNK(entry_untrain_ret)
++
++SYM_CODE_START(__x86_return_thunk)
++	UNWIND_HINT_FUNC
++	ANNOTATE_NOENDBR
++	ANNOTATE_UNRET_SAFE
++	ret
+ 	int3
+ SYM_CODE_END(__x86_return_thunk)
+ EXPORT_SYMBOL(__x86_return_thunk)
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index a60c5efe34b36..29c35279c7ed8 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -799,5 +799,5 @@ bool arch_is_rethunk(struct symbol *sym)
+ 	return !strcmp(sym->name, "__x86_return_thunk") ||
+ 	       !strcmp(sym->name, "srso_untrain_ret") ||
+ 	       !strcmp(sym->name, "srso_safe_ret") ||
+-	       !strcmp(sym->name, "__ret");
++	       !strcmp(sym->name, "retbleed_return_thunk");
+ }
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index c2c350933a237..913bd361c3684 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -379,7 +379,7 @@ static int decode_instructions(struct objtool_file *file)
+ 
+ 		if (!strcmp(sec->name, ".noinstr.text") ||
+ 		    !strcmp(sec->name, ".entry.text") ||
+-		    !strncmp(sec->name, ".text.__x86.", 12))
++		    !strncmp(sec->name, ".text..__x86.", 13))
+ 			sec->noinstr = true;
+ 
+ 		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
+@@ -1430,7 +1430,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ 			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
+ 
+ 			/*
+-			 * This is a special case for zen_untrain_ret().
++			 * This is a special case for retbleed_untrain_ret().
+ 			 * It jumps to __x86_return_thunk(), but objtool
+ 			 * can't find the thunk's starting RET
+ 			 * instruction, because the RET is also in the
+@@ -2450,12 +2450,17 @@ static int decode_sections(struct objtool_file *file)
+ 	return 0;
+ }
+ 
+-static bool is_fentry_call(struct instruction *insn)
++static bool is_special_call(struct instruction *insn)
+ {
+-	if (insn->type == INSN_CALL &&
+-	    insn->call_dest &&
+-	    insn->call_dest->fentry)
+-		return true;
++	if (insn->type == INSN_CALL) {
++		struct symbol *dest = insn->call_dest;
++
++		if (!dest)
++			return false;
++
++		if (dest->fentry)
++			return true;
++	}
+ 
+ 	return false;
+ }
+@@ -3448,7 +3453,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ 			if (ret)
+ 				return ret;
+ 
+-			if (opts.stackval && func && !is_fentry_call(insn) &&
++			if (opts.stackval && func && !is_special_call(insn) &&
+ 			    !has_valid_stack_frame(&state)) {
+ 				WARN_FUNC("call without frame pointer save/setup",
+ 					  sec, insn->offset);
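The bugs.c hunks above introduce a writable x86_return_thunk pointer and, during
mitigation selection, point it at retbleed_return_thunk, srso_return_thunk or
srso_alias_return_thunk depending on the CPU family and the forced feature bits.
A minimal userspace sketch of that selection pattern follows; the thunk names are
taken from the patch, but the function bodies, the select_thunk() helper and its
flag parameters are made up for illustration, and the real kernel keys off
X86_FEATURE_* capabilities and uses the pointer when patching return sites at
boot rather than calling through it at run time like this.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the thunks named in the patch; the bodies are fake. */
static void __x86_return_thunk(void)      { puts("plain ret thunk"); }
static void retbleed_return_thunk(void)   { puts("retbleed thunk"); }
static void srso_return_thunk(void)       { puts("srso thunk (Zen1/2)"); }
static void srso_alias_return_thunk(void) { puts("srso alias thunk (Zen3/4)"); }

/* Default, as in the bugs.c hunk: start at __x86_return_thunk. */
static void (*x86_return_thunk)(void) = __x86_return_thunk;

/*
 * Simplified selection, mirroring the patch: family 0x19 gets the aliasing
 * variant, older families the MOVABS-based srso thunk, retbleed its own.
 */
static void select_thunk(unsigned int family, bool srso, bool retbleed)
{
	if (srso)
		x86_return_thunk = (family == 0x19) ? srso_alias_return_thunk
						    : srso_return_thunk;
	else if (retbleed)
		x86_return_thunk = retbleed_return_thunk;
}

int main(void)
{
	select_thunk(0x19, true, false);
	x86_return_thunk();	/* prints: srso alias thunk (Zen3/4) */
	return 0;
}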

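The __static_call_fixup() change above bails out early when the three bytes read
by the memcmp() at tramp+5 could sit on a different page than the trampoline
start and that page is not kernel text. The shift comparison is the
page-crossing test; a standalone sketch of it, assuming a hypothetical
PAGE_SHIFT of 12 (4 KiB pages) instead of the kernel's definition:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* True when the 8 bytes at addr..addr+7 span two pages, i.e. when the
 * trailing bytes might live on a page that is not mapped kernel text. */
static bool spans_two_pages(uint64_t addr)
{
	return (addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d\n", spans_two_pages(0x401000));	/* 0: fully inside one page */
	printf("%d\n", spans_two_pages(0x401ffd));	/* 1: 0x401ffd..0x402004 crosses */
	return 0;
}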

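Finally, the vmlinux.lds.S assertion above requires the XOR of
srso_alias_untrain_ret and srso_alias_safe_ret to equal
(1 << 2) | (1 << 8) | (1 << 14) | (1 << 20), but linker scripts have no XOR
operator, so the comment in that hunk computes it as (A | B) - (A & B). The
check below uses two hypothetical addresses in the same 2 MB region to show
that the substitute expression really is A ^ B and yields the required bit
pattern:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical addresses: the safe return sits in the same 2 MB region
	 * as the untraining function, with bits 2, 8, 14 and 20 set. */
	uint64_t untrain = 0xffffffff82000000ULL;	/* stand-in, 2 MB aligned */
	uint64_t mask    = (1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20);
	uint64_t safe    = untrain | mask;

	/* ld has | and & but no ^, hence the (A | B) - (A & B) form. */
	uint64_t xor_via_or_and = (untrain | safe) - (untrain & safe);

	assert(xor_via_or_and == (untrain ^ safe));
	assert(xor_via_or_and == mask);
	printf("alias bits: 0x%llx\n", (unsigned long long)xor_via_or_and);
	return 0;
}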