x86/ibt: Annotate text references
Annotate away some of the generic code references. These are places
where we take the address of a symbol for exception handling or for
return addresses (e.g. context switch).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.877758523@infradead.org
parent fe379fa4d1
commit 3e3f069504
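Context for the diff below (not part of the commit): with CONFIG_X86_KERNEL_IBT, objtool flags any address-taken code location that does not begin with an ENDBR instruction; the warnings are roughly of the form "data relocation to !ENDBR". ANNOTATE_NOENDBR records the annotated address in a discarded ELF section that objtool consumes, so the reference is whitelisted without emitting any instruction. A minimal sketch of the C-string form of the macro; the section name and pointer directive are taken from the IBT series, see include/linux/objtool.h for the real definition:

/*
 * Sketch only: record the address of the annotation site in a section
 * that objtool reads and the linker discards, so the final image is
 * unchanged. Label number and .quad are assumptions.
 */
#define ANNOTATE_NOENDBR					\
	"986:\n\t"						\
	".pushsection .discard.noendbr\n\t"			\
	".quad 986b\n\t"					\
	".popsection\n\t"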
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -277,6 +277,7 @@ SYM_FUNC_END(__switch_to_asm)
 .pushsection .text, "ax"
 SYM_CODE_START(ret_from_fork)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // copy_thread
 	movq	%rax, %rdi
 	call	schedule_tail			/* rdi: 'prev' task parameter */
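The // copy_thread comment names the consumer: copy_thread() stores the address of ret_from_fork as the child task's starting RIP, and the first context switch reaches it via RET. IBT validates only indirect CALL/JMP targets, not return targets, so no ENDBR is needed; the annotation just tells objtool the address-take is intentional. A toy model of the pattern, all names hypothetical, not the kernel's code:

#include <stdint.h>

/* Stand-in for ret_from_fork: reached via RET, never via indirect
 * CALL/JMP, so under IBT it needs no ENDBR at its entry. */
static void landing_site(void)
{
}

/* Stand-in for the copy_thread() pattern: taking the address of a
 * code symbol to use as a return address. This address-of is exactly
 * the "text reference" objtool asks to have annotated. */
struct fork_frame {
	uintptr_t rip;
};

int main(void)
{
	struct fork_frame frame = { .rip = (uintptr_t)landing_site };

	return frame.rip ? 0 : 1;
}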
@@ -569,6 +570,7 @@ __irqentry_text_start:
 	.align 16
 	.globl __irqentry_text_end
 __irqentry_text_end:
+	ANNOTATE_NOENDBR
 
 SYM_CODE_START_LOCAL(common_interrupt_return)
 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
@@ -650,6 +652,7 @@ SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
 #endif
 
 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
+	ANNOTATE_NOENDBR // exc_double_fault
 	/*
 	 * This may fault. Non-paranoid faults on return to userspace are
 	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
@@ -744,6 +747,7 @@ SYM_FUNC_START(asm_load_gs_index)
 	FRAME_BEGIN
 	swapgs
 .Lgs_change:
+	ANNOTATE_NOENDBR // error_entry
 	movl	%edi, %gs
 2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	swapgs
@@ -1322,6 +1326,7 @@ first_nmi:
 #endif
 
 repeat_nmi:
+	ANNOTATE_NOENDBR // this code
 	/*
 	 * If there was a nested NMI, the first NMI's iret will return
 	 * here. But NMIs are still enabled and we can take another
@@ -1350,6 +1355,7 @@ repeat_nmi:
 	.endr
 	subq	$(5*8), %rsp
 end_repeat_nmi:
+	ANNOTATE_NOENDBR // this code
 
 	/*
 	 * Everything below this point can be preempted by a nested NMI.
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -148,6 +148,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
 	popfq
 	jmp	.Lsysenter_flags_fixed
 SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
+	ANNOTATE_NOENDBR // is_sysenter_singlestep
SYM_CODE_END(entry_SYSENTER_compat)
 
 /*
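Here the // is_sysenter_singlestep comment names the consumer: that helper range-checks a user RIP against the entry_SYSENTER_compat..__end_entry_SYSENTER_compat window, so the end label's address is taken from C. The .Lgs_change and native_irq_return_iret hunks above are the same pattern, with exception fixup code doing the comparison. A toy model of the range-check pattern, hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the start/end labels of an asm code window. */
static void window_start(void) { }
static void window_end(void) { }

/* Range-checking an instruction pointer against code labels takes
 * their addresses; each label so referenced needs ANNOTATE_NOENDBR
 * (or an ENDBR) to satisfy objtool's IBT checks. */
static bool ip_in_window(uintptr_t ip)
{
	return ip - (uintptr_t)window_start <
	       (uintptr_t)window_end - (uintptr_t)window_start;
}

int main(void)
{
	return ip_in_window((uintptr_t)window_start) ? 0 : 1;
}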
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -713,6 +713,7 @@ asm (
 "	.pushsection	.init.text, \"ax\", @progbits\n"
 "	.type		int3_magic, @function\n"
 "int3_magic:\n"
+	ANNOTATE_NOENDBR
 "	movl	$1, (%" _ASM_ARG1 ")\n"
 	ASM_RET
 "	.size		int3_magic, .-int3_magic\n"
@@ -724,16 +725,19 @@ extern void int3_selftest_ip(void); /* defined in asm below */
 static int __init
 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 {
+	unsigned long selftest = (unsigned long)&int3_selftest_ip;
 	struct die_args *args = data;
 	struct pt_regs *regs = args->regs;
 
+	OPTIMIZER_HIDE_VAR(selftest);
+
 	if (!regs || user_mode(regs))
 		return NOTIFY_DONE;
 
 	if (val != DIE_INT3)
 		return NOTIFY_DONE;
 
-	if (regs->ip - INT3_INSN_SIZE != (unsigned long)&int3_selftest_ip)
+	if (regs->ip - INT3_INSN_SIZE != selftest)
 		return NOTIFY_DONE;
 
 	int3_emulate_call(regs, (unsigned long)&int3_magic);
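My reading of the selftest change, hedged: without the intermediate variable, the compiler may fold the INT3_INSN_SIZE adjustment into the symbol reference itself and materialize a relocation against int3_selftest_ip minus one, an off-label address objtool cannot match against the annotation. OPTIMIZER_HIDE_VAR launders the value through an empty asm so no such folding happens. Roughly its definition from include/linux/compiler.h:

/* Roughly the definition in include/linux/compiler.h: an empty asm
 * statement the optimizer cannot see through, so the hidden variable
 * is treated as an opaque run-time value rather than a link-time
 * constant that folding could rewrite into a new relocation. */
#define OPTIMIZER_HIDE_VAR(var)					\
	__asm__ ("" : "=r" (var) : "0" (var))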
@@ -757,7 +761,9 @@ static noinline void __init int3_selftest(void)
 	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
 	 * notifier above will emulate CALL for us.
 	 */
-	asm volatile ("int3_selftest_ip: int3; nop; nop; nop; nop\n\t"
+	asm volatile ("int3_selftest_ip:\n\t"
+		      ANNOTATE_NOENDBR
+		      "	int3; nop; nop; nop; nop\n\t"
 		      : ASM_CALL_CONSTRAINT
 		      : __ASM_SEL_RAW(a, D) (&val)
 		      : "memory");
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -99,6 +99,7 @@ SYM_CODE_END(startup_64)
 
 SYM_CODE_START(secondary_startup_64)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 	 * and someone has loaded a mapped page table.
@@ -127,6 +128,7 @@ SYM_CODE_START(secondary_startup_64)
 	 */
 SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 
 	/*
 	 * Retrieve the modifier (SME encryption mask if SME is active) to be
@@ -192,6 +194,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	jmp	*%rax
 1:
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // above
 
 	/*
 	 * We must switch to a new descriptor in kernel space for the GDT
@@ -299,6 +302,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	pushq	%rax      # target address in negative space
 	lretq
.Lafter_lret:
+	ANNOTATE_NOENDBR
 SYM_CODE_END(secondary_startup_64)
 
 #include "verify_cpu.S"
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1033,6 +1033,7 @@ asm(
 	".type __kretprobe_trampoline, @function\n"
 	"__kretprobe_trampoline:\n"
 #ifdef CONFIG_X86_64
+	ANNOTATE_NOENDBR
 	/* Push a fake return address to tell the unwinder it's a kretprobe. */
 	"	pushq $__kretprobe_trampoline\n"
 	UNWIND_HINT_FUNC
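The kretprobe trampoline is another return-address case: the probe machinery replaces a function's saved return address with the trampoline's address, so the trampoline is entered by RET, which IBT does not check, and needs only the objtool annotation, not an ENDBR. A toy model, hypothetical names:

#include <stddef.h>

typedef void (*code_addr_t)(void);

/* Stand-in for __kretprobe_trampoline: entered via RET after the
 * saved return address has been swapped, never via indirect branch. */
static void trampoline(void)
{
}

/* Stand-in for arming a kretprobe: the address-of below is the text
 * reference that ANNOTATE_NOENDBR whitelists for objtool. */
static code_addr_t swap_return_address(code_addr_t *slot)
{
	code_addr_t orig = *slot;

	*slot = trampoline;
	return orig;	/* kept so the trampoline can return there later */
}

int main(void)
{
	code_addr_t slot = NULL;

	swap_return_address(&slot);
	return slot == trampoline ? 0 : 1;
}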
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -42,6 +42,7 @@
 	.code64
 SYM_CODE_START_NOALIGN(relocate_kernel)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	/*
 	 * %rdi indirection_page
 	 * %rsi page_list
@@ -223,6 +224,7 @@ SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // RET target, above
 	movq	RSP(%r8), %rsp
 	movq	CR4(%r8), %rax
 	movq	%rax, %cr4
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <linux/error-injection.h>
 #include <linux/kprobes.h>
+#include <linux/objtool.h>
 
 asmlinkage void just_return_func(void);
 
@@ -11,6 +12,7 @@ asm(
 	".type just_return_func, @function\n"
 	".globl just_return_func\n"
 	"just_return_func:\n"
+	ANNOTATE_NOENDBR
 	ASM_RET
 	".size just_return_func, .-just_return_func\n"
 );
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -55,6 +55,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 
 	.align RETPOLINE_THUNK_SIZE
 SYM_CODE_START(__x86_indirect_thunk_array)
+	ANNOTATE_NOENDBR // apply_retpolines
 
 #define GEN(reg) THUNK reg
 #include <asm/GEN-for-each-reg.h>