mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 pti bits and fixes from Thomas Gleixner:
 "This last update contains:

  - An objtool fix to prevent a segfault with the gold linker by
    changing the invocation order. That's not just for gold, it's a
    general robustness improvement.

  - An improved error message for objtool, sparing users some
    hair-tearing.

  - Make KASAN fail loudly if there is not enough memory, instead of
    oopsing at some random place later.

  - RSB fill on context switch to prevent RSB underflow and speculation
    through other units.

  - Make the retpoline/RSB functionality work reliably for both Intel
    and AMD.

  - Add retpoline to the module version magic so a mismatch can be
    detected.

  - A small (non-fix) update for cpufeatures which prevents cpu feature
    clashing for the upcoming extra mitigation bits to ease
    backporting."

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  module: Add retpoline tag to VERMAGIC
  x86/cpufeature: Move processor tracing out of scattered features
  objtool: Improve error message for bad file argument
  objtool: Fix seg fault with gold linker
  x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
  x86/retpoline: Fill RSB on context switch for affected CPUs
  x86/kasan: Panic if there is not enough memory to boot
commit 88dc7fca18
arch/x86/entry/entry_32.S
@@ -244,6 +244,17 @@ ENTRY(__switch_to_asm)
 	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif
 
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * When switching from a shallower to a deeper call stack
+	 * the RSB may either underflow or use entries populated
+	 * with userspace addresses. On CPUs where those concerns
+	 * exist, overwrite the RSB with entries which capture
+	 * speculative execution to prevent attack.
+	 */
+	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
 	/* restore callee-saved registers */
 	popl	%esi
 	popl	%edi
arch/x86/entry/entry_64.S
@@ -491,6 +491,17 @@ ENTRY(__switch_to_asm)
 	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
 #endif
 
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * When switching from a shallower to a deeper call stack
+	 * the RSB may either underflow or use entries populated
+	 * with userspace addresses. On CPUs where those concerns
+	 * exist, overwrite the RSB with entries which capture
+	 * speculative execution to prevent attack.
+	 */
+	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
 	/* restore callee-saved registers */
 	popq	%r15
 	popq	%r14
arch/x86/include/asm/cpufeatures.h
@@ -206,11 +206,11 @@
 #define X86_FEATURE_RETPOLINE		( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_INTEL_PT		( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512_4VNNIW	( 7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
 
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* Fill RSB on context switches */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -245,6 +245,7 @@
 #define X86_FEATURE_AVX512IFMA		( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
 #define X86_FEATURE_CLFLUSHOPT		( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB		( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_INTEL_PT		( 9*32+25) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512PF		( 9*32+26) /* AVX-512 Prefetch */
 #define X86_FEATURE_AVX512ER		( 9*32+27) /* AVX-512 Exponential and Reciprocal */
 #define X86_FEATURE_AVX512CD		( 9*32+28) /* AVX-512 Conflict Detection */
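Word 9 of the feature table mirrors CPUID.(EAX=7,ECX=0):EBX bit-for-bit, so moving Intel PT here pins it to its real hardware bit (25) and frees the software-defined word 7 slot for the coming mitigation flags. A minimal user-space probe of that same bit, illustrative only and assuming GCC/clang's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.(EAX=7,ECX=0):EBX bit 25 = Intel Processor Trace */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("Intel PT: %s\n", (ebx & (1u << 25)) ? "yes" : "no");
	return 0;
}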
arch/x86/include/asm/nospec-branch.h
@@ -11,7 +11,7 @@
  * Fill the CPU return stack buffer.
  *
  * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; jmp' loop to capture speculative execution.
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
  *
  * This is required in various cases for retpoline and IBRS-based
  * mitigations for the Spectre variant 2 vulnerability. Sometimes to
@@ -38,11 +38,13 @@
 	call	772f;				\
 773:	/* speculation trap */			\
 	pause;					\
+	lfence;					\
 	jmp	773b;				\
 772:						\
 	call	774f;				\
 775:	/* speculation trap */			\
 	pause;					\
+	lfence;					\
 	jmp	775b;				\
 774:						\
 	dec	reg;				\
@@ -73,6 +75,7 @@
 	call	.Ldo_rop_\@
 .Lspec_trap_\@:
 	pause
+	lfence
 	jmp	.Lspec_trap_\@
 .Ldo_rop_\@:
 	mov	\reg, (%_ASM_SP)
@@ -165,6 +168,7 @@
 	"	.align 16\n"			\
 	"901:	call 903f;\n"			\
 	"902:	pause;\n"			\
+	"	lfence;\n"			\
 	"	jmp 902b;\n"			\
 	"	.align 16\n"			\
 	"903:	addl $4, %%esp;\n"		\
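To see what one unrolled unit of these macros does, here is a hedged user-space sketch (x86-64 SysV, GNU C; the rsb_stuff_one() name is invented for illustration). The call plants a return address in the RSB, the architectural path immediately discards it, and any later mispredicted 'ret' consuming that entry lands in the pause; lfence; jmp trap, the lfence being the new addition that stops speculation on AMD parts as well:

#include <stdio.h>

static void rsb_stuff_one(void)
{
	asm volatile(
		"lea	-128(%%rsp), %%rsp\n\t"	/* step over the SysV red zone;
						   the kernel needs no such fixup */
		"call	1f\n\t"			/* pushes a return address,
						   loading one RSB entry */
		"2:	pause\n\t"		/* speculation trap: a mispredicted
						   'ret' spins here harmlessly */
		"	lfence\n\t"
		"	jmp	2b\n\t"
		"1:	add	$8, %%rsp\n\t"	/* architecturally discard the
						   pushed return address */
		"lea	128(%%rsp), %%rsp\n\t"
		::: "memory");
}

int main(void)
{
	rsb_stuff_one();	/* __FILL_RETURN_BUFFER repeats this unit
				   RSB_CLEAR_LOOPS (32) times */
	puts("stuffed one RSB entry");
	return 0;
}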
arch/x86/kernel/cpu/bugs.c
@@ -23,6 +23,7 @@
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
+#include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
@@ -155,6 +156,23 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	return SPECTRE_V2_CMD_NONE;
 }
 
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6) {
+		switch (boot_cpu_data.x86_model) {
+		case INTEL_FAM6_SKYLAKE_MOBILE:
+		case INTEL_FAM6_SKYLAKE_DESKTOP:
+		case INTEL_FAM6_SKYLAKE_X:
+		case INTEL_FAM6_KABYLAKE_MOBILE:
+		case INTEL_FAM6_KABYLAKE_DESKTOP:
+			return true;
+		}
+	}
+	return false;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
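The same family/model test can be reproduced from user space. A hedged sketch, assuming the model numbers from asm/intel-family.h (SKYLAKE_MOBILE 0x4e, SKYLAKE_DESKTOP 0x5e, SKYLAKE_X 0x55, KABYLAKE_MOBILE 0x8e, KABYLAKE_DESKTOP 0x9e) and GCC/clang's <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, fam, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	fam = (eax >> 8) & 0xf;
	/* family 6 combines the extended model bits into the model */
	model = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);

	if (fam == 6) {
		switch (model) {
		case 0x4e: case 0x5e:	/* Skylake mobile/desktop */
		case 0x55:		/* Skylake-X */
		case 0x8e: case 0x9e:	/* Kaby Lake mobile/desktop */
			puts("Skylake-era CPU");
			return 0;
		}
	}
	puts("not Skylake-era");
	return 0;
}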
@@ -213,6 +231,24 @@ static void __init spectre_v2_select_mitigation(void)
 
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
+
+	/*
+	 * If neither SMEP or KPTI are available, there is a risk of
+	 * hitting userspace addresses in the RSB after a context switch
+	 * from a shallow call stack to a deeper one. To prevent this fill
+	 * the entire RSB, even when using IBRS.
+	 *
+	 * Skylake era CPUs have a separate issue with *underflow* of the
+	 * RSB, when they will predict 'ret' targets from the generic BTB.
+	 * The proper mitigation for this is IBRS. If IBRS is not supported
+	 * or deactivated in favour of retpolines the RSB fill on context
+	 * switch is required.
+	 */
+	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+		pr_info("Filling RSB on context switch\n");
+	}
 }
 
 #undef pr_fmt
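Whether this path fired shows up at boot as "Filling RSB on context switch" in dmesg; the selected Spectre v2 mitigation itself can be read back through sysfs. A usage sketch, assuming the spectre_v2 vulnerabilities file introduced by the same 4.15-era PTI work:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2",
			"r");

	if (!f)
		return 1;	/* older kernel: file not present */
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "Mitigation: Full generic retpoline" */
	fclose(f);
	return 0;
}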
arch/x86/kernel/cpu/scattered.c
@@ -21,7 +21,6 @@ struct cpuid_bit {
 static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_APERFMPERF,	CPUID_ECX,  0, 0x00000006, 0 },
 	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
-	{ X86_FEATURE_INTEL_PT,		CPUID_EBX, 25, 0x00000007, 0 },
 	{ X86_FEATURE_AVX512_4VNNIW,	CPUID_EDX,  2, 0x00000007, 0 },
 	{ X86_FEATURE_AVX512_4FMAPS,	CPUID_EDX,  3, 0x00000007, 0 },
 	{ X86_FEATURE_CAT_L3,		CPUID_EBX,  1, 0x00000010, 0 },
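The scattered table exists for bits that have no contiguous feature word of their own: each entry names a (leaf, register, bit) triple to probe, and Intel PT no longer needs one because word 9 now mirrors CPUID.(7,0):EBX wholesale. A simplified user-space model of that walk; the struct here is condensed for illustration, not the kernel's exact cpuid_bit:

#include <stdio.h>
#include <cpuid.h>

struct cpuid_bit {
	const char *name;
	int reg;		/* 0=EAX 1=EBX 2=ECX 3=EDX */
	int bit;
	unsigned int leaf;
};

static const struct cpuid_bit bits[] = {
	{ "aperfmperf", 2, 0, 0x00000006 },
	{ "epb",        2, 3, 0x00000006 },
};

int main(void)
{
	unsigned int r[4];
	unsigned int i;

	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
		if (!__get_cpuid_count(bits[i].leaf, 0,
				       &r[0], &r[1], &r[2], &r[3]))
			continue;
		printf("%-10s %s\n", bits[i].name,
		       (r[bits[i].reg] >> bits[i].bit) & 1 ? "yes" : "no");
	}
	return 0;
}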
arch/x86/mm/kasan_init_64.c
@@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static __init void *early_alloc(size_t size, int nid)
+static __init void *early_alloc(size_t size, int nid, bool panic)
 {
-	return memblock_virt_alloc_try_nid_nopanic(size, size,
-		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+	if (panic)
+		return memblock_virt_alloc_try_nid(size, size,
+			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+	else
+		return memblock_virt_alloc_try_nid_nopanic(size, size,
+			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
 }
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
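The new bool makes the allocation policy explicit: large huge-page allocations are opportunistic (the caller falls back to 4 KiB pages, as the hunks below show), while the small required ones should panic up front rather than oops at some random place later. A minimal sketch of that two-tier pattern, with malloc standing in for the memblock allocators:

#include <stdio.h>
#include <stdlib.h>

static void *early_alloc(size_t size, int must_succeed)
{
	void *p = malloc(size);	/* stand-in for memblock_virt_alloc_try_nid*() */

	if (!p && must_succeed) {
		fprintf(stderr, "early_alloc: %zu bytes unavailable\n", size);
		abort();	/* the kernel variant panics instead */
	}
	return p;
}

int main(void)
{
	void *huge = early_alloc(2UL << 20, 0);	/* opportunistic 2 MiB "huge page" */
	void *page = huge ? huge : early_alloc(4096, 1); /* required fallback */

	printf("mapped via %s allocation\n", huge ? "huge" : "4 KiB");
	free(page);
	return 0;
}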
@@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 	if (boot_cpu_has(X86_FEATURE_PSE) &&
 	    ((end - addr) == PMD_SIZE) &&
 	    IS_ALIGNED(addr, PMD_SIZE)) {
-		p = early_alloc(PMD_SIZE, nid);
+		p = early_alloc(PMD_SIZE, nid, false);
 		if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
 			return;
 		else if (p)
 			memblock_free(__pa(p), PMD_SIZE);
 	}
 
-	p = early_alloc(PAGE_SIZE, nid);
+	p = early_alloc(PAGE_SIZE, nid, true);
 	pmd_populate_kernel(&init_mm, pmd, p);
 }
@@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 		if (!pte_none(*pte))
 			continue;
 
-		p = early_alloc(PAGE_SIZE, nid);
+		p = early_alloc(PAGE_SIZE, nid, true);
 		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
 		set_pte_at(&init_mm, addr, pte, entry);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
 	if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
 	    ((end - addr) == PUD_SIZE) &&
 	    IS_ALIGNED(addr, PUD_SIZE)) {
-		p = early_alloc(PUD_SIZE, nid);
+		p = early_alloc(PUD_SIZE, nid, false);
 		if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
 			return;
 		else if (p)
 			memblock_free(__pa(p), PUD_SIZE);
 	}
 
-	p = early_alloc(PAGE_SIZE, nid);
+	p = early_alloc(PAGE_SIZE, nid, true);
 	pud_populate(&init_mm, pud, p);
 }
@@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
 	unsigned long next;
 
 	if (p4d_none(*p4d)) {
-		void *p = early_alloc(PAGE_SIZE, nid);
+		void *p = early_alloc(PAGE_SIZE, nid, true);
 		p4d_populate(&init_mm, p4d, p);
 	}
@@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		p = early_alloc(PAGE_SIZE, nid);
+		p = early_alloc(PAGE_SIZE, nid, true);
 		pgd_populate(&init_mm, pgd, p);
 	}
include/linux/vermagic.h
@@ -31,11 +31,17 @@
 #else
 #define MODULE_RANDSTRUCT_PLUGIN
 #endif
+#ifdef RETPOLINE
+#define MODULE_VERMAGIC_RETPOLINE "retpoline "
+#else
+#define MODULE_VERMAGIC_RETPOLINE ""
+#endif
 
 #define VERMAGIC_STRING 						\
 	UTS_RELEASE " "							\
 	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT 			\
 	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS	\
-	MODULE_ARCH_VERMAGIC
+	MODULE_ARCH_VERMAGIC						\
+	MODULE_RANDSTRUCT_PLUGIN					\
+	MODULE_VERMAGIC_RETPOLINE
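VERMAGIC_STRING is built by adjacent string-literal concatenation, so the new macro simply splices "retpoline " into the magic when the kernel is built with RETPOLINE, and module load fails on a mismatch. A hedged illustration with made-up values for the other components:

#include <stdio.h>

#define UTS_RELEASE			"4.15.0"	/* illustrative */
#define MODULE_VERMAGIC_SMP		"SMP "
#define MODULE_VERMAGIC_PREEMPT		""
#define MODULE_VERMAGIC_MODULE_UNLOAD	"mod_unload "
#define MODULE_VERMAGIC_MODVERSIONS	""
#define MODULE_ARCH_VERMAGIC		""
#define MODULE_RANDSTRUCT_PLUGIN	""
#define MODULE_VERMAGIC_RETPOLINE	"retpoline "	/* as if RETPOLINE were set */

#define VERMAGIC_STRING							\
	UTS_RELEASE " "							\
	MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT			\
	MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS	\
	MODULE_ARCH_VERMAGIC						\
	MODULE_RANDSTRUCT_PLUGIN					\
	MODULE_VERMAGIC_RETPOLINE

int main(void)
{
	/* prints "4.15.0 SMP mod_unload retpoline "; a retpoline kernel
	 * refuses modules whose vermagic lacks the tag */
	puts(VERMAGIC_STRING);
	return 0;
}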
scripts/Makefile.build
@@ -265,12 +265,18 @@ else
 objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
 
+ifdef CONFIG_MODVERSIONS
+objtool_o = $(@D)/.tmp_$(@F)
+else
+objtool_o = $(@)
+endif
+
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
 cmd_objtool = $(if $(patsubst y%,, \
 	$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
-	$(__objtool_obj) $(objtool_args) "$(@)";)
+	$(__objtool_obj) $(objtool_args) "$(objtool_o)";)
 objtool_obj = $(if $(patsubst y%,, \
 	$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
 	$(__objtool_obj))
@@ -286,16 +292,16 @@ objtool_dep = $(objtool_obj) \
 define rule_cc_o_c
 	$(call echo-cmd,checksrc) $(cmd_checksrc)			  \
 	$(call cmd_and_fixdep,cc_o_c)					  \
-	$(cmd_modversions_c)						  \
 	$(cmd_checkdoc)							  \
 	$(call echo-cmd,objtool) $(cmd_objtool)				  \
+	$(cmd_modversions_c)						  \
 	$(call echo-cmd,record_mcount) $(cmd_record_mcount)
 endef
 
 define rule_as_o_S
 	$(call cmd_and_fixdep,as_o_S)					  \
-	$(cmd_modversions_S)						  \
-	$(call echo-cmd,objtool) $(cmd_objtool)
+	$(call echo-cmd,objtool) $(cmd_objtool)				  \
+	$(cmd_modversions_S)
 endef
 
 # List module undefined symbols (or empty line if not enabled)
tools/objtool/elf.c
@@ -26,6 +26,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+#include <errno.h>
 
 #include "elf.h"
 #include "warn.h"
|
@ -358,7 +359,8 @@ struct elf *elf_open(const char *name, int flags)
|
||||||
|
|
||||||
elf->fd = open(name, flags);
|
elf->fd = open(name, flags);
|
||||||
if (elf->fd == -1) {
|
if (elf->fd == -1) {
|
||||||
perror("open");
|
fprintf(stderr, "objtool: Can't open '%s': %s\n",
|
||||||
|
name, strerror(errno));
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
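The improvement is purely diagnostic: perror("open") only says why, while the new message also says which file. A side-by-side sketch of the two styles, with a placeholder file name:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *name = argc > 1 ? argv[1] : "no-such-file.o";
	int fd = open(name, O_RDONLY);

	if (fd == -1) {
		int saved = errno;	/* perror must not clobber it */

		perror("open");		/* old: "open: No such file or directory" */
		fprintf(stderr, "objtool: Can't open '%s': %s\n",
			name, strerror(saved));	/* new: names the bad argument */
		return 1;
	}
	close(fd);
	return 0;
}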