Merge branch 'for-next/asm-annotations' into for-next/core

* for-next/asm-annotations:
  : Modernise arm64 assembly annotations
  arm64: head: Convert install_el2_stub to SYM_INNER_LABEL
  arm64: Mark call_smc_arch_workaround_1 as __maybe_unused
  arm64: entry-ftrace.S: Fix missing argument for CONFIG_FUNCTION_GRAPH_TRACER=y
  arm64: vdso32: Convert to modern assembler annotations
  arm64: vdso: Convert to modern assembler annotations
  arm64: sdei: Annotate SDEI entry points using new style annotations
  arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations
  arm64: kvm: Modernize annotation for __bp_harden_hyp_vecs
  arm64: kvm: Annotate assembly using modern annotations
  arm64: kernel: Convert to modern annotations for assembly data
  arm64: head: Annotate stext and preserve_boot_args as code
  arm64: head.S: Convert to modern annotations for assembly functions
  arm64: ftrace: Modernise annotation of return_to_handler
  arm64: ftrace: Correct annotation of ftrace_caller assembly
  arm64: entry-ftrace.S: Convert to modern annotations for assembly functions
  arm64: entry: Additional annotation conversions for entry.S
  arm64: entry: Annotate ret_from_fork as code
  arm64: entry: Annotate vector table and handlers as code
  arm64: crypto: Modernize names for AES function macros
  arm64: crypto: Modernize some extra assembly annotations
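
For reference, the conversion this series applies follows the pattern below (an
illustrative sketch with a placeholder symbol name, not a hunk from this merge):
the deprecated ENTRY/ENDPROC markers are replaced by the SYM_* annotations from
<linux/linkage.h>, which record what a symbol actually is. SYM_FUNC_START/
SYM_FUNC_END mark C-callable functions, SYM_CODE_START/SYM_CODE_END mark code
with a non-standard calling convention (exception vectors, trampolines),
SYM_DATA_START/SYM_DATA_END mark data, SYM_INNER_LABEL places a label inside an
annotated block, and the _LOCAL variants keep a symbol file-local.

    /* old style */
    ENTRY(example_func)
            ret
    ENDPROC(example_func)

    /* new style */
    SYM_FUNC_START(example_func)
            ret
    SYM_FUNC_END(example_func)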
Catalin Marinas, 2020-03-25 11:10:46 +00:00
commit 0829a07695
17 changed files with 212 additions and 207 deletions

diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S

@@ -9,8 +9,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(ce_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(ce_ ## func)
.arch armv8-a+crypto
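
The AES_ENTRY/AES_ENDPROC macros renamed here parameterise the shared mode
implementations with a per-variant symbol prefix (ce_ above, neon_ in the NEON
wrapper further below), so each generic function in aes-modes.S, shown next, is
emitted once per variant. An illustrative expansion (a sketch only, using a
function name from the next file):

    #define AES_FUNC_START(func) SYM_FUNC_START(ce_ ## func)

    AES_FUNC_START(aes_ecb_encrypt)   /* = SYM_FUNC_START(ce_aes_ecb_encrypt) */
            ret
    AES_FUNC_END(aes_ecb_encrypt)     /* = SYM_FUNC_END(ce_aes_ecb_encrypt) */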

diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S

@@ -51,7 +51,7 @@ SYM_FUNC_END(aes_decrypt_block5x)
* int blocks)
*/
-AES_ENTRY(aes_ecb_encrypt)
+AES_FUNC_START(aes_ecb_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -79,10 +79,10 @@ ST5( st1 {v4.16b}, [x0], #16 )
.Lecbencout:
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_ecb_encrypt)
+AES_FUNC_END(aes_ecb_encrypt)
-AES_ENTRY(aes_ecb_decrypt)
+AES_FUNC_START(aes_ecb_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -110,7 +110,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
.Lecbdecout:
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_ecb_decrypt)
+AES_FUNC_END(aes_ecb_decrypt)
/*
@@ -126,7 +126,7 @@ AES_ENDPROC(aes_ecb_decrypt)
* u32 const rk2[]);
*/
-AES_ENTRY(aes_essiv_cbc_encrypt)
+AES_FUNC_START(aes_essiv_cbc_encrypt)
ld1 {v4.16b}, [x5] /* get iv */
mov w8, #14 /* AES-256: 14 rounds */
@@ -135,7 +135,7 @@ AES_ENTRY(aes_essiv_cbc_encrypt)
enc_switch_key w3, x2, x6
b .Lcbcencloop4x
-AES_ENTRY(aes_cbc_encrypt)
+AES_FUNC_START(aes_cbc_encrypt)
ld1 {v4.16b}, [x5] /* get iv */
enc_prepare w3, x2, x6
@@ -167,10 +167,10 @@ AES_ENTRY(aes_cbc_encrypt)
.Lcbcencout:
st1 {v4.16b}, [x5] /* return iv */
ret
-AES_ENDPROC(aes_cbc_encrypt)
-AES_ENDPROC(aes_essiv_cbc_encrypt)
+AES_FUNC_END(aes_cbc_encrypt)
+AES_FUNC_END(aes_essiv_cbc_encrypt)
-AES_ENTRY(aes_essiv_cbc_decrypt)
+AES_FUNC_START(aes_essiv_cbc_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -181,7 +181,7 @@ AES_ENTRY(aes_essiv_cbc_decrypt)
encrypt_block cbciv, w8, x6, x7, w9
b .Lessivcbcdecstart
-AES_ENTRY(aes_cbc_decrypt)
+AES_FUNC_START(aes_cbc_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -238,8 +238,8 @@ ST5( st1 {v4.16b}, [x0], #16 )
st1 {cbciv.16b}, [x5] /* return iv */
ldp x29, x30, [sp], #16
ret
-AES_ENDPROC(aes_cbc_decrypt)
-AES_ENDPROC(aes_essiv_cbc_decrypt)
+AES_FUNC_END(aes_cbc_decrypt)
+AES_FUNC_END(aes_essiv_cbc_decrypt)
/*
@@ -249,7 +249,7 @@ AES_ENDPROC(aes_essiv_cbc_decrypt)
* int rounds, int bytes, u8 const iv[])
*/
-AES_ENTRY(aes_cbc_cts_encrypt)
+AES_FUNC_START(aes_cbc_cts_encrypt)
adr_l x8, .Lcts_permute_table
sub x4, x4, #16
add x9, x8, #32
@@ -276,9 +276,9 @@ AES_ENTRY(aes_cbc_cts_encrypt)
st1 {v0.16b}, [x4] /* overlapping stores */
st1 {v1.16b}, [x0]
ret
-AES_ENDPROC(aes_cbc_cts_encrypt)
+AES_FUNC_END(aes_cbc_cts_encrypt)
-AES_ENTRY(aes_cbc_cts_decrypt)
+AES_FUNC_START(aes_cbc_cts_decrypt)
adr_l x8, .Lcts_permute_table
sub x4, x4, #16
add x9, x8, #32
@@ -305,7 +305,7 @@ AES_ENTRY(aes_cbc_cts_decrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
st1 {v0.16b}, [x0]
ret
-AES_ENDPROC(aes_cbc_cts_decrypt)
+AES_FUNC_END(aes_cbc_cts_decrypt)
.section ".rodata", "a"
.align 6
@@ -324,7 +324,7 @@ AES_ENDPROC(aes_cbc_cts_decrypt)
* int blocks, u8 ctr[])
*/
-AES_ENTRY(aes_ctr_encrypt)
+AES_FUNC_START(aes_ctr_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -409,7 +409,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
rev x7, x7
ins vctr.d[0], x7
b .Lctrcarrydone
-AES_ENDPROC(aes_ctr_encrypt)
+AES_FUNC_END(aes_ctr_encrypt)
/*
@@ -433,7 +433,7 @@ AES_ENDPROC(aes_ctr_encrypt)
uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s
.endm
-AES_ENTRY(aes_xts_encrypt)
+AES_FUNC_START(aes_xts_encrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -518,9 +518,9 @@ AES_ENTRY(aes_xts_encrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
mov w4, wzr
b .Lxtsencctsout
-AES_ENDPROC(aes_xts_encrypt)
+AES_FUNC_END(aes_xts_encrypt)
-AES_ENTRY(aes_xts_decrypt)
+AES_FUNC_START(aes_xts_decrypt)
stp x29, x30, [sp, #-16]!
mov x29, sp
@@ -612,13 +612,13 @@ AES_ENTRY(aes_xts_decrypt)
st1 {v2.16b}, [x4] /* overlapping stores */
mov w4, wzr
b .Lxtsdecctsout
-AES_ENDPROC(aes_xts_decrypt)
+AES_FUNC_END(aes_xts_decrypt)
/*
* aes_mac_update(u8 const in[], u32 const rk[], int rounds,
* int blocks, u8 dg[], int enc_before, int enc_after)
*/
-AES_ENTRY(aes_mac_update)
+AES_FUNC_START(aes_mac_update)
frame_push 6
mov x19, x0
@@ -676,4 +676,4 @@ AES_ENTRY(aes_mac_update)
ld1 {v0.16b}, [x23] /* get dg */
enc_prepare w21, x20, x0
b .Lmacloop4x
-AES_ENDPROC(aes_mac_update)
+AES_FUNC_END(aes_mac_update)

diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S

@@ -8,8 +8,8 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(neon_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(neon_ ## func)
xtsmask .req v7
cbciv .req v7

diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S

@@ -587,20 +587,20 @@ CPU_LE( rev w8, w8 )
* struct ghash_key const *k, u64 dg[], u8 ctr[],
* int rounds, u8 tag)
*/
-ENTRY(pmull_gcm_encrypt)
+SYM_FUNC_START(pmull_gcm_encrypt)
pmull_gcm_do_crypt 1
-ENDPROC(pmull_gcm_encrypt)
+SYM_FUNC_END(pmull_gcm_encrypt)
/*
* void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
* struct ghash_key const *k, u64 dg[], u8 ctr[],
* int rounds, u8 tag)
*/
-ENTRY(pmull_gcm_decrypt)
+SYM_FUNC_START(pmull_gcm_decrypt)
pmull_gcm_do_crypt 0
-ENDPROC(pmull_gcm_decrypt)
+SYM_FUNC_END(pmull_gcm_decrypt)
-pmull_gcm_ghash_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x)
movi MASK.16b, #0xe1
shl MASK.2d, MASK.2d, #57
@@ -681,9 +681,9 @@ pmull_gcm_ghash_4x:
eor XL.16b, XL.16b, T2.16b
ret
-ENDPROC(pmull_gcm_ghash_4x)
+SYM_FUNC_END(pmull_gcm_ghash_4x)
-pmull_gcm_enc_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x)
ld1 {KS0.16b}, [x5] // load upper counter
sub w10, w8, #4
sub w11, w8, #3
@@ -746,7 +746,7 @@ pmull_gcm_enc_4x:
eor INP3.16b, INP3.16b, KS3.16b
ret
-ENDPROC(pmull_gcm_enc_4x)
+SYM_FUNC_END(pmull_gcm_enc_4x)
.section ".rodata", "a"
.align 6

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h

@@ -36,6 +36,8 @@
*/
#define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE)
+#define __SMCCC_WORKAROUND_1_SMC_SZ 36
#ifndef __ASSEMBLY__
#include <linux/mm.h>
@@ -75,6 +77,8 @@ extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
+extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym) \
({ \

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h

@@ -480,7 +480,7 @@ static inline void *kvm_get_hyp_vector(void)
int slot = -1;
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
-vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
slot = data->hyp_vectors_slot;
}
@@ -509,14 +509,13 @@ static inline int kvm_map_vectors(void)
* HBP + HEL2 -> use hardened vertors and use exec mapping
*/
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
}
if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
-unsigned long size = (__bp_harden_hyp_vecs_end -
-__bp_harden_hyp_vecs_start);
+phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
/*
* Always allocate a spare vector slot, as we don't

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h

@@ -13,6 +13,7 @@
#define TTBR_ASID_MASK (UL(0xffff) << 48)
#define BP_HARDEN_EL2_SLOTS 4
+#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)
#ifndef __ASSEMBLY__
@@ -45,7 +46,8 @@ struct bp_hardening_data {
#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+extern char __bp_harden_hyp_vecs[];
extern atomic_t arm64_el2_vector_last_slot;
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c

@@ -11,6 +11,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
static bool __maybe_unused
@@ -113,13 +114,10 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
-void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
int i;
for (i = 0; i < SZ_2K; i += 0x80)
@@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
raw_spin_unlock(&bp_lock);
}
#else
-#define __smccc_workaround_1_smc_start NULL
-#define __smccc_workaround_1_smc_end NULL
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
@@ -176,7 +171,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
#include <linux/arm-smccc.h>
-static void call_smc_arch_workaround_1(void)
+static void __maybe_unused call_smc_arch_workaround_1(void)
{
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
@@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void)
smccc_end = NULL;
break;
+#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
case SMCCC_CONDUIT_SMC:
cb = call_smc_arch_workaround_1;
-smccc_start = __smccc_workaround_1_smc_start;
-smccc_end = __smccc_workaround_1_smc_end;
+smccc_start = __smccc_workaround_1_smc;
+smccc_end = __smccc_workaround_1_smc +
+__SMCCC_WORKAROUND_1_SMC_SZ;
break;
+#endif
default:
return -1;

diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S

@@ -75,27 +75,27 @@
add x29, sp, #S_STACKFRAME
.endm
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
ftrace_regs_entry 1
b ftrace_common
-ENDPROC(ftrace_regs_caller)
+SYM_CODE_END(ftrace_regs_caller)
-ENTRY(ftrace_caller)
+SYM_CODE_START(ftrace_caller)
ftrace_regs_entry 0
b ftrace_common
-ENDPROC(ftrace_caller)
+SYM_CODE_END(ftrace_caller)
-ENTRY(ftrace_common)
+SYM_CODE_START(ftrace_common)
sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
mov x1, x9 // parent_ip (callsite's LR)
ldr_l x2, function_trace_op // op
mov x3, sp // regs
-GLOBAL(ftrace_call)
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
@@ -122,17 +122,17 @@ ftrace_common_return:
add sp, sp, #S_FRAME_SIZE + 16
ret x9
-ENDPROC(ftrace_common)
+SYM_CODE_END(ftrace_common)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
ldr x0, [sp, #S_PC]
sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
add x1, sp, #S_LR // parent_ip (callsite's LR)
ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
bl prepare_ftrace_return
b ftrace_common_return
-ENDPROC(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)
#endif
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -218,7 +218,7 @@ ENDPROC(ftrace_graph_caller)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
mcount_enter
ldr_l x2, ftrace_trace_function
@ -242,7 +242,7 @@ skip_ftrace_call: // }
b.ne ftrace_graph_caller // ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
mcount_exit
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -253,9 +253,9 @@ NOKPROBE(_mcount)
* and later on, NOP to branch to ftrace_caller() when enabled or branch to
* NOP when disabled per-function base.
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
ret
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -268,24 +268,24 @@ NOKPROBE(_mcount)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
mcount_enter
mcount_get_pc0 x0 // function's pc
mcount_get_lr x1 // function's lr
-GLOBAL(ftrace_call) // tracer(pc, lr);
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr);
nop // This will be replaced with "bl xxx"
// where xxx can be any kind of tracer.
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
mcount_exit
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -298,20 +298,20 @@ ENDPROC(ftrace_caller)
* the call stack in order to intercept instrumented function's return path
* and run return_to_handler() later on its exit.
*/
-ENTRY(ftrace_graph_caller)
+SYM_FUNC_START(ftrace_graph_caller)
mcount_get_pc x0 // function's pc
mcount_get_lr_addr x1 // pointer to function's saved lr
mcount_get_parent_fp x2 // parent's fp
bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
mcount_exit
-ENDPROC(ftrace_graph_caller)
+SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-ENTRY(ftrace_stub)
+SYM_FUNC_START(ftrace_stub)
ret
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -320,7 +320,7 @@ ENDPROC(ftrace_stub)
* Run ftrace_return_to_handler() before going back to parent.
* @fp is checked against the value passed by ftrace_graph_caller().
*/
-ENTRY(return_to_handler)
+SYM_CODE_START(return_to_handler)
/* save return value regs */
sub sp, sp, #64
stp x0, x1, [sp]
@@ -340,5 +340,5 @@ ENTRY(return_to_handler)
add sp, sp, #64
ret
-END(return_to_handler)
+SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
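
The GLOBAL() markers above become SYM_INNER_LABEL() rather than a SYM_*_START/
SYM_*_END pair because ftrace_call and ftrace_graph_call are not functions in
their own right: they are runtime patch sites in the middle of an enclosing
annotated block, and SYM_INNER_LABEL emits a label with the requested linkage
without opening a new symbol. A minimal sketch of the idiom, with placeholder
names:

    SYM_CODE_START(outer_entry)                  // illustrative symbol
            nop
    SYM_INNER_LABEL(patch_site, SYM_L_GLOBAL)    // global label inside
            nop                                  // outer_entry, not a new
    SYM_CODE_END(outer_entry)                    // function symbol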

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S

@@ -465,7 +465,7 @@ alternative_endif
.pushsection ".entry.text", "ax"
.align 11
-ENTRY(vectors)
+SYM_CODE_START(vectors)
kernel_ventry 1, sync_invalid // Synchronous EL1t
kernel_ventry 1, irq_invalid // IRQ EL1t
kernel_ventry 1, fiq_invalid // FIQ EL1t
@@ -492,7 +492,7 @@ ENTRY(vectors)
kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
-END(vectors)
+SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
/*
@@ -534,57 +534,57 @@ __bad_stack:
ASM_BUG()
.endm
-el0_sync_invalid:
+SYM_CODE_START_LOCAL(el0_sync_invalid)
inv_entry 0, BAD_SYNC
-ENDPROC(el0_sync_invalid)
+SYM_CODE_END(el0_sync_invalid)
-el0_irq_invalid:
+SYM_CODE_START_LOCAL(el0_irq_invalid)
inv_entry 0, BAD_IRQ
-ENDPROC(el0_irq_invalid)
+SYM_CODE_END(el0_irq_invalid)
-el0_fiq_invalid:
+SYM_CODE_START_LOCAL(el0_fiq_invalid)
inv_entry 0, BAD_FIQ
-ENDPROC(el0_fiq_invalid)
+SYM_CODE_END(el0_fiq_invalid)
-el0_error_invalid:
+SYM_CODE_START_LOCAL(el0_error_invalid)
inv_entry 0, BAD_ERROR
-ENDPROC(el0_error_invalid)
+SYM_CODE_END(el0_error_invalid)
#ifdef CONFIG_COMPAT
-el0_fiq_invalid_compat:
+SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
inv_entry 0, BAD_FIQ, 32
-ENDPROC(el0_fiq_invalid_compat)
+SYM_CODE_END(el0_fiq_invalid_compat)
#endif
-el1_sync_invalid:
+SYM_CODE_START_LOCAL(el1_sync_invalid)
inv_entry 1, BAD_SYNC
-ENDPROC(el1_sync_invalid)
+SYM_CODE_END(el1_sync_invalid)
-el1_irq_invalid:
+SYM_CODE_START_LOCAL(el1_irq_invalid)
inv_entry 1, BAD_IRQ
-ENDPROC(el1_irq_invalid)
+SYM_CODE_END(el1_irq_invalid)
-el1_fiq_invalid:
+SYM_CODE_START_LOCAL(el1_fiq_invalid)
inv_entry 1, BAD_FIQ
-ENDPROC(el1_fiq_invalid)
+SYM_CODE_END(el1_fiq_invalid)
-el1_error_invalid:
+SYM_CODE_START_LOCAL(el1_error_invalid)
inv_entry 1, BAD_ERROR
-ENDPROC(el1_error_invalid)
+SYM_CODE_END(el1_error_invalid)
/*
* EL1 mode handlers.
*/
.align 6
-el1_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
kernel_entry 1
mov x0, sp
bl el1_sync_handler
kernel_exit 1
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
.align 6
-el1_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
@@ -639,42 +639,42 @@ alternative_else_nop_endif
#endif
kernel_exit 1
-ENDPROC(el1_irq)
+SYM_CODE_END(el1_irq)
/*
* EL0 mode handlers.
*/
.align 6
-el0_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
kernel_entry 0
mov x0, sp
bl el0_sync_handler
b ret_to_user
-ENDPROC(el0_sync)
+SYM_CODE_END(el0_sync)
#ifdef CONFIG_COMPAT
.align 6
-el0_sync_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
kernel_entry 0, 32
mov x0, sp
bl el0_sync_compat_handler
b ret_to_user
-ENDPROC(el0_sync_compat)
+SYM_CODE_END(el0_sync_compat)
.align 6
-el0_irq_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
kernel_entry 0, 32
b el0_irq_naked
-ENDPROC(el0_irq_compat)
+SYM_CODE_END(el0_irq_compat)
-el0_error_compat:
+SYM_CODE_START_LOCAL(el0_error_compat)
kernel_entry 0, 32
b el0_error_naked
-ENDPROC(el0_error_compat)
+SYM_CODE_END(el0_error_compat)
#endif
.align 6
-el0_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
@@ -696,9 +696,9 @@ el0_irq_naked:
bl trace_hardirqs_on
#endif
b ret_to_user
-ENDPROC(el0_irq)
+SYM_CODE_END(el0_irq)
-el1_error:
+SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
gic_prio_kentry_setup tmp=x2
@@ -706,9 +706,9 @@ el1_error:
mov x0, sp
bl do_serror
kernel_exit 1
-ENDPROC(el1_error)
+SYM_CODE_END(el1_error)
-el0_error:
+SYM_CODE_START_LOCAL(el0_error)
kernel_entry 0
el0_error_naked:
mrs x25, esr_el1
@@ -720,7 +720,7 @@ el0_error_naked:
bl do_serror
enable_da_f
b ret_to_user
-ENDPROC(el0_error)
+SYM_CODE_END(el0_error)
/*
* Ok, we need to do extra processing, enter the slow path.
@@ -832,7 +832,7 @@
.endm
.align 11
-ENTRY(tramp_vectors)
+SYM_CODE_START_NOALIGN(tramp_vectors)
.space 0x400
tramp_ventry
@@ -844,24 +844,24 @@ ENTRY(tramp_vectors)
tramp_ventry 32
tramp_ventry 32
tramp_ventry 32
-END(tramp_vectors)
+SYM_CODE_END(tramp_vectors)
-ENTRY(tramp_exit_native)
+SYM_CODE_START(tramp_exit_native)
tramp_exit
-END(tramp_exit_native)
+SYM_CODE_END(tramp_exit_native)
-ENTRY(tramp_exit_compat)
+SYM_CODE_START(tramp_exit_compat)
tramp_exit 32
-END(tramp_exit_compat)
+SYM_CODE_END(tramp_exit_compat)
.ltorg
.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
.align PAGE_SHIFT
-.globl __entry_tramp_data_start
-__entry_tramp_data_start:
+SYM_DATA_START(__entry_tramp_data_start)
.quad vectors
+SYM_DATA_END(__entry_tramp_data_start)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -874,7 +874,7 @@ __entry_tramp_data_start:
* Previous and next are guaranteed not to be the same.
*
*/
-ENTRY(cpu_switch_to)
+SYM_FUNC_START(cpu_switch_to)
mov x10, #THREAD_CPU_CONTEXT
add x8, x0, x10
mov x9, sp
@@ -896,20 +896,20 @@ ENTRY(cpu_switch_to)
mov sp, x9
msr sp_el0, x1
ret
-ENDPROC(cpu_switch_to)
+SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
/*
* This is how we return from a fork.
*/
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
bl schedule_tail
cbz x19, 1f // not a kernel thread
mov x0, x20
blr x19
1: get_current_task tsk
b ret_to_user
-ENDPROC(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
#ifdef CONFIG_ARM_SDE_INTERFACE
@@ -938,7 +938,7 @@ NOKPROBE(ret_from_fork)
*/
.ltorg
.pushsection ".entry.tramp.text", "ax"
-ENTRY(__sdei_asm_entry_trampoline)
+SYM_CODE_START(__sdei_asm_entry_trampoline)
mrs x4, ttbr1_el1
tbz x4, #USER_ASID_BIT, 1f
@@ -960,7 +960,7 @@ ENTRY(__sdei_asm_entry_trampoline)
ldr x4, =__sdei_asm_handler
#endif
br x4
-ENDPROC(__sdei_asm_entry_trampoline)
+SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
/*
@@ -970,21 +970,22 @@ NOKPROBE(__sdei_asm_entry_trampoline)
* x2: exit_mode
* x4: struct sdei_registered_event argument from registration time.
*/
-ENTRY(__sdei_asm_exit_trampoline)
+SYM_CODE_START(__sdei_asm_exit_trampoline)
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
cbnz x4, 1f
tramp_unmap_kernel tmp=x4
1: sdei_handler_exit exit_mode=x2
-ENDPROC(__sdei_asm_exit_trampoline)
+SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.ltorg
.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
-__sdei_asm_trampoline_next_handler:
+SYM_DATA_START(__sdei_asm_trampoline_next_handler)
.quad __sdei_asm_handler
+SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -1002,7 +1003,7 @@ __sdei_asm_trampoline_next_handler:
* follow SMC-CC. We save (or retrieve) all the registers as the handler may
* want them.
*/
-ENTRY(__sdei_asm_handler)
+SYM_CODE_START(__sdei_asm_handler)
stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
@@ -1085,6 +1086,6 @@ alternative_else_nop_endif
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
br x5
#endif
-ENDPROC(__sdei_asm_handler)
+SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S

@@ -105,7 +105,7 @@ pe_header:
* x24 __primary_switch() .. relocate_kernel()
* current RELR displacement
*/
-ENTRY(stext)
+SYM_CODE_START(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
adrp x23, __PHYS_OFFSET
@@ -120,12 +120,12 @@ ENTRY(stext)
*/
bl __cpu_setup // initialise processor
b __primary_switch
-ENDPROC(stext)
+SYM_CODE_END(stext)
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
*/
-preserve_boot_args:
+SYM_CODE_START_LOCAL(preserve_boot_args)
mov x21, x0 // x21=FDT
adr_l x0, boot_args // record the contents of
@@ -137,7 +137,7 @@ preserve_boot_args:
mov x1, #0x20 // 4 x 8 bytes
b __inval_dcache_area // tail call
-ENDPROC(preserve_boot_args)
+SYM_CODE_END(preserve_boot_args)
/*
* Macro to create a table entry to the next page.
@@ -275,7 +275,7 @@ ENDPROC(preserve_boot_args)
* - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled
*/
-__create_page_tables:
+SYM_FUNC_START_LOCAL(__create_page_tables)
mov x28, lr
/*
@@ -403,14 +403,14 @@ __create_page_tables:
bl __inval_dcache_area
ret x28
-ENDPROC(__create_page_tables)
+SYM_FUNC_END(__create_page_tables)
/*
* The following fragment of code is executed with the MMU enabled.
*
* x0 = __PHYS_OFFSET
*/
-__primary_switched:
+SYM_FUNC_START_LOCAL(__primary_switched)
adrp x4, init_thread_union
add sp, x4, #THREAD_SIZE
adr_l x5, init_task
@@ -455,7 +455,7 @@ __primary_switched:
mov x29, #0
mov x30, #0
b start_kernel
-ENDPROC(__primary_switched)
+SYM_FUNC_END(__primary_switched)
/*
* end early head section, begin head code that is also used for
@@ -463,8 +463,9 @@ ENDPROC(__primary_switched)
*/
.section ".idmap.text","awx"
-ENTRY(kimage_vaddr)
+SYM_DATA_START(kimage_vaddr)
.quad _text - TEXT_OFFSET
+SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
/*
@@ -474,7 +475,7 @@ EXPORT_SYMBOL(kimage_vaddr)
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
* booted in EL1 or EL2 respectively.
*/
-ENTRY(el2_setup)
+SYM_FUNC_START(el2_setup)
msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
@@ -598,7 +599,7 @@ set_hcr:
isb
ret
-install_el2_stub:
+SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
/*
* When VHE is not in use, early init of EL2 and EL1 needs to be
* done here.
@@ -635,13 +636,13 @@ install_el2_stub:
msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
-ENDPROC(el2_setup)
+SYM_FUNC_END(el2_setup)
/*
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
* in w0. See arch/arm64/include/asm/virt.h for more info.
*/
-set_cpu_boot_mode_flag:
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
adr_l x1, __boot_cpu_mode
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
@@ -650,7 +651,7 @@ set_cpu_boot_mode_flag:
dmb sy
dc ivac, x1 // Invalidate potentially stale cache line
ret
-ENDPROC(set_cpu_boot_mode_flag)
+SYM_FUNC_END(set_cpu_boot_mode_flag)
/*
* These values are written with the MMU off, but read with the MMU on.
@@ -666,15 +667,17 @@ ENDPROC(set_cpu_boot_mode_flag)
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
-ENTRY(__boot_cpu_mode)
+SYM_DATA_START(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2
.long BOOT_CPU_MODE_EL1
+SYM_DATA_END(__boot_cpu_mode)
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
*/
-ENTRY(__early_cpu_boot_status)
+SYM_DATA_START(__early_cpu_boot_status)
.quad 0
+SYM_DATA_END(__early_cpu_boot_status)
.popsection
@@ -682,7 +685,7 @@ ENTRY(__early_cpu_boot_status)
* This provides a "holding pen" for platforms to hold all secondary
* cores are held until we're ready for them to initialise.
*/
-ENTRY(secondary_holding_pen)
+SYM_FUNC_START(secondary_holding_pen)
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
@@ -694,19 +697,19 @@ pen: ldr x4, [x3]
b.eq secondary_startup
wfe
b pen
-ENDPROC(secondary_holding_pen)
+SYM_FUNC_END(secondary_holding_pen)
/*
* Secondary entry point that jumps straight into the kernel. Only to
* be used where CPUs are brought online dynamically by the kernel.
*/
-ENTRY(secondary_entry)
+SYM_FUNC_START(secondary_entry)
bl el2_setup // Drop to EL1
bl set_cpu_boot_mode_flag
b secondary_startup
-ENDPROC(secondary_entry)
+SYM_FUNC_END(secondary_entry)
-secondary_startup:
+SYM_FUNC_START_LOCAL(secondary_startup)
/*
* Common entry point for secondary CPUs.
*/
@@ -716,9 +719,9 @@ secondary_startup:
bl __enable_mmu
ldr x8, =__secondary_switched
br x8
-ENDPROC(secondary_startup)
+SYM_FUNC_END(secondary_startup)
-__secondary_switched:
+SYM_FUNC_START_LOCAL(__secondary_switched)
adr_l x5, vectors
msr vbar_el1, x5
isb
@@ -733,13 +736,13 @@ __secondary_switched:
mov x29, #0
mov x30, #0
b secondary_start_kernel
-ENDPROC(__secondary_switched)
+SYM_FUNC_END(__secondary_switched)
-__secondary_too_slow:
+SYM_FUNC_START_LOCAL(__secondary_too_slow)
wfe
wfi
b __secondary_too_slow
-ENDPROC(__secondary_too_slow)
+SYM_FUNC_END(__secondary_too_slow)
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
@@ -771,7 +774,7 @@ ENDPROC(__secondary_too_slow)
* Checks if the selected granule size is supported by the CPU.
* If it isn't, park the CPU
*/
-ENTRY(__enable_mmu)
+SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -795,9 +798,9 @@ ENTRY(__enable_mmu)
dsb nsh
isb
ret
-ENDPROC(__enable_mmu)
+SYM_FUNC_END(__enable_mmu)
-ENTRY(__cpu_secondary_check52bitva)
+SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
ldr_l x0, vabits_actual
cmp x0, #52
@@ -815,9 +818,9 @@ ENTRY(__cpu_secondary_check52bitva)
#endif
2: ret
-ENDPROC(__cpu_secondary_check52bitva)
+SYM_FUNC_END(__cpu_secondary_check52bitva)
-__no_granule_support:
+SYM_FUNC_START_LOCAL(__no_granule_support)
/* Indicate that this CPU can't boot and is stuck in the kernel */
update_early_cpu_boot_status \
CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
@@ -825,10 +828,10 @@ __no_granule_support:
wfe
wfi
b 1b
-ENDPROC(__no_granule_support)
+SYM_FUNC_END(__no_granule_support)
#ifdef CONFIG_RELOCATABLE
-__relocate_kernel:
+SYM_FUNC_START_LOCAL(__relocate_kernel)
/*
* Iterate over each entry in the relocation table, and apply the
* relocations in place.
@@ -930,10 +933,10 @@ __relocate_kernel:
#endif
ret
-ENDPROC(__relocate_kernel)
+SYM_FUNC_END(__relocate_kernel)
#endif
-__primary_switch:
+SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
mov x19, x0 // preserve new SCTLR_EL1 value
mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
@@ -976,4 +979,4 @@ __primary_switch:
ldr x8, =__primary_switched
adrp x0, __PHYS_OFFSET
br x8
-ENDPROC(__primary_switch)
+SYM_FUNC_END(__primary_switch)
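
Note the data objects in this file (kimage_vaddr, __boot_cpu_mode,
__early_cpu_boot_status): they were previously declared with ENTRY(), which
only emits an aligned global label and carries no type or size information,
so the conversion uses SYM_DATA_START/SYM_DATA_END, which type the symbol as
an object and record its size. A minimal sketch with a placeholder name:

    SYM_DATA_START(example_value)    // illustrative symbol
            .quad 0                  // SYM_DATA_END marks example_value as an
    SYM_DATA_END(example_value)      // object and sets its size (8 bytes here)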

diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S

@@ -14,7 +14,7 @@
.text
nop
-ENTRY(__kernel_rt_sigreturn)
+SYM_FUNC_START(__kernel_rt_sigreturn)
.cfi_startproc
.cfi_signal_frame
.cfi_def_cfa x29, 0
@@ -23,4 +23,4 @@ ENTRY(__kernel_rt_sigreturn)
mov x8, #__NR_rt_sigreturn
svc #0
.cfi_endproc
-ENDPROC(__kernel_rt_sigreturn)
+SYM_FUNC_END(__kernel_rt_sigreturn)

diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S

@@ -10,13 +10,6 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
-#define ARM_ENTRY(name) \
-ENTRY(name)
-#define ARM_ENDPROC(name) \
-.type name, %function; \
-END(name)
.text
.arm
@@ -24,39 +17,39 @@
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_sigreturn_arm)
+SYM_FUNC_START(__kernel_sigreturn_arm)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_sigreturn_arm)
+SYM_FUNC_END(__kernel_sigreturn_arm)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_rt_sigreturn_arm)
+SYM_FUNC_START(__kernel_rt_sigreturn_arm)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_arm)
+SYM_FUNC_END(__kernel_rt_sigreturn_arm)
.thumb
.fnstart
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_sigreturn_thumb)
+SYM_FUNC_START(__kernel_sigreturn_thumb)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_sigreturn_thumb)
+SYM_FUNC_END(__kernel_sigreturn_thumb)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_START(__kernel_rt_sigreturn_thumb)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_END(__kernel_rt_sigreturn_thumb)

diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S

@@ -18,7 +18,7 @@
.align 11
-ENTRY(__kvm_hyp_init)
+SYM_CODE_START(__kvm_hyp_init)
ventry __invalid // Synchronous EL2t
ventry __invalid // IRQ EL2t
ventry __invalid // FIQ EL2t
@@ -117,9 +117,9 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
/* Hello, World! */
eret
-ENDPROC(__kvm_hyp_init)
+SYM_CODE_END(__kvm_hyp_init)
-ENTRY(__kvm_handle_stub_hvc)
+SYM_CODE_START(__kvm_handle_stub_hvc)
cmp x0, #HVC_SOFT_RESTART
b.ne 1f
@@ -158,7 +158,7 @@ reset:
ldr x0, =HVC_STUB_ERR
eret
-ENDPROC(__kvm_handle_stub_hvc)
+SYM_CODE_END(__kvm_handle_stub_hvc)
.ltorg

diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S

@@ -28,7 +28,7 @@
* and is used to implement hyp stubs in the same way as in
* arch/arm64/kernel/hyp_stub.S.
*/
-ENTRY(__kvm_call_hyp)
+SYM_FUNC_START(__kvm_call_hyp)
hvc #0
ret
-ENDPROC(__kvm_call_hyp)
+SYM_FUNC_END(__kvm_call_hyp)

diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S

@@ -11,12 +11,12 @@
.text
.pushsection .hyp.text, "ax"
-ENTRY(__fpsimd_save_state)
+SYM_FUNC_START(__fpsimd_save_state)
fpsimd_save x0, 1
ret
-ENDPROC(__fpsimd_save_state)
+SYM_FUNC_END(__fpsimd_save_state)
-ENTRY(__fpsimd_restore_state)
+SYM_FUNC_START(__fpsimd_restore_state)
fpsimd_restore x0, 1
ret
-ENDPROC(__fpsimd_restore_state)
+SYM_FUNC_END(__fpsimd_restore_state)

diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S

@@ -180,7 +180,7 @@ el2_error:
eret
sb
-ENTRY(__hyp_do_panic)
+SYM_FUNC_START(__hyp_do_panic)
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
@@ -188,18 +188,19 @@ ENTRY(__hyp_do_panic)
msr elr_el2, lr
eret
sb
-ENDPROC(__hyp_do_panic)
+SYM_FUNC_END(__hyp_do_panic)
-ENTRY(__hyp_panic)
+SYM_CODE_START(__hyp_panic)
get_host_ctxt x0, x1
b hyp_panic
-ENDPROC(__hyp_panic)
+SYM_CODE_END(__hyp_panic)
.macro invalid_vector label, target = __hyp_panic
.align 2
+SYM_CODE_START(\label)
-\label:
b \target
-ENDPROC(\label)
+SYM_CODE_END(\label)
.endm
/* None of these should ever happen */
@@ -246,7 +247,7 @@ check_preamble_length 661b, 662b
check_preamble_length 661b, 662b
.endm
-ENTRY(__kvm_hyp_vector)
+SYM_CODE_START(__kvm_hyp_vector)
invalid_vect el2t_sync_invalid // Synchronous EL2t
invalid_vect el2t_irq_invalid // IRQ EL2t
invalid_vect el2t_fiq_invalid // FIQ EL2t
@@ -266,7 +267,7 @@ ENTRY(__kvm_hyp_vector)
valid_vect el1_irq // IRQ 32-bit EL1
invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
valid_vect el1_error // Error 32-bit EL1
-ENDPROC(__kvm_hyp_vector)
+SYM_CODE_END(__kvm_hyp_vector)
#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
@@ -311,15 +312,17 @@ alternative_cb_end
.endm
.align 11
-ENTRY(__bp_harden_hyp_vecs_start)
+SYM_CODE_START(__bp_harden_hyp_vecs)
.rept BP_HARDEN_EL2_SLOTS
generate_vectors
.endr
-ENTRY(__bp_harden_hyp_vecs_end)
+1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
+.org 1b
+SYM_CODE_END(__bp_harden_hyp_vecs)
.popsection
-ENTRY(__smccc_workaround_1_smc_start)
+SYM_CODE_START(__smccc_workaround_1_smc)
esb
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
@@ -329,5 +332,7 @@ ENTRY(__smccc_workaround_1_smc_start)
ldp x2, x3, [sp, #(8 * 0)]
ldp x0, x1, [sp, #(8 * 2)]
add sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+1: .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
+.org 1b
+SYM_CODE_END(__smccc_workaround_1_smc)
#endif
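
The paired .org directives introduced above replace the old end-marker symbols
(__bp_harden_hyp_vecs_end, __smccc_workaround_1_smc_end) with an assemble-time
size check: the first .org advances the location counter to the declared end
of the symbol and fails if the code is larger, then .org 1b rewinds to the
label, which GNU as rejects if any padding had to be inserted (the location
counter may never move backwards), so the body must be exactly the declared
size. A sketch of the idiom with an illustrative symbol and size:

    SYM_CODE_START(example_stub)     // illustrative name
            ret                      // exactly 4 bytes of code
    1:      .org example_stub + 4    // errors if the body overran 4 bytes
            .org 1b                  // errors if padding was inserted above
    SYM_CODE_END(example_stub)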