arm64: Refactor sysinstr exception handling
Right now we trap some of the user-space data cache operations based on a few errata (ARM 819472, 826319, 827319 and 824069). We need to trap userspace access to CTR_EL0 if we detect a mismatched cache line size. Since both of these traps share the same EC, refactor the handler a little bit to make it more reader-friendly. Cc: Mark Rutland <mark.rutland@arm.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Andre Przywara <andre.przywara@arm.com> Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
This commit is contained in:
parent
072f0a6338
commit
9dbd5bb25c
|
@ -78,6 +78,23 @@
|
||||||
|
|
||||||
#define ESR_ELx_IL		(UL(1) << 25)
#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)

/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR		(UL(1) << 6)

/* Shared ISS field definitions for Data/Instruction aborts */
#define ESR_ELx_EA		(UL(1) << 9)
#define ESR_ELx_S1PTW		(UL(1) << 7)

/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */
#define ESR_ELx_FSC		(0x3F)
#define ESR_ELx_FSC_TYPE	(0x3C)
#define ESR_ELx_FSC_EXTABT	(0x10)
#define ESR_ELx_FSC_ACCESS	(0x08)
#define ESR_ELx_FSC_FAULT	(0x04)
#define ESR_ELx_FSC_PERM	(0x0C)

/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV		(UL(1) << 24)
#define ESR_ELx_SAS_SHIFT	(22)
#define ESR_ELx_SAS		(UL(3) << ESR_ELx_SAS_SHIFT)
|
||||||
|
@ -86,16 +103,9 @@
|
||||||
#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
|
#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
|
||||||
#define ESR_ELx_SF (UL(1) << 15)
|
#define ESR_ELx_SF (UL(1) << 15)
|
||||||
#define ESR_ELx_AR (UL(1) << 14)
|
#define ESR_ELx_AR (UL(1) << 14)
|
||||||
#define ESR_ELx_EA (UL(1) << 9)
|
|
||||||
#define ESR_ELx_CM (UL(1) << 8)
|
#define ESR_ELx_CM (UL(1) << 8)
|
||||||
#define ESR_ELx_S1PTW (UL(1) << 7)
|
|
||||||
#define ESR_ELx_WNR (UL(1) << 6)
|
/* ISS field definitions for exceptions taken in to Hyp */
|
||||||
#define ESR_ELx_FSC (0x3F)
|
|
||||||
#define ESR_ELx_FSC_TYPE (0x3C)
|
|
||||||
#define ESR_ELx_FSC_EXTABT (0x10)
|
|
||||||
#define ESR_ELx_FSC_ACCESS (0x08)
|
|
||||||
#define ESR_ELx_FSC_FAULT (0x04)
|
|
||||||
#define ESR_ELx_FSC_PERM (0x0C)
|
|
||||||
#define ESR_ELx_CV (UL(1) << 24)
|
#define ESR_ELx_CV (UL(1) << 24)
|
||||||
#define ESR_ELx_COND_SHIFT (20)
|
#define ESR_ELx_COND_SHIFT (20)
|
||||||
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
|
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
|
||||||
|
@ -109,6 +119,54 @@
|
||||||
((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \
|
((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \
|
||||||
((imm) & 0xffff))
|
((imm) & 0xffff))
|
||||||
|
|
||||||
|
/* ISS field definitions for System instruction traps */
#define ESR_ELx_SYS64_ISS_RES0_SHIFT	22
#define ESR_ELx_SYS64_ISS_RES0_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_RES0_SHIFT)
#define ESR_ELx_SYS64_ISS_DIR_MASK	0x1
#define ESR_ELx_SYS64_ISS_DIR_READ	0x1
#define ESR_ELx_SYS64_ISS_DIR_WRITE	0x0

#define ESR_ELx_SYS64_ISS_RT_SHIFT	5
#define ESR_ELx_SYS64_ISS_RT_MASK	(UL(0x1f) << ESR_ELx_SYS64_ISS_RT_SHIFT)
#define ESR_ELx_SYS64_ISS_CRM_SHIFT	1
#define ESR_ELx_SYS64_ISS_CRM_MASK	(UL(0xf) << ESR_ELx_SYS64_ISS_CRM_SHIFT)
#define ESR_ELx_SYS64_ISS_CRN_SHIFT	10
#define ESR_ELx_SYS64_ISS_CRN_MASK	(UL(0xf) << ESR_ELx_SYS64_ISS_CRN_SHIFT)
#define ESR_ELx_SYS64_ISS_OP1_SHIFT	14
#define ESR_ELx_SYS64_ISS_OP1_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_OP1_SHIFT)
#define ESR_ELx_SYS64_ISS_OP2_SHIFT	17
#define ESR_ELx_SYS64_ISS_OP2_MASK	(UL(0x7) << ESR_ELx_SYS64_ISS_OP2_SHIFT)
#define ESR_ELx_SYS64_ISS_OP0_SHIFT	20
#define ESR_ELx_SYS64_ISS_OP0_MASK	(UL(0x3) << ESR_ELx_SYS64_ISS_OP0_SHIFT)
#define ESR_ELx_SYS64_ISS_SYS_MASK	(ESR_ELx_SYS64_ISS_OP0_MASK | \
					 ESR_ELx_SYS64_ISS_OP1_MASK | \
					 ESR_ELx_SYS64_ISS_OP2_MASK | \
					 ESR_ELx_SYS64_ISS_CRN_MASK | \
					 ESR_ELx_SYS64_ISS_CRM_MASK)
#define ESR_ELx_SYS64_ISS_SYS_VAL(op0, op1, op2, crn, crm) \
					(((op0) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | \
					 ((op1) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | \
					 ((op2) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | \
					 ((crn) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | \
					 ((crm) << ESR_ELx_SYS64_ISS_CRM_SHIFT))
/*
 * User space cache operations have the following sysreg encoding
 * in System instructions.
 * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 14 }, WRITE (L=0)
 */
#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC	14
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU	11
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC	10
#define ESR_ELx_SYS64_ISS_CRM_IC_IVAU	5

#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK	(ESR_ELx_SYS64_ISS_OP0_MASK | \
						 ESR_ELx_SYS64_ISS_OP1_MASK | \
						 ESR_ELx_SYS64_ISS_OP2_MASK | \
						 ESR_ELx_SYS64_ISS_CRN_MASK | \
						 ESR_ELx_SYS64_ISS_DIR_MASK)
#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL \
				(ESR_ELx_SYS64_ISS_SYS_VAL(1, 3, 1, 7, 0) | \
				 ESR_ELx_SYS64_ISS_DIR_WRITE)
||||||
#ifndef __ASSEMBLY__
|
#ifndef __ASSEMBLY__
|
||||||
#include <asm/types.h>
|
#include <asm/types.h>
|
||||||
|
|
||||||
|
|
|
@ -447,39 +447,32 @@ void cpu_enable_cache_maint_trap(void *__unused)
|
||||||
: "=r" (res) \
|
: "=r" (res) \
|
||||||
: "r" (address), "i" (-EFAULT) )
|
: "r" (address), "i" (-EFAULT) )
|
||||||
|
|
||||||
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
|
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
unsigned long address;
|
unsigned long address;
|
||||||
int ret;
|
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
|
||||||
|
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
|
||||||
/* if this is a write with: Op0=1, Op2=1, Op1=3, CRn=7 */
|
int ret = 0;
|
||||||
if ((esr & 0x01fffc01) == 0x0012dc00) {
|
|
||||||
int rt = (esr >> 5) & 0x1f;
|
|
||||||
int crm = (esr >> 1) & 0x0f;
|
|
||||||
|
|
||||||
address = (rt == 31) ? 0 : regs->regs[rt];
|
address = (rt == 31) ? 0 : regs->regs[rt];
|
||||||
|
|
||||||
switch (crm) {
|
switch (crm) {
|
||||||
case 11: /* DC CVAU, gets promoted */
|
case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
|
||||||
__user_cache_maint("dc civac", address, ret);
|
__user_cache_maint("dc civac", address, ret);
|
||||||
break;
|
break;
|
||||||
case 10: /* DC CVAC, gets promoted */
|
case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
|
||||||
__user_cache_maint("dc civac", address, ret);
|
__user_cache_maint("dc civac", address, ret);
|
||||||
break;
|
break;
|
||||||
case 14: /* DC CIVAC */
|
case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC: /* DC CIVAC */
|
||||||
__user_cache_maint("dc civac", address, ret);
|
__user_cache_maint("dc civac", address, ret);
|
||||||
break;
|
break;
|
||||||
case 5: /* IC IVAU */
|
case ESR_ELx_SYS64_ISS_CRM_IC_IVAU: /* IC IVAU */
|
||||||
__user_cache_maint("ic ivau", address, ret);
|
__user_cache_maint("ic ivau", address, ret);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
|
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ret)
|
if (ret)
|
||||||
arm64_notify_segfault(regs, address);
|
arm64_notify_segfault(regs, address);
|
||||||
|
@ -487,6 +480,34 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
|
||||||
regs->pc += 4;
|
regs->pc += 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct sys64_hook {
|
||||||
|
unsigned int esr_mask;
|
||||||
|
unsigned int esr_val;
|
||||||
|
void (*handler)(unsigned int esr, struct pt_regs *regs);
|
||||||
|
};
|
||||||
|
|
||||||
|
static struct sys64_hook sys64_hooks[] = {
|
||||||
|
{
|
||||||
|
.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
|
||||||
|
.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
|
||||||
|
.handler = user_cache_maint_handler,
|
||||||
|
},
|
||||||
|
{},
|
||||||
|
};
|
||||||
|
|
||||||
|
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
struct sys64_hook *hook;
|
||||||
|
|
||||||
|
for (hook = sys64_hooks; hook->handler; hook++)
|
||||||
|
if ((hook->esr_mask & esr) == hook->esr_val) {
|
||||||
|
hook->handler(esr, regs);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
|
||||||
|
}
|
||||||
|
|
||||||
long compat_arm_syscall(struct pt_regs *regs);
|
long compat_arm_syscall(struct pt_regs *regs);
|
||||||
|
|
||||||
asmlinkage long do_ni_syscall(struct pt_regs *regs)
|
asmlinkage long do_ni_syscall(struct pt_regs *regs)
|
||||||
|
|
Loading…
Reference in New Issue