mirror of https://gitee.com/openkylin/linux.git
powerpc: Add little endian support to alignment handler
Handle most unaligned load and store faults in little endian mode. Strings,
multiples and VSX are not supported.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
This commit is contained in:
parent a5841a4602
commit 835e206a67
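The heart of the change is replacing the fall-through switch over fixed
v[0]..v[7] accesses with a single loop that starts at the byte offset where a
2- or 4-byte operand sits inside the 8-byte union on the host's endianness.
Below is a minimal standalone sketch of that technique (illustrative only, not
part of the patch; it mirrors the diff's union data but substitutes the
compiler's __BYTE_ORDER__ macro for the kernel's __LITTLE_ENDIAN__, and copies
from a local array instead of using __get_user_inatomic). The patch itself
follows.

/*
 * Userspace sketch of the union/offsetof technique from the patch.
 * low32 is placed so that offsetof(union data, x32.low32) is the buffer
 * index where a 4-byte operand's bytes belong: 0 on little endian, 4 on
 * big endian.  Copying the operand's bytes starting there makes
 * x32.low32 read back exactly what a native 4-byte load would produce.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

union data {
	uint64_t ll;
	unsigned char v[8];
	struct {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		int32_t  low32;
		uint32_t hi32;
#else
		uint32_t hi32;
		int32_t  low32;
#endif
	} x32;
};

int main(void)
{
	unsigned char mem[4] = { 0x12, 0x34, 0x56, 0x78 }; /* operand in memory */
	union data data = { .ll = 0 };
	size_t start = offsetof(union data, x32.low32);

	for (size_t i = 0; i < 4; i++)	/* the patch's endian-agnostic copy loop */
		data.v[start + i] = mem[i];

	/* Prints the same value *(int32_t *)mem would give on this host */
	printf("loaded 0x%08x\n", (unsigned int)data.x32.low32);
	return 0;
}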
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -262,6 +262,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
 
 #define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
 
+#ifdef __BIG_ENDIAN__
 static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 			    unsigned int reg, unsigned int nb,
 			    unsigned int flags, unsigned int instr,
@@ -390,6 +391,7 @@ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
 		return -EFAULT;
 	return 1;	/* exception handled and fixed up */
 }
+#endif
 
 #ifdef CONFIG_SPE
 
@@ -628,7 +630,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 }
 #endif /* CONFIG_SPE */
 
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(__BIG_ENDIAN__)
 /*
  * Emulate VSX instructions...
  */
@@ -702,18 +704,28 @@ int fix_alignment(struct pt_regs *regs)
 	unsigned int dsisr;
 	unsigned char __user *addr;
 	unsigned long p, swiz;
-	int ret;
-	union {
+	int ret, i;
+	union data {
 		u64 ll;
 		double dd;
 		unsigned char v[8];
 		struct {
+#ifdef __LITTLE_ENDIAN__
+			int	 low32;
+			unsigned hi32;
+#else
 			unsigned hi32;
 			int	 low32;
+#endif
 		} x32;
 		struct {
+#ifdef __LITTLE_ENDIAN__
+			short	      low16;
+			unsigned char hi48[6];
+#else
 			unsigned char hi48[6];
 			short	      low16;
+#endif
 		} x16;
 	} data;
 
@@ -772,8 +784,9 @@ int fix_alignment(struct pt_regs *regs)
 
 	/* Byteswap little endian loads and stores */
 	swiz = 0;
-	if (regs->msr & MSR_LE) {
+	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
 		flags ^= SW;
+#ifdef __BIG_ENDIAN__
 		/*
 		 * So-called "PowerPC little endian" mode works by
 		 * swizzling addresses rather than by actually doing
@@ -786,11 +799,13 @@ int fix_alignment(struct pt_regs *regs)
 		 */
 		if (cpu_has_feature(CPU_FTR_PPC_LE))
 			swiz = 7;
+#endif
 	}
 
 	/* DAR has the operand effective address */
 	addr = (unsigned char __user *)regs->dar;
 
+#ifdef __BIG_ENDIAN__
 #ifdef CONFIG_VSX
 	if ((instruction & 0xfc00003e) == 0x7c000018) {
 		unsigned int elsize;
@@ -810,7 +825,7 @@ int fix_alignment(struct pt_regs *regs)
 			elsize = 8;
 
 		flags = 0;
-		if (regs->msr & MSR_LE)
+		if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
 			flags |= SW;
 		if (instruction & 0x100)
 			flags |= ST;
@@ -824,6 +839,9 @@ int fix_alignment(struct pt_regs *regs)
 		PPC_WARN_ALIGNMENT(vsx, regs);
 		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
 	}
+#endif
+#else
+	return -EFAULT;
 #endif
 	/* A size of 0 indicates an instruction we don't support, with
 	 * the exception of DCBZ which is handled as a special case here
@@ -839,9 +857,13 @@ int fix_alignment(struct pt_regs *regs)
 	 * function
 	 */
 	if (flags & M) {
+#ifdef __BIG_ENDIAN__
 		PPC_WARN_ALIGNMENT(multiple, regs);
 		return emulate_multiple(regs, addr, reg, nb,
 					flags, instr, swiz);
+#else
+		return -EFAULT;
+#endif
 	}
 
 	/* Verify the address of the operand */
@@ -860,8 +882,12 @@ int fix_alignment(struct pt_regs *regs)
 
 	/* Special case for 16-byte FP loads and stores */
 	if (nb == 16) {
+#ifdef __BIG_ENDIAN__
 		PPC_WARN_ALIGNMENT(fp_pair, regs);
 		return emulate_fp_pair(addr, reg, flags);
+#else
+		return -EFAULT;
+#endif
 	}
 
 	PPC_WARN_ALIGNMENT(unaligned, regs);
@@ -870,24 +896,28 @@ int fix_alignment(struct pt_regs *regs)
 	 * get it from register values
 	 */
 	if (!(flags & ST)) {
+		unsigned int start = 0;
+
+		switch (nb) {
+		case 4:
+			start = offsetof(union data, x32.low32);
+			break;
+		case 2:
+			start = offsetof(union data, x16.low16);
+			break;
+		}
+
 		data.ll = 0;
 		ret = 0;
-		p = (unsigned long) addr;
-		switch (nb) {
-		case 8:
-			ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
-			ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
-			ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
-			ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
-		case 4:
-			ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
-			ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
-		case 2:
-			ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
-			ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
-			if (unlikely(ret))
-				return -EFAULT;
-		}
+		p = (unsigned long)addr;
+
+		for (i = 0; i < nb; i++)
+			ret |= __get_user_inatomic(data.v[start + i],
+						   SWIZ_PTR(p++));
+
+		if (unlikely(ret))
+			return -EFAULT;
+
 	} else if (flags & F) {
 		data.dd = current->thread.TS_FPR(reg);
 		if (flags & S) {
@@ -945,21 +975,24 @@ int fix_alignment(struct pt_regs *regs)
 
 	/* Store result to memory or update registers */
 	if (flags & ST) {
-		ret = 0;
-		p = (unsigned long) addr;
+		unsigned int start = 0;
+
 		switch (nb) {
-		case 8:
-			ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
-			ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
-			ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
-			ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
 		case 4:
-			ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
-			ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
+			start = offsetof(union data, x32.low32);
+			break;
 		case 2:
-			ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
-			ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
+			start = offsetof(union data, x16.low16);
+			break;
 		}
+
+		ret = 0;
+		p = (unsigned long)addr;
+
+		for (i = 0; i < nb; i++)
+			ret |= __put_user_inatomic(data.v[start + i],
+						   SWIZ_PTR(p++));
+
 		if (unlikely(ret))
 			return -EFAULT;
 	} else if (flags & F)
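For the big endian paths the patch keeps under #ifdef __BIG_ENDIAN__, the
diff's own comment notes that so-called "PowerPC little endian" mode works by
swizzling addresses rather than by actually reversing bytes. A rough
standalone illustration of the SWIZ_PTR effect with swiz = 7 (an assumption-
laden userspace sketch: an aligned local buffer stands in for the faulting
user address, and plain dereferences replace __get_user_inatomic):

/*
 * Userspace illustration (not from the kernel) of the SWIZ_PTR address
 * swizzle: with swiz = 7, byte offset i within an aligned doubleword
 * maps to 7 - i, so bytes come out mirrored without any data being
 * byte-reversed.  This is the CPU_FTR_PPC_LE behaviour the handler
 * emulates when swiz is set.
 */
#include <stdio.h>

#define SWIZ_PTR(p)	((unsigned char *)((p) ^ swiz))

int main(void)
{
	_Alignas(8) unsigned char buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned long swiz = 7;
	unsigned long p = (unsigned long)buf;
	int i;

	for (i = 0; i < 8; i++)		/* prints: 7 6 5 4 3 2 1 0 */
		printf("%d ", *SWIZ_PTR(p + i));
	printf("\n");
	return 0;
}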