Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "Another round of small ARM fixes.

  restore_user_regs early stack deallocation is buggy in the presence
  of FIQs which switch to SVC mode, and could lead to corrupted
  registers being returned to a user process given an inopportune FIQ
  event.

  Another bug was spotted in the ARM perf code where it could lose
  track of perf counter overflows, leading to incorrect perf results.

  Lastly, a bug in arm_add_memory() was spotted where the memory sizes
  aren't properly rounded.  As most people pass properly rounded sizes,
  this hasn't been noticed"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8292/1: mm: fix size rounding-down of arm_add_memory() function
  ARM: 8255/1: perf: Prevent wraparound during overflow
  ARM: 8266/1: Remove early stack deallocation from restore_user_regs
commit 0ad4989d62
@@ -253,21 +253,22 @@
 	.endm

 	.macro	restore_user_regs, fast = 0, offset = 0
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	mov	r2, sp
+	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	strex	r1, r2, [r2]			@ clear the exclusive monitor
 #endif
 	.if	\fast
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
 	.else
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
 	.endif
 	mov	r0, r0				@ ARMv5T and earlier require a nop
 						@ after ldm {}^
-	add	sp, sp, #S_FRAME_SIZE - S_PC
+	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm

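The hunk above is from arch/arm/kernel/entry-header.S. The fix copies sp into r2, restores everything through r2, and raises sp only in a single final add. Previously the writeback in "ldr lr, [sp, #\offset + S_PC]!" deallocated part of the exception frame while r0-lr still had to be loaded from below the new sp; an FIQ landing in that window, switching to SVC mode and reusing the same stack, could overwrite the not-yet-restored registers. What follows is a rough user-space C model of that hazard, not kernel code; every name in it is made up for illustration:

/*
 * Model of the race: once sp is raised past part of the exception
 * frame, a nested handler borrowing the same stack may overwrite
 * registers the outer return path still has to load.
 */
#include <stdio.h>

#define FRAME_WORDS 17                    /* pt_regs: r0-r15 + cpsr */

static unsigned long stack[64];
static unsigned long *sp;

/* Stands in for an FIQ that switches to SVC mode on the same stack. */
static void nested_handler(void)
{
	*--sp = 0xdeadbeef;               /* push lands just below caller's sp */
	sp++;                             /* handler returns, pops its frame */
}

int main(void)
{
	unsigned long *frame;
	int i;

	sp = &stack[40] - FRAME_WORDS;    /* exception entry allocates a frame */
	frame = sp;
	for (i = 0; i < FRAME_WORDS; i++)
		frame[i] = i;             /* the saved user registers */

	/* Buggy order: deallocate first, read afterwards. Anything below
	 * sp is fair game for an interrupt arriving in between. */
	sp += FRAME_WORDS;                /* the early "add sp, ..." */
	nested_handler();                 /* the inopportune FIQ */

	printf("frame[16] = %#lx\n", frame[16]);  /* 0xdeadbeef, not 16 */
	return 0;
}

In the fixed sequence nothing live ever sits below the stack pointer, so a nested handler can push freely without corrupting the return state.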
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
 		ret = 1;
 	}

-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;

 	local64_set(&hwc->prev_count, (u64)-left);

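This hunk is from armpmu_event_set_period() in arch/arm/kernel/perf_event.c. The counter is programmed with (u64)-left so that it overflows after another `left` events. If `left` is allowed to span the full counter range, events counted during interrupt latency can carry the counter past the value just programmed, and that wraparound silently swallows a whole period. Clamping to max_period >> 1 leaves half the counter range of headroom. A small sketch of the arithmetic, assuming a 32-bit counter; MAX_PERIOD and program_counter_value() are illustrative stand-ins, not the driver's API:

/* Sketch of the clamping above, assuming a 32-bit event counter. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD 0xffffffffULL          /* full 32-bit counter range */

/* The hardware counts up from this value and interrupts on overflow,
 * i.e. after 'left' more events. */
static uint32_t program_counter_value(uint64_t left)
{
	return (uint32_t)-left;
}

int main(void)
{
	uint64_t left = MAX_PERIOD;       /* the old code allowed this */

	/* Old: the counter starts at 1; a handful of events counted
	 * during interrupt latency wrap it past the start value, and a
	 * whole period of events is lost. */
	printf("old start value: %#x\n", program_counter_value(left));

	/* New: clamp to half the range, leaving ~2^31 events of headroom
	 * before the counter can overtake the value just programmed. */
	if (left > (MAX_PERIOD >> 1))
		left = MAX_PERIOD >> 1;
	printf("new start value: %#x\n", program_counter_value(left));
	return 0;
}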
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)

 	/*
 	 * Ensure that start/size are aligned to a page boundary.
-	 * Size is appropriately rounded down, start is rounded up.
+	 * Size is rounded down, start is rounded up.
 	 */
-	size -= start & ~PAGE_MASK;
 	aligned_start = PAGE_ALIGN(start);
+	if (aligned_start > start + size)
+		size = 0;
+	else
+		size -= aligned_start - start;

 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
 	if (aligned_start > ULONG_MAX) {
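This hunk is from arm_add_memory() in arch/arm/kernel/setup.c. Rounding `start` up to a page boundary discards PAGE_ALIGN(start) - start bytes from the front of the region, but the old code subtracted only start & ~PAGE_MASK, the offset into the page, so the trimmed region could extend past the real end of memory. The new code subtracts exactly what the alignment discarded, and zeroes the size when the region never reaches a page boundary. A standalone sketch of the difference, with the page macros redefined locally so it runs in user space (the 4 KiB page size and the addresses are illustrative):

/* Demonstrates the old over-count vs. the new exact trim. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096ULL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t start = 0x80000400, size = 0x100000;  /* unaligned start */
	uint64_t aligned_start = PAGE_ALIGN(start);
	uint64_t old_size, new_size;

	/* Old math: subtracts the offset into the page (0x400), although
	 * aligning start up really discards 0xc00 bytes of the region. */
	old_size = size - (start & ~PAGE_MASK);

	/* New math: subtract what the alignment actually discarded, and
	 * give up entirely if the region never reaches a page boundary. */
	if (aligned_start > start + size)
		new_size = 0;
	else
		new_size = size - (aligned_start - start);

	printf("real end of memory: %#llx\n",
	       (unsigned long long)(start + size));
	printf("old end (overruns): %#llx\n",
	       (unsigned long long)(aligned_start + old_size));
	printf("new end (correct):  %#llx\n",
	       (unsigned long long)(aligned_start + new_size));
	return 0;
}

With these numbers the old computation claims 0x800 bytes beyond the end of the region, which is why callers passing properly rounded sizes never noticed the bug.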