Merge branch 'for-next/timers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into for-next/core
Conflicts:
	arch/arm64/Kconfig
	arch/arm64/include/asm/arch_timer.h
commit 24cf262da1
@@ -61,6 +61,7 @@ stable kernels.
| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |

@@ -11,6 +11,10 @@
#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
/* 32bit ARM doesn't know anything about timer errata... */
#define has_erratum_handler(h) (false)
#define erratum_handler(h) (arch_timer_##h)

int arch_timer_arch_init(void);

/*

@@ -79,7 +83,7 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}

static inline u64 arch_counter_get_cntpct(void)
static inline u64 __arch_counter_get_cntpct(void)
{
u64 cval;

@@ -88,7 +92,12 @@ static inline u64 arch_counter_get_cntpct(void)
return cval;
}

static inline u64 arch_counter_get_cntvct(void)
static inline u64 __arch_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct();
}

static inline u64 __arch_counter_get_cntvct(void)
{
u64 cval;

@@ -97,6 +106,11 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}

static inline u64 __arch_counter_get_cntvct_stable(void)
{
return __arch_counter_get_cntvct();
}

static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;

@@ -68,6 +68,8 @@
#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)

#define CNTVCT __ACCESS_CP15_64(1, c14)

extern unsigned long cr_alignment; /* defined in entry-armv.S */

static inline unsigned long get_cr(void)

@@ -18,9 +18,9 @@
#include <linux/compiler.h>
#include <linux/hrtimer.h>
#include <linux/time.h>
#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/cp15.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/vdso_datapage.h>

@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
u64 cycle_now;
u64 nsec;

cycle_now = arch_counter_get_cntvct();
isb();
cycle_now = read_sysreg(CNTVCT);

cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;

@@ -477,15 +477,17 @@ config ARM64_ERRATUM_1024718
If unsure, say Y.

config ARM64_ERRATUM_1188873
bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
default y
depends on COMPAT
select ARM_ARCH_TIMER_OOL_WORKAROUND
help
This option adds a workaround for ARM Cortex-A76 erratum 1188873.
This option adds a workaround for ARM Cortex-A76/Neoverse-N1
erratum 1188873.

Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
register corruption when accessing the timer registers from
AArch32 userspace.
Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
cause register corruption when accessing the timer registers
from AArch32 userspace.

If unsure, say Y.

@@ -31,11 +31,23 @@
#include <clocksource/arm_arch_timer.h>

#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
extern struct static_key_false arch_timer_read_ool_enabled;
#define needs_unstable_timer_counter_workaround() \
static_branch_unlikely(&arch_timer_read_ool_enabled)
#define has_erratum_handler(h) \
({ \
const struct arch_timer_erratum_workaround *__wa; \
__wa = __this_cpu_read(timer_unstable_counter_workaround); \
(__wa && __wa->h); \
})

#define erratum_handler(h) \
({ \
const struct arch_timer_erratum_workaround *__wa; \
__wa = __this_cpu_read(timer_unstable_counter_workaround); \
(__wa && __wa->h) ? __wa->h : arch_timer_##h; \
})

#else
#define needs_unstable_timer_counter_workaround() false
#define has_erratum_handler(h) false
#define erratum_handler(h) (arch_timer_##h)
#endif

enum arch_timer_erratum_match_type {

@@ -61,23 +73,37 @@ struct arch_timer_erratum_workaround {
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
timer_unstable_counter_workaround);

/* inline sysreg accessors that make erratum_handler() work */
static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
{
return read_sysreg(cntp_tval_el0);
}

static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
{
return read_sysreg(cntv_tval_el0);
}

static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
return read_sysreg(cntpct_el0);
}

static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
return read_sysreg(cntvct_el0);
}

#define arch_timer_reg_read_stable(reg) \
({ \
u64 _val; \
if (needs_unstable_timer_counter_workaround()) { \
const struct arch_timer_erratum_workaround *wa; \
({ \
u64 _val; \
\
preempt_disable_notrace(); \
wa = __this_cpu_read(timer_unstable_counter_workaround); \
if (wa && wa->read_##reg) \
_val = wa->read_##reg(); \
else \
_val = read_sysreg(reg); \
_val = erratum_handler(read_ ## reg)(); \
preempt_enable_notrace(); \
} else { \
_val = read_sysreg(reg); \
} \
_val; \
})
\
_val; \
})

/*
* These register accessors are marked inline so the compiler can

@@ -167,7 +193,7 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
: "=r" (tmp) : "r" (_val)); \
} while (0)

static inline u64 arch_counter_get_cntpct(void)
static inline u64 __arch_counter_get_cntpct_stable(void)
{
u64 cnt;

@@ -177,7 +203,17 @@ static inline u64 arch_counter_get_cntpct(void)
return cnt;
}

static inline u64 arch_counter_get_cntvct(void)
static inline u64 __arch_counter_get_cntpct(void)
{
u64 cnt;

isb();
cnt = read_sysreg(cntpct_el0);
arch_counter_enforce_ordering(cnt);
return cnt;
}

static inline u64 __arch_counter_get_cntvct_stable(void)
{
u64 cnt;

@@ -187,6 +223,16 @@ static inline u64 arch_counter_get_cntvct(void)
return cnt;
}

static inline u64 __arch_counter_get_cntvct(void)
{
u64 cnt;

isb();
cnt = read_sysreg(cntvct_el0);
arch_counter_enforce_ordering(cnt);
return cnt;
}

#undef arch_counter_enforce_ordering

static inline int arch_timer_arch_init(void)

@@ -89,6 +89,7 @@
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C

#define APM_CPU_PART_POTENZA 0x000

@@ -118,6 +119,7 @@
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)

@@ -682,6 +682,16 @@ static const struct midr_range workaround_clean_cache[] = {
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1188873
static const struct midr_range erratum_1188873_list[] = {
/* Cortex-A76 r0p0 to r2p0 */
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
/* Neoverse-N1 r0p0 to r2p0 */
MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{

@@ -801,10 +811,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#ifdef CONFIG_ARM64_ERRATUM_1188873
{
/* Cortex-A76 r0p0 to r2p0 */
.desc = "ARM erratum 1188873",
.capability = ARM64_WORKAROUND_1188873,
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522

@@ -336,6 +336,21 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
#ifdef CONFIG_ARM64_ERRATUM_1188873
alternative_if_not ARM64_WORKAROUND_1188873
b 4f
alternative_else_nop_endif
/*
* if (x22.mode32 == cntkctl_el1.el0vcten)
* cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
*/
mrs x1, cntkctl_el1
eon x0, x1, x22, lsr #3
tbz x0, #1, 4f
eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
msr cntkctl_el1, x1
4:
#endif
apply_ssbd 0, x0, x1
.endif

@@ -362,11 +377,11 @@ alternative_else_nop_endif
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
bne 4f
bne 5f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
br x30
4:
5:
tramp_alias x30, tramp_exit_compat
br x30
#endif

@@ -496,7 +496,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = ESR_ELx_SYS64_ISS_RT(esr);

pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
pt_regs_write_reg(regs, rt, arch_timer_read_counter());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

@@ -668,7 +668,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
u64 val = arch_counter_get_cntvct();
u64 val = arch_timer_read_counter();

pt_regs_write_reg(regs, rt, lower_32_bits(val));
pt_regs_write_reg(regs, rt2, upper_32_bits(val));

@@ -152,6 +152,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}

static u64 arch_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
}

static u64 arch_counter_get_cntpct(void)
{
return __arch_counter_get_cntpct();
}

static u64 arch_counter_get_cntvct_stable(void)
{
return __arch_counter_get_cntvct_stable();
}

static u64 arch_counter_get_cntvct(void)
{
return __arch_counter_get_cntvct();
}

/*
* Default to cp15 based access because arm64 uses this function for
* sched_clock() before DT is probed and the cp15 method is guaranteed

@@ -319,13 +339,6 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif

#ifdef CONFIG_ARM64_ERRATUM_1188873
static u64 notrace arm64_1188873_read_cntvct_el0(void)
{
return read_sysreg(cntvct_el0);
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
* The low bits of the counter registers are indeterminate while bit 10 or

@@ -372,8 +385,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
struct clock_event_device *clk)

@@ -457,14 +469,6 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
{
.match_type = ate_match_local_cap_id,
.id = (void *)ARM64_WORKAROUND_1188873,
.desc = "ARM erratum 1188873",
.read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,

@@ -552,11 +556,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}

/*
* Use the locked version, as we're called from the CPU
* hotplug framework. Otherwise, we end-up in deadlock-land.
*/
static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
atomic_set(&timer_unstable_counter_workaround_in_use, 1);

/*
* Don't use the vdso fastpath if errata require using the

@@ -573,7 +574,7 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
void *arg)
{
const struct arch_timer_erratum_workaround *wa;
const struct arch_timer_erratum_workaround *wa, *__wa;
ate_match_fn_t match_fn = NULL;
bool local = false;

@@ -597,53 +598,32 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
if (!wa)
return;

if (needs_unstable_timer_counter_workaround()) {
const struct arch_timer_erratum_workaround *__wa;
__wa = __this_cpu_read(timer_unstable_counter_workaround);
if (__wa && wa != __wa)
pr_warn("Can't enable workaround for %s (clashes with %s\n)",
wa->desc, __wa->desc);
__wa = __this_cpu_read(timer_unstable_counter_workaround);
if (__wa && wa != __wa)
pr_warn("Can't enable workaround for %s (clashes with %s\n)",
wa->desc, __wa->desc);

if (__wa)
return;
}
if (__wa)
return;

arch_timer_enable_workaround(wa, local);
pr_info("Enabling %s workaround for %s\n",
local ? "local" : "global", wa->desc);
}

#define erratum_handler(fn, r, ...) \
({ \
bool __val; \
if (needs_unstable_timer_counter_workaround()) { \
const struct arch_timer_erratum_workaround *__wa; \
__wa = __this_cpu_read(timer_unstable_counter_workaround); \
if (__wa && __wa->fn) { \
r = __wa->fn(__VA_ARGS__); \
__val = true; \
} else { \
__val = false; \
} \
} else { \
__val = false; \
} \
__val; \
})

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
const struct arch_timer_erratum_workaround *wa;
return has_erratum_handler(read_cntvct_el0);
}

wa = __this_cpu_read(timer_unstable_counter_workaround);
return wa && wa->read_cntvct_el0;
static bool arch_timer_counter_has_wa(void)
{
return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
#define erratum_handler(fn, r, ...) ({false;})
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static __always_inline irqreturn_t timer_handler(const int access,

@@ -736,11 +716,6 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
int ret;

if (erratum_handler(set_next_event_virt, ret, evt, clk))
return ret;

set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}

@@ -748,11 +723,6 @@ static int arch_timer_set_next_event_virt(unsigned long evt,
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
int ret;

if (erratum_handler(set_next_event_phys, ret, evt, clk))
return ret;

set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}

@@ -777,6 +747,10 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;

if (type == ARCH_TIMER_TYPE_CP15) {
typeof(clk->set_next_event) sne;

arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

if (arch_timer_c3stop)
clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";

@@ -787,20 +761,20 @@ static void __arch_timer_setup(unsigned type,
case ARCH_TIMER_VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
clk->set_next_event = arch_timer_set_next_event_virt;
sne = erratum_handler(set_next_event_virt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
case ARCH_TIMER_HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
clk->set_next_event = arch_timer_set_next_event_phys;
sne = erratum_handler(set_next_event_phys);
break;
default:
BUG();
}

arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
clk->set_next_event = sne;
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";

@@ -1002,12 +976,22 @@ static void __init arch_counter_register(unsigned type)

/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
u64 (*rd)(void);

if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
if (arch_timer_counter_has_wa())
rd = arch_counter_get_cntvct_stable;
else
rd = arch_counter_get_cntvct;
} else {
if (arch_timer_counter_has_wa())
rd = arch_counter_get_cntpct_stable;
else
rd = arch_counter_get_cntpct;
}

arch_timer_read_counter = rd;
clocksource_counter.archdata.vdso_direct = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;

@@ -161,7 +161,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);

timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
arch_counter_get_cntvct();
arch_timer_read_counter();

do_div(timeleft, gwdt->clk);