clocksource: Use a plain u64 instead of cycle_t
There is no point in having an extra type for extra confusion. u64 is unambiguous. Conversion was done with the following coccinelle script: @rem@ @@ -typedef u64 cycle_t; @fix@ typedef cycle_t; @@ -cycle_t +u64 Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: John Stultz <john.stultz@linaro.org>
This commit is contained in:
parent
7c0f6ba682
commit
a5a1d1c291
|
@ -133,7 +133,7 @@ init_rtc_clockevent(void)
|
||||||
* The QEMU clock as a clocksource primitive.
|
* The QEMU clock as a clocksource primitive.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static cycle_t
|
static u64
|
||||||
qemu_cs_read(struct clocksource *cs)
|
qemu_cs_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return qemu_get_vmtime();
|
return qemu_get_vmtime();
|
||||||
|
@ -260,7 +260,7 @@ common_init_rtc(void)
|
||||||
* use this method when WTINT is in use.
|
* use this method when WTINT is in use.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static cycle_t read_rpcc(struct clocksource *cs)
|
static u64 read_rpcc(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return rpcc();
|
return rpcc();
|
||||||
}
|
}
|
||||||
|
|
|
@ -268,7 +268,7 @@ static void __init timer_init(void)
|
||||||
/*
|
/*
|
||||||
* clocksource
|
* clocksource
|
||||||
*/
|
*/
|
||||||
static cycle_t read_cycles(struct clocksource *cs)
|
static u64 read_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
struct timer_s *t = &timers[TID_CLOCKSOURCE];
|
struct timer_s *t = &timers[TID_CLOCKSOURCE];
|
||||||
|
|
||||||
|
|
|
@ -59,13 +59,13 @@ static u64 notrace ep93xx_read_sched_clock(void)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
cycle_t ep93xx_clocksource_read(struct clocksource *c)
|
u64 ep93xx_clocksource_read(struct clocksource *c)
|
||||||
{
|
{
|
||||||
u64 ret;
|
u64 ret;
|
||||||
|
|
||||||
ret = readl(EP93XX_TIMER4_VALUE_LOW);
|
ret = readl(EP93XX_TIMER4_VALUE_LOW);
|
||||||
ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
|
ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
|
||||||
return (cycle_t) ret;
|
return (u64) ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ep93xx_clkevt_set_next_event(unsigned long next,
|
static int ep93xx_clkevt_set_next_event(unsigned long next,
|
||||||
|
|
|
@ -19,7 +19,7 @@
|
||||||
|
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
|
||||||
static cycle_t cksrc_dc21285_read(struct clocksource *cs)
|
static u64 cksrc_dc21285_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return cs->mask - *CSR_TIMER2_VALUE;
|
return cs->mask - *CSR_TIMER2_VALUE;
|
||||||
}
|
}
|
||||||
|
|
|
@ -493,7 +493,7 @@ static u64 notrace ixp4xx_read_sched_clock(void)
|
||||||
* clocksource
|
* clocksource
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
|
static u64 ixp4xx_clocksource_read(struct clocksource *c)
|
||||||
{
|
{
|
||||||
return *IXP4XX_OSTS;
|
return *IXP4XX_OSTS;
|
||||||
}
|
}
|
||||||
|
|
|
@ -144,7 +144,7 @@ static struct clock_event_device ckevt = {
|
||||||
.set_state_oneshot = timer_set_shutdown,
|
.set_state_oneshot = timer_set_shutdown,
|
||||||
};
|
};
|
||||||
|
|
||||||
static cycle_t clksrc_read(struct clocksource *cs)
|
static u64 clksrc_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return timer_read();
|
return timer_read();
|
||||||
}
|
}
|
||||||
|
|
|
@ -369,9 +369,9 @@ static bool use_gptimer_clksrc __initdata;
|
||||||
/*
|
/*
|
||||||
* clocksource
|
* clocksource
|
||||||
*/
|
*/
|
||||||
static cycle_t clocksource_read_cycles(struct clocksource *cs)
|
static u64 clocksource_read_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)__omap_dm_timer_read_counter(&clksrc,
|
return (u64)__omap_dm_timer_read_counter(&clksrc,
|
||||||
OMAP_TIMER_NONPOSTED);
|
OMAP_TIMER_NONPOSTED);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@
|
||||||
/*
|
/*
|
||||||
* IOP clocksource (free-running timer 1).
|
* IOP clocksource (free-running timer 1).
|
||||||
*/
|
*/
|
||||||
static cycle_t notrace iop_clocksource_read(struct clocksource *unused)
|
static u64 notrace iop_clocksource_read(struct clocksource *unused)
|
||||||
{
|
{
|
||||||
return 0xffffffffu - read_tcr1();
|
return 0xffffffffu - read_tcr1();
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,9 +20,9 @@
|
||||||
|
|
||||||
static bool disable_cpu_idle_poll;
|
static bool disable_cpu_idle_poll;
|
||||||
|
|
||||||
static cycle_t read_cycle_count(struct clocksource *cs)
|
static u64 read_cycle_count(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)sysreg_read(COUNT);
|
return (u64)sysreg_read(COUNT);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -26,7 +26,7 @@
|
||||||
|
|
||||||
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
|
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
|
||||||
|
|
||||||
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
|
static notrace u64 bfin_read_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_CPU_FREQ
|
#ifdef CONFIG_CPU_FREQ
|
||||||
return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
|
return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
|
||||||
|
@ -80,7 +80,7 @@ void __init setup_gptimer0(void)
|
||||||
enable_gptimers(TIMER0bit);
|
enable_gptimers(TIMER0bit);
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t bfin_read_gptimer0(struct clocksource *cs)
|
static u64 bfin_read_gptimer0(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return bfin_read_TIMER0_COUNTER();
|
return bfin_read_TIMER0_COUNTER();
|
||||||
}
|
}
|
||||||
|
|
|
@ -26,7 +26,7 @@
|
||||||
static u32 sched_clock_multiplier;
|
static u32 sched_clock_multiplier;
|
||||||
#define SCHED_CLOCK_SHIFT 16
|
#define SCHED_CLOCK_SHIFT 16
|
||||||
|
|
||||||
static cycle_t tsc_read(struct clocksource *cs)
|
static u64 tsc_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return get_cycles();
|
return get_cycles();
|
||||||
}
|
}
|
||||||
|
|
|
@ -72,9 +72,9 @@ struct adsp_hw_timer_struct {
|
||||||
/* Look for "TCX0" for related constants. */
|
/* Look for "TCX0" for related constants. */
|
||||||
static __iomem struct adsp_hw_timer_struct *rtos_timer;
|
static __iomem struct adsp_hw_timer_struct *rtos_timer;
|
||||||
|
|
||||||
static cycle_t timer_get_cycles(struct clocksource *cs)
|
static u64 timer_get_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t) __vmgettime();
|
return (u64) __vmgettime();
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource hexagon_clocksource = {
|
static struct clocksource hexagon_clocksource = {
|
||||||
|
|
|
@ -21,9 +21,9 @@ void __init cyclone_setup(void)
|
||||||
|
|
||||||
static void __iomem *cyclone_mc;
|
static void __iomem *cyclone_mc;
|
||||||
|
|
||||||
static cycle_t read_cyclone(struct clocksource *cs)
|
static u64 read_cyclone(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)readq((void __iomem *)cyclone_mc);
|
return (u64)readq((void __iomem *)cyclone_mc);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource clocksource_cyclone = {
|
static struct clocksource clocksource_cyclone = {
|
||||||
|
|
|
@ -9,15 +9,15 @@ struct fsyscall_gtod_data_t {
|
||||||
seqcount_t seq;
|
seqcount_t seq;
|
||||||
struct timespec wall_time;
|
struct timespec wall_time;
|
||||||
struct timespec monotonic_time;
|
struct timespec monotonic_time;
|
||||||
cycle_t clk_mask;
|
u64 clk_mask;
|
||||||
u32 clk_mult;
|
u32 clk_mult;
|
||||||
u32 clk_shift;
|
u32 clk_shift;
|
||||||
void *clk_fsys_mmio;
|
void *clk_fsys_mmio;
|
||||||
cycle_t clk_cycle_last;
|
u64 clk_cycle_last;
|
||||||
} ____cacheline_aligned;
|
} ____cacheline_aligned;
|
||||||
|
|
||||||
struct itc_jitter_data_t {
|
struct itc_jitter_data_t {
|
||||||
int itc_jitter;
|
int itc_jitter;
|
||||||
cycle_t itc_lastcycle;
|
u64 itc_lastcycle;
|
||||||
} ____cacheline_aligned;
|
} ____cacheline_aligned;
|
||||||
|
|
||||||
|
|
|
@ -31,7 +31,7 @@
|
||||||
|
|
||||||
#include "fsyscall_gtod_data.h"
|
#include "fsyscall_gtod_data.h"
|
||||||
|
|
||||||
static cycle_t itc_get_cycles(struct clocksource *cs);
|
static u64 itc_get_cycles(struct clocksource *cs);
|
||||||
|
|
||||||
struct fsyscall_gtod_data_t fsyscall_gtod_data;
|
struct fsyscall_gtod_data_t fsyscall_gtod_data;
|
||||||
|
|
||||||
|
@ -323,7 +323,7 @@ void ia64_init_itm(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t itc_get_cycles(struct clocksource *cs)
|
static u64 itc_get_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long lcycle, now, ret;
|
unsigned long lcycle, now, ret;
|
||||||
|
|
||||||
|
@ -397,7 +397,7 @@ void update_vsyscall_tz(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
|
void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
|
||||||
struct clocksource *c, u32 mult, cycle_t cycle_last)
|
struct clocksource *c, u32 mult, u64 cycle_last)
|
||||||
{
|
{
|
||||||
write_seqcount_begin(&fsyscall_gtod_data.seq);
|
write_seqcount_begin(&fsyscall_gtod_data.seq);
|
||||||
|
|
||||||
|
|
|
@ -22,9 +22,9 @@
|
||||||
|
|
||||||
extern unsigned long sn_rtc_cycles_per_second;
|
extern unsigned long sn_rtc_cycles_per_second;
|
||||||
|
|
||||||
static cycle_t read_sn2(struct clocksource *cs)
|
static u64 read_sn2(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)readq(RTC_COUNTER_ADDR);
|
return (u64)readq(RTC_COUNTER_ADDR);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource clocksource_sn2 = {
|
static struct clocksource clocksource_sn2 = {
|
||||||
|
|
|
@ -76,7 +76,7 @@ static struct irqaction m68328_timer_irq = {
|
||||||
|
|
||||||
/***************************************************************************/
|
/***************************************************************************/
|
||||||
|
|
||||||
static cycle_t m68328_read_clk(struct clocksource *cs)
|
static u64 m68328_read_clk(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u32 cycles;
|
u32 cycles;
|
||||||
|
|
|
@ -34,7 +34,7 @@
|
||||||
#define DMA_DTMR_CLK_DIV_16 (2 << 1)
|
#define DMA_DTMR_CLK_DIV_16 (2 << 1)
|
||||||
#define DMA_DTMR_ENABLE (1 << 0)
|
#define DMA_DTMR_ENABLE (1 << 0)
|
||||||
|
|
||||||
static cycle_t cf_dt_get_cycles(struct clocksource *cs)
|
static u64 cf_dt_get_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return __raw_readl(DTCN0);
|
return __raw_readl(DTCN0);
|
||||||
}
|
}
|
||||||
|
|
|
@ -118,7 +118,7 @@ static struct irqaction pit_irq = {
|
||||||
|
|
||||||
/***************************************************************************/
|
/***************************************************************************/
|
||||||
|
|
||||||
static cycle_t pit_read_clk(struct clocksource *cs)
|
static u64 pit_read_clk(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u32 cycles;
|
u32 cycles;
|
||||||
|
|
|
@ -97,7 +97,7 @@ static struct irqaction mcfslt_timer_irq = {
|
||||||
.handler = mcfslt_tick,
|
.handler = mcfslt_tick,
|
||||||
};
|
};
|
||||||
|
|
||||||
static cycle_t mcfslt_read_clk(struct clocksource *cs)
|
static u64 mcfslt_read_clk(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u32 cycles, scnt;
|
u32 cycles, scnt;
|
||||||
|
|
|
@ -89,7 +89,7 @@ static struct irqaction mcftmr_timer_irq = {
|
||||||
|
|
||||||
/***************************************************************************/
|
/***************************************************************************/
|
||||||
|
|
||||||
static cycle_t mcftmr_read_clk(struct clocksource *cs)
|
static u64 mcftmr_read_clk(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u32 cycles;
|
u32 cycles;
|
||||||
|
|
|
@ -190,17 +190,17 @@ static u64 xilinx_clock_read(void)
|
||||||
return read_fn(timer_baseaddr + TCR1);
|
return read_fn(timer_baseaddr + TCR1);
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t xilinx_read(struct clocksource *cs)
|
static u64 xilinx_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
/* reading actual value of timer 1 */
|
/* reading actual value of timer 1 */
|
||||||
return (cycle_t)xilinx_clock_read();
|
return (u64)xilinx_clock_read();
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct timecounter xilinx_tc = {
|
static struct timecounter xilinx_tc = {
|
||||||
.cc = NULL,
|
.cc = NULL,
|
||||||
};
|
};
|
||||||
|
|
||||||
static cycle_t xilinx_cc_read(const struct cyclecounter *cc)
|
static u64 xilinx_cc_read(const struct cyclecounter *cc)
|
||||||
{
|
{
|
||||||
return xilinx_read(NULL);
|
return xilinx_read(NULL);
|
||||||
}
|
}
|
||||||
|
|
|
@ -44,7 +44,7 @@
|
||||||
/* 32kHz clock enabled and detected */
|
/* 32kHz clock enabled and detected */
|
||||||
#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
|
#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
|
||||||
|
|
||||||
static cycle_t au1x_counter1_read(struct clocksource *cs)
|
static u64 au1x_counter1_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return alchemy_rdsys(AU1000_SYS_RTCREAD);
|
return alchemy_rdsys(AU1000_SYS_RTCREAD);
|
||||||
}
|
}
|
||||||
|
|
|
@ -98,7 +98,7 @@ void octeon_init_cvmcount(void)
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t octeon_cvmcount_read(struct clocksource *cs)
|
static u64 octeon_cvmcount_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return read_c0_cvmcount();
|
return read_c0_cvmcount();
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,7 +34,7 @@
|
||||||
|
|
||||||
static uint16_t jz4740_jiffies_per_tick;
|
static uint16_t jz4740_jiffies_per_tick;
|
||||||
|
|
||||||
static cycle_t jz4740_clocksource_read(struct clocksource *cs)
|
static u64 jz4740_clocksource_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return jz4740_timer_get_count(TIMER_CLOCKSOURCE);
|
return jz4740_timer_get_count(TIMER_CLOCKSOURCE);
|
||||||
}
|
}
|
||||||
|
|
|
@ -27,7 +27,7 @@ struct txx9_clocksource {
|
||||||
struct txx9_tmr_reg __iomem *tmrptr;
|
struct txx9_tmr_reg __iomem *tmrptr;
|
||||||
};
|
};
|
||||||
|
|
||||||
static cycle_t txx9_cs_read(struct clocksource *cs)
|
static u64 txx9_cs_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
struct txx9_clocksource *txx9_cs =
|
struct txx9_clocksource *txx9_cs =
|
||||||
container_of(cs, struct txx9_clocksource, cs);
|
container_of(cs, struct txx9_clocksource, cs);
|
||||||
|
|
|
@ -25,9 +25,9 @@
|
||||||
|
|
||||||
#include <asm/sibyte/sb1250.h>
|
#include <asm/sibyte/sb1250.h>
|
||||||
|
|
||||||
static cycle_t bcm1480_hpt_read(struct clocksource *cs)
|
static u64 bcm1480_hpt_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
|
return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct clocksource bcm1480_clocksource = {
|
struct clocksource bcm1480_clocksource = {
|
||||||
|
|
|
@ -22,7 +22,7 @@
|
||||||
#include <asm/dec/ioasic.h>
|
#include <asm/dec/ioasic.h>
|
||||||
#include <asm/dec/ioasic_addrs.h>
|
#include <asm/dec/ioasic_addrs.h>
|
||||||
|
|
||||||
static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
|
static u64 dec_ioasic_hpt_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return ioasic_read(IO_REG_FCTR);
|
return ioasic_read(IO_REG_FCTR);
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,7 +11,7 @@
|
||||||
|
|
||||||
#include <asm/time.h>
|
#include <asm/time.h>
|
||||||
|
|
||||||
static cycle_t c0_hpt_read(struct clocksource *cs)
|
static u64 c0_hpt_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return read_c0_count();
|
return read_c0_count();
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,7 +30,7 @@
|
||||||
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
|
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
|
||||||
* again.
|
* again.
|
||||||
*/
|
*/
|
||||||
static inline cycle_t sb1250_hpt_get_cycles(void)
|
static inline u64 sb1250_hpt_get_cycles(void)
|
||||||
{
|
{
|
||||||
unsigned int count;
|
unsigned int count;
|
||||||
void __iomem *addr;
|
void __iomem *addr;
|
||||||
|
@ -41,7 +41,7 @@ static inline cycle_t sb1250_hpt_get_cycles(void)
|
||||||
return SB1250_HPT_VALUE - count;
|
return SB1250_HPT_VALUE - count;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t sb1250_hpt_read(struct clocksource *cs)
|
static u64 sb1250_hpt_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return sb1250_hpt_get_cycles();
|
return sb1250_hpt_get_cycles();
|
||||||
}
|
}
|
||||||
|
|
|
@ -63,7 +63,7 @@ void __init ls1x_pwmtimer_init(void)
|
||||||
ls1x_pwmtimer_restart();
|
ls1x_pwmtimer_restart();
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t ls1x_clocksource_read(struct clocksource *cs)
|
static u64 ls1x_clocksource_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int count;
|
int count;
|
||||||
|
@ -107,7 +107,7 @@ static cycle_t ls1x_clocksource_read(struct clocksource *cs)
|
||||||
|
|
||||||
raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
|
raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
|
||||||
|
|
||||||
return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count;
|
return (u64) (jifs * ls1x_jiffies_per_tick) + count;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource ls1x_clocksource = {
|
static struct clocksource ls1x_clocksource = {
|
||||||
|
|
|
@ -144,7 +144,7 @@ void __init setup_mfgpt0_timer(void)
|
||||||
* to just read by itself. So use jiffies to emulate a free
|
* to just read by itself. So use jiffies to emulate a free
|
||||||
* running counter:
|
* running counter:
|
||||||
*/
|
*/
|
||||||
static cycle_t mfgpt_read(struct clocksource *cs)
|
static u64 mfgpt_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int count;
|
int count;
|
||||||
|
@ -188,7 +188,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
|
||||||
|
|
||||||
raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
|
raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
|
||||||
|
|
||||||
return (cycle_t) (jifs * COMPARE) + count;
|
return (u64) (jifs * COMPARE) + count;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource clocksource_mfgpt = {
|
static struct clocksource clocksource_mfgpt = {
|
||||||
|
|
|
@ -248,9 +248,9 @@ void __init setup_hpet_timer(void)
|
||||||
pr_info("hpet clock event device register\n");
|
pr_info("hpet clock event device register\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t hpet_read_counter(struct clocksource *cs)
|
static u64 hpet_read_counter(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)hpet_read(HPET_COUNTER);
|
return (u64)hpet_read(HPET_COUNTER);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hpet_suspend(struct clocksource *cs)
|
static void hpet_suspend(struct clocksource *cs)
|
||||||
|
|
|
@ -75,7 +75,7 @@ static void __init estimate_frequencies(void)
|
||||||
unsigned int count, start;
|
unsigned int count, start;
|
||||||
unsigned char secs1, secs2, ctrl;
|
unsigned char secs1, secs2, ctrl;
|
||||||
int secs;
|
int secs;
|
||||||
cycle_t giccount = 0, gicstart = 0;
|
u64 giccount = 0, gicstart = 0;
|
||||||
|
|
||||||
#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
|
#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
|
||||||
mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
|
mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
|
||||||
|
|
|
@ -59,14 +59,14 @@ unsigned int get_c0_compare_int(void)
|
||||||
return IRQ_TIMER;
|
return IRQ_TIMER;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t nlm_get_pic_timer(struct clocksource *cs)
|
static u64 nlm_get_pic_timer(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
uint64_t picbase = nlm_get_node(0)->picbase;
|
uint64_t picbase = nlm_get_node(0)->picbase;
|
||||||
|
|
||||||
return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
|
return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
|
static u64 nlm_get_pic_timer32(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
uint64_t picbase = nlm_get_node(0)->picbase;
|
uint64_t picbase = nlm_get_node(0)->picbase;
|
||||||
|
|
||||||
|
|
|
@ -140,7 +140,7 @@ static void __init hub_rt_clock_event_global_init(void)
|
||||||
setup_irq(irq, &hub_rt_irqaction);
|
setup_irq(irq, &hub_rt_irqaction);
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t hub_rt_read(struct clocksource *cs)
|
static u64 hub_rt_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
|
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,7 +13,7 @@
|
||||||
#include <asm/timex.h>
|
#include <asm/timex.h>
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
|
|
||||||
static cycle_t mn10300_read(struct clocksource *cs)
|
static u64 mn10300_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return read_timestamp_counter();
|
return read_timestamp_counter();
|
||||||
}
|
}
|
||||||
|
|
|
@ -81,7 +81,7 @@ static inline unsigned long read_timersnapshot(struct nios2_timer *timer)
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t nios2_timer_read(struct clocksource *cs)
|
static u64 nios2_timer_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs);
|
struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs);
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
|
@ -117,9 +117,9 @@ static __init void openrisc_clockevent_init(void)
|
||||||
* is 32 bits wide and runs at the CPU clock frequency.
|
* is 32 bits wide and runs at the CPU clock frequency.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static cycle_t openrisc_timer_read(struct clocksource *cs)
|
static u64 openrisc_timer_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t) mfspr(SPR_TTCR);
|
return (u64) mfspr(SPR_TTCR);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource openrisc_timer = {
|
static struct clocksource openrisc_timer = {
|
||||||
|
|
|
@ -137,7 +137,7 @@ EXPORT_SYMBOL(profile_pc);
|
||||||
|
|
||||||
/* clock source code */
|
/* clock source code */
|
||||||
|
|
||||||
static cycle_t notrace read_cr16(struct clocksource *cs)
|
static u64 notrace read_cr16(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return get_cycles();
|
return get_cycles();
|
||||||
}
|
}
|
||||||
|
|
|
@ -80,7 +80,7 @@
|
||||||
#include <linux/clockchips.h>
|
#include <linux/clockchips.h>
|
||||||
#include <linux/timekeeper_internal.h>
|
#include <linux/timekeeper_internal.h>
|
||||||
|
|
||||||
static cycle_t rtc_read(struct clocksource *);
|
static u64 rtc_read(struct clocksource *);
|
||||||
static struct clocksource clocksource_rtc = {
|
static struct clocksource clocksource_rtc = {
|
||||||
.name = "rtc",
|
.name = "rtc",
|
||||||
.rating = 400,
|
.rating = 400,
|
||||||
|
@ -89,7 +89,7 @@ static struct clocksource clocksource_rtc = {
|
||||||
.read = rtc_read,
|
.read = rtc_read,
|
||||||
};
|
};
|
||||||
|
|
||||||
static cycle_t timebase_read(struct clocksource *);
|
static u64 timebase_read(struct clocksource *);
|
||||||
static struct clocksource clocksource_timebase = {
|
static struct clocksource clocksource_timebase = {
|
||||||
.name = "timebase",
|
.name = "timebase",
|
||||||
.rating = 400,
|
.rating = 400,
|
||||||
|
@ -802,18 +802,18 @@ void read_persistent_clock(struct timespec *ts)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* clocksource code */
|
/* clocksource code */
|
||||||
static cycle_t rtc_read(struct clocksource *cs)
|
static u64 rtc_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)get_rtc();
|
return (u64)get_rtc();
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t timebase_read(struct clocksource *cs)
|
static u64 timebase_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)get_tb();
|
return (u64)get_tb();
|
||||||
}
|
}
|
||||||
|
|
||||||
void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
|
void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
|
||||||
struct clocksource *clock, u32 mult, cycle_t cycle_last)
|
struct clocksource *clock, u32 mult, u64 cycle_last)
|
||||||
{
|
{
|
||||||
u64 new_tb_to_xs, new_stamp_xsec;
|
u64 new_tb_to_xs, new_stamp_xsec;
|
||||||
u32 frac_sec;
|
u32 frac_sec;
|
||||||
|
|
|
@ -209,7 +209,7 @@ void read_boot_clock64(struct timespec64 *ts)
|
||||||
tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
|
tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t read_tod_clock(struct clocksource *cs)
|
static u64 read_tod_clock(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long long now, adj;
|
unsigned long long now, adj;
|
||||||
|
|
||||||
|
|
|
@ -148,7 +148,7 @@ static unsigned int sbus_cycles_offset(void)
|
||||||
return offset;
|
return offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t timer_cs_read(struct clocksource *cs)
|
static u64 timer_cs_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned int seq, offset;
|
unsigned int seq, offset;
|
||||||
u64 cycles;
|
u64 cycles;
|
||||||
|
|
|
@ -770,7 +770,7 @@ void udelay(unsigned long usecs)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(udelay);
|
EXPORT_SYMBOL(udelay);
|
||||||
|
|
||||||
static cycle_t clocksource_tick_read(struct clocksource *cs)
|
static u64 clocksource_tick_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return tick_ops->get_tick();
|
return tick_ops->get_tick();
|
||||||
}
|
}
|
||||||
|
|
|
@ -83,7 +83,7 @@ static irqreturn_t um_timer(int irq, void *dev)
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t timer_read(struct clocksource *cs)
|
static u64 timer_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return os_nsecs() / TIMER_MULTIPLIER;
|
return os_nsecs() / TIMER_MULTIPLIER;
|
||||||
}
|
}
|
||||||
|
|
|
@ -62,7 +62,7 @@ static struct clock_event_device ckevt_puv3_osmr0 = {
|
||||||
.set_state_oneshot = puv3_osmr0_shutdown,
|
.set_state_oneshot = puv3_osmr0_shutdown,
|
||||||
};
|
};
|
||||||
|
|
||||||
static cycle_t puv3_read_oscr(struct clocksource *cs)
|
static u64 puv3_read_oscr(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return readl(OST_OSCR);
|
return readl(OST_OSCR);
|
||||||
}
|
}
|
||||||
|
|
|
@ -92,10 +92,10 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
|
||||||
return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
|
return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
|
||||||
}
|
}
|
||||||
|
|
||||||
static notrace cycle_t vread_pvclock(int *mode)
|
static notrace u64 vread_pvclock(int *mode)
|
||||||
{
|
{
|
||||||
const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
|
const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
|
||||||
cycle_t ret;
|
u64 ret;
|
||||||
u64 last;
|
u64 last;
|
||||||
u32 version;
|
u32 version;
|
||||||
|
|
||||||
|
@ -142,9 +142,9 @@ static notrace cycle_t vread_pvclock(int *mode)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
notrace static cycle_t vread_tsc(void)
|
notrace static u64 vread_tsc(void)
|
||||||
{
|
{
|
||||||
cycle_t ret = (cycle_t)rdtsc_ordered();
|
u64 ret = (u64)rdtsc_ordered();
|
||||||
u64 last = gtod->cycle_last;
|
u64 last = gtod->cycle_last;
|
||||||
|
|
||||||
if (likely(ret >= last))
|
if (likely(ret >= last))
|
||||||
|
|
|
@ -768,7 +768,7 @@ struct kvm_arch {
|
||||||
spinlock_t pvclock_gtod_sync_lock;
|
spinlock_t pvclock_gtod_sync_lock;
|
||||||
bool use_master_clock;
|
bool use_master_clock;
|
||||||
u64 master_kernel_ns;
|
u64 master_kernel_ns;
|
||||||
cycle_t master_cycle_now;
|
u64 master_cycle_now;
|
||||||
struct delayed_work kvmclock_update_work;
|
struct delayed_work kvmclock_update_work;
|
||||||
struct delayed_work kvmclock_sync_work;
|
struct delayed_work kvmclock_sync_work;
|
||||||
|
|
||||||
|
|
|
@ -14,7 +14,7 @@ static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* some helper functions for xen and kvm pv clock sources */
|
/* some helper functions for xen and kvm pv clock sources */
|
||||||
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
|
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
|
||||||
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
|
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
|
||||||
void pvclock_set_flags(u8 flags);
|
void pvclock_set_flags(u8 flags);
|
||||||
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
|
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
|
||||||
|
@ -87,11 +87,10 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
|
||||||
}
|
}
|
||||||
|
|
||||||
static __always_inline
|
static __always_inline
|
||||||
cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
|
u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
|
||||||
u64 tsc)
|
|
||||||
{
|
{
|
||||||
u64 delta = tsc - src->tsc_timestamp;
|
u64 delta = tsc - src->tsc_timestamp;
|
||||||
cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
|
u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
|
||||||
src->tsc_shift);
|
src->tsc_shift);
|
||||||
return src->system_time + offset;
|
return src->system_time + offset;
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
|
||||||
return rdtsc();
|
return rdtsc();
|
||||||
}
|
}
|
||||||
|
|
||||||
extern struct system_counterval_t convert_art_to_tsc(cycle_t art);
|
extern struct system_counterval_t convert_art_to_tsc(u64 art);
|
||||||
|
|
||||||
extern void tsc_init(void);
|
extern void tsc_init(void);
|
||||||
extern void mark_tsc_unstable(char *reason);
|
extern void mark_tsc_unstable(char *reason);
|
||||||
|
|
|
@ -17,8 +17,8 @@ struct vsyscall_gtod_data {
|
||||||
unsigned seq;
|
unsigned seq;
|
||||||
|
|
||||||
int vclock_mode;
|
int vclock_mode;
|
||||||
cycle_t cycle_last;
|
u64 cycle_last;
|
||||||
cycle_t mask;
|
u64 mask;
|
||||||
u32 mult;
|
u32 mult;
|
||||||
u32 shift;
|
u32 shift;
|
||||||
|
|
||||||
|
|
|
@ -247,7 +247,7 @@ void apbt_setup_secondary_clock(void) {}
|
||||||
static int apbt_clocksource_register(void)
|
static int apbt_clocksource_register(void)
|
||||||
{
|
{
|
||||||
u64 start, now;
|
u64 start, now;
|
||||||
cycle_t t1;
|
u64 t1;
|
||||||
|
|
||||||
/* Start the counter, use timer 2 as source, timer 0/1 for event */
|
/* Start the counter, use timer 2 as source, timer 0/1 for event */
|
||||||
dw_apb_clocksource_start(clocksource_apbt);
|
dw_apb_clocksource_start(clocksource_apbt);
|
||||||
|
@ -355,7 +355,7 @@ unsigned long apbt_quick_calibrate(void)
|
||||||
{
|
{
|
||||||
int i, scale;
|
int i, scale;
|
||||||
u64 old, new;
|
u64 old, new;
|
||||||
cycle_t t1, t2;
|
u64 t1, t2;
|
||||||
unsigned long khz = 0;
|
unsigned long khz = 0;
|
||||||
u32 loop, shift;
|
u32 loop, shift;
|
||||||
|
|
||||||
|
|
|
@ -133,9 +133,9 @@ static uint32_t __init ms_hyperv_platform(void)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t read_hv_clock(struct clocksource *arg)
|
static u64 read_hv_clock(struct clocksource *arg)
|
||||||
{
|
{
|
||||||
cycle_t current_tick;
|
u64 current_tick;
|
||||||
/*
|
/*
|
||||||
* Read the partition counter to get the current tick count. This count
|
* Read the partition counter to get the current tick count. This count
|
||||||
* is set to 0 when the partition is created and is incremented in
|
* is set to 0 when the partition is created and is incremented in
|
||||||
|
|
|
@ -791,7 +791,7 @@ static union hpet_lock hpet __cacheline_aligned = {
|
||||||
{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
|
{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
|
||||||
};
|
};
|
||||||
|
|
||||||
static cycle_t read_hpet(struct clocksource *cs)
|
static u64 read_hpet(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
union hpet_lock old, new;
|
union hpet_lock old, new;
|
||||||
|
@ -802,7 +802,7 @@ static cycle_t read_hpet(struct clocksource *cs)
|
||||||
* Read HPET directly if in NMI.
|
* Read HPET directly if in NMI.
|
||||||
*/
|
*/
|
||||||
if (in_nmi())
|
if (in_nmi())
|
||||||
return (cycle_t)hpet_readl(HPET_COUNTER);
|
return (u64)hpet_readl(HPET_COUNTER);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Read the current state of the lock and HPET value atomically.
|
* Read the current state of the lock and HPET value atomically.
|
||||||
|
@ -821,7 +821,7 @@ static cycle_t read_hpet(struct clocksource *cs)
|
||||||
WRITE_ONCE(hpet.value, new.value);
|
WRITE_ONCE(hpet.value, new.value);
|
||||||
arch_spin_unlock(&hpet.lock);
|
arch_spin_unlock(&hpet.lock);
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
return (cycle_t)new.value;
|
return (u64)new.value;
|
||||||
}
|
}
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
|
||||||
|
@ -843,15 +843,15 @@ static cycle_t read_hpet(struct clocksource *cs)
|
||||||
new.lockval = READ_ONCE(hpet.lockval);
|
new.lockval = READ_ONCE(hpet.lockval);
|
||||||
} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
|
} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
|
||||||
|
|
||||||
return (cycle_t)new.value;
|
return (u64)new.value;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
/*
|
/*
|
||||||
* For UP or 32-bit.
|
* For UP or 32-bit.
|
||||||
*/
|
*/
|
||||||
static cycle_t read_hpet(struct clocksource *cs)
|
static u64 read_hpet(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)hpet_readl(HPET_COUNTER);
|
return (u64)hpet_readl(HPET_COUNTER);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -867,7 +867,7 @@ static struct clocksource clocksource_hpet = {
|
||||||
static int hpet_clocksource_register(void)
|
static int hpet_clocksource_register(void)
|
||||||
{
|
{
|
||||||
u64 start, now;
|
u64 start, now;
|
||||||
cycle_t t1;
|
u64 t1;
|
||||||
|
|
||||||
/* Start the counter */
|
/* Start the counter */
|
||||||
hpet_restart_counter();
|
hpet_restart_counter();
|
||||||
|
|
|
@ -32,7 +32,7 @@
|
||||||
static int kvmclock __ro_after_init = 1;
|
static int kvmclock __ro_after_init = 1;
|
||||||
static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
|
static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
|
||||||
static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
|
static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
|
||||||
static cycle_t kvm_sched_clock_offset;
|
static u64 kvm_sched_clock_offset;
|
||||||
|
|
||||||
static int parse_no_kvmclock(char *arg)
|
static int parse_no_kvmclock(char *arg)
|
||||||
{
|
{
|
||||||
|
@ -79,10 +79,10 @@ static int kvm_set_wallclock(const struct timespec *now)
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t kvm_clock_read(void)
|
static u64 kvm_clock_read(void)
|
||||||
{
|
{
|
||||||
struct pvclock_vcpu_time_info *src;
|
struct pvclock_vcpu_time_info *src;
|
||||||
cycle_t ret;
|
u64 ret;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
|
@ -93,12 +93,12 @@ static cycle_t kvm_clock_read(void)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
|
static u64 kvm_clock_get_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return kvm_clock_read();
|
return kvm_clock_read();
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t kvm_sched_clock_read(void)
|
static u64 kvm_sched_clock_read(void)
|
||||||
{
|
{
|
||||||
return kvm_clock_read() - kvm_sched_clock_offset;
|
return kvm_clock_read() - kvm_sched_clock_offset;
|
||||||
}
|
}
|
||||||
|
|
|
@ -71,10 +71,10 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
|
||||||
return flags & valid_flags;
|
return flags & valid_flags;
|
||||||
}
|
}
|
||||||
|
|
||||||
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
|
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
|
||||||
{
|
{
|
||||||
unsigned version;
|
unsigned version;
|
||||||
cycle_t ret;
|
u64 ret;
|
||||||
u64 last;
|
u64 last;
|
||||||
u8 flags;
|
u8 flags;
|
||||||
|
|
||||||
|
|
|
@ -1101,9 +1101,9 @@ static void tsc_resume(struct clocksource *cs)
|
||||||
* checking the result of read_tsc() - cycle_last for being negative.
|
* checking the result of read_tsc() - cycle_last for being negative.
|
||||||
* That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
|
* That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
|
||||||
*/
|
*/
|
||||||
static cycle_t read_tsc(struct clocksource *cs)
|
static u64 read_tsc(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)rdtsc_ordered();
|
return (u64)rdtsc_ordered();
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1192,7 +1192,7 @@ int unsynchronized_tsc(void)
|
||||||
/*
|
/*
|
||||||
* Convert ART to TSC given numerator/denominator found in detect_art()
|
* Convert ART to TSC given numerator/denominator found in detect_art()
|
||||||
*/
|
*/
|
||||||
struct system_counterval_t convert_art_to_tsc(cycle_t art)
|
struct system_counterval_t convert_art_to_tsc(u64 art)
|
||||||
{
|
{
|
||||||
u64 tmp, res, rem;
|
u64 tmp, res, rem;
|
||||||
|
|
||||||
|
|
|
@ -1131,8 +1131,8 @@ struct pvclock_gtod_data {
|
||||||
|
|
||||||
struct { /* extract of a clocksource struct */
|
struct { /* extract of a clocksource struct */
|
||||||
int vclock_mode;
|
int vclock_mode;
|
||||||
cycle_t cycle_last;
|
u64 cycle_last;
|
||||||
cycle_t mask;
|
u64 mask;
|
||||||
u32 mult;
|
u32 mult;
|
||||||
u32 shift;
|
u32 shift;
|
||||||
} clock;
|
} clock;
|
||||||
|
@ -1572,9 +1572,9 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
#ifdef CONFIG_X86_64
|
||||||
|
|
||||||
static cycle_t read_tsc(void)
|
static u64 read_tsc(void)
|
||||||
{
|
{
|
||||||
cycle_t ret = (cycle_t)rdtsc_ordered();
|
u64 ret = (u64)rdtsc_ordered();
|
||||||
u64 last = pvclock_gtod_data.clock.cycle_last;
|
u64 last = pvclock_gtod_data.clock.cycle_last;
|
||||||
|
|
||||||
if (likely(ret >= last))
|
if (likely(ret >= last))
|
||||||
|
@ -1592,7 +1592,7 @@ static cycle_t read_tsc(void)
|
||||||
return last;
|
return last;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u64 vgettsc(cycle_t *cycle_now)
|
static inline u64 vgettsc(u64 *cycle_now)
|
||||||
{
|
{
|
||||||
long v;
|
long v;
|
||||||
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
||||||
|
@ -1603,7 +1603,7 @@ static inline u64 vgettsc(cycle_t *cycle_now)
|
||||||
return v * gtod->clock.mult;
|
return v * gtod->clock.mult;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
|
static int do_monotonic_boot(s64 *t, u64 *cycle_now)
|
||||||
{
|
{
|
||||||
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
|
||||||
unsigned long seq;
|
unsigned long seq;
|
||||||
|
@ -1624,7 +1624,7 @@ static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* returns true if host is using tsc clocksource */
|
/* returns true if host is using tsc clocksource */
|
||||||
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
|
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
|
||||||
{
|
{
|
||||||
/* checked again under seqlock below */
|
/* checked again under seqlock below */
|
||||||
if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
|
if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
|
||||||
|
|
|
@ -916,7 +916,7 @@ static unsigned long lguest_tsc_khz(void)
|
||||||
* If we can't use the TSC, the kernel falls back to our lower-priority
|
* If we can't use the TSC, the kernel falls back to our lower-priority
|
||||||
* "lguest_clock", where we read the time value given to us by the Host.
|
* "lguest_clock", where we read the time value given to us by the Host.
|
||||||
*/
|
*/
|
||||||
static cycle_t lguest_clock_read(struct clocksource *cs)
|
static u64 lguest_clock_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long sec, nsec;
|
unsigned long sec, nsec;
|
||||||
|
|
||||||
|
|
|
@ -30,7 +30,7 @@
|
||||||
|
|
||||||
#define RTC_NAME "sgi_rtc"
|
#define RTC_NAME "sgi_rtc"
|
||||||
|
|
||||||
static cycle_t uv_read_rtc(struct clocksource *cs);
|
static u64 uv_read_rtc(struct clocksource *cs);
|
||||||
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
|
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
|
||||||
static int uv_rtc_shutdown(struct clock_event_device *evt);
|
static int uv_rtc_shutdown(struct clock_event_device *evt);
|
||||||
|
|
||||||
|
@ -38,7 +38,7 @@ static struct clocksource clocksource_uv = {
|
||||||
.name = RTC_NAME,
|
.name = RTC_NAME,
|
||||||
.rating = 299,
|
.rating = 299,
|
||||||
.read = uv_read_rtc,
|
.read = uv_read_rtc,
|
||||||
.mask = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
|
.mask = (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
|
||||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -296,7 +296,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
|
||||||
* cachelines of it's own page. This allows faster simultaneous reads
|
* cachelines of it's own page. This allows faster simultaneous reads
|
||||||
* from a given socket.
|
* from a given socket.
|
||||||
*/
|
*/
|
||||||
static cycle_t uv_read_rtc(struct clocksource *cs)
|
static u64 uv_read_rtc(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long offset;
|
unsigned long offset;
|
||||||
|
|
||||||
|
@ -305,7 +305,7 @@ static cycle_t uv_read_rtc(struct clocksource *cs)
|
||||||
else
|
else
|
||||||
offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
|
offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
|
||||||
|
|
||||||
return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
|
return (u64)uv_read_local_mmr(UVH_RTC | offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -39,10 +39,10 @@ static unsigned long xen_tsc_khz(void)
|
||||||
return pvclock_tsc_khz(info);
|
return pvclock_tsc_khz(info);
|
||||||
}
|
}
|
||||||
|
|
||||||
cycle_t xen_clocksource_read(void)
|
u64 xen_clocksource_read(void)
|
||||||
{
|
{
|
||||||
struct pvclock_vcpu_time_info *src;
|
struct pvclock_vcpu_time_info *src;
|
||||||
cycle_t ret;
|
u64 ret;
|
||||||
|
|
||||||
preempt_disable_notrace();
|
preempt_disable_notrace();
|
||||||
src = &__this_cpu_read(xen_vcpu)->time;
|
src = &__this_cpu_read(xen_vcpu)->time;
|
||||||
|
@ -51,7 +51,7 @@ cycle_t xen_clocksource_read(void)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
|
static u64 xen_clocksource_get_cycles(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return xen_clocksource_read();
|
return xen_clocksource_read();
|
||||||
}
|
}
|
||||||
|
|
|
@ -67,7 +67,7 @@ void xen_init_irq_ops(void);
|
||||||
void xen_setup_timer(int cpu);
|
void xen_setup_timer(int cpu);
|
||||||
void xen_setup_runstate_info(int cpu);
|
void xen_setup_runstate_info(int cpu);
|
||||||
void xen_teardown_timer(int cpu);
|
void xen_teardown_timer(int cpu);
|
||||||
cycle_t xen_clocksource_read(void);
|
u64 xen_clocksource_read(void);
|
||||||
void xen_setup_cpu_clockevents(void);
|
void xen_setup_cpu_clockevents(void);
|
||||||
void __init xen_init_time_ops(void);
|
void __init xen_init_time_ops(void);
|
||||||
void __init xen_hvm_init_time_ops(void);
|
void __init xen_hvm_init_time_ops(void);
|
||||||
|
|
|
@ -34,9 +34,9 @@
|
||||||
unsigned long ccount_freq; /* ccount Hz */
|
unsigned long ccount_freq; /* ccount Hz */
|
||||||
EXPORT_SYMBOL(ccount_freq);
|
EXPORT_SYMBOL(ccount_freq);
|
||||||
|
|
||||||
static cycle_t ccount_read(struct clocksource *cs)
|
static u64 ccount_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)get_ccount();
|
return (u64)get_ccount();
|
||||||
}
|
}
|
||||||
|
|
||||||
static u64 notrace ccount_sched_clock_read(void)
|
static u64 notrace ccount_sched_clock_read(void)
|
||||||
|
|
|
@ -69,9 +69,9 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
|
||||||
#ifdef CONFIG_IA64
|
#ifdef CONFIG_IA64
|
||||||
static void __iomem *hpet_mctr;
|
static void __iomem *hpet_mctr;
|
||||||
|
|
||||||
static cycle_t read_hpet(struct clocksource *cs)
|
static u64 read_hpet(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)read_counter((void __iomem *)hpet_mctr);
|
return (u64)read_counter((void __iomem *)hpet_mctr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource clocksource_hpet = {
|
static struct clocksource clocksource_hpet = {
|
||||||
|
|
|
@ -58,16 +58,16 @@ u32 acpi_pm_read_verified(void)
|
||||||
return v2;
|
return v2;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t acpi_pm_read(struct clocksource *cs)
|
static u64 acpi_pm_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)read_pmtmr();
|
return (u64)read_pmtmr();
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource clocksource_acpi_pm = {
|
static struct clocksource clocksource_acpi_pm = {
|
||||||
.name = "acpi_pm",
|
.name = "acpi_pm",
|
||||||
.rating = 200,
|
.rating = 200,
|
||||||
.read = acpi_pm_read,
|
.read = acpi_pm_read,
|
||||||
.mask = (cycle_t)ACPI_PM_MASK,
|
.mask = (u64)ACPI_PM_MASK,
|
||||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -81,9 +81,9 @@ static int __init acpi_pm_good_setup(char *__str)
|
||||||
}
|
}
|
||||||
__setup("acpi_pm_good", acpi_pm_good_setup);
|
__setup("acpi_pm_good", acpi_pm_good_setup);
|
||||||
|
|
||||||
static cycle_t acpi_pm_read_slow(struct clocksource *cs)
|
static u64 acpi_pm_read_slow(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)acpi_pm_read_verified();
|
return (u64)acpi_pm_read_verified();
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void acpi_pm_need_workaround(void)
|
static inline void acpi_pm_need_workaround(void)
|
||||||
|
@ -145,7 +145,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
|
||||||
*/
|
*/
|
||||||
static int verify_pmtmr_rate(void)
|
static int verify_pmtmr_rate(void)
|
||||||
{
|
{
|
||||||
cycle_t value1, value2;
|
u64 value1, value2;
|
||||||
unsigned long count, delta;
|
unsigned long count, delta;
|
||||||
|
|
||||||
mach_prepare_counter();
|
mach_prepare_counter();
|
||||||
|
@ -175,7 +175,7 @@ static int verify_pmtmr_rate(void)
|
||||||
|
|
||||||
static int __init init_acpi_pm_clocksource(void)
|
static int __init init_acpi_pm_clocksource(void)
|
||||||
{
|
{
|
||||||
cycle_t value1, value2;
|
u64 value1, value2;
|
||||||
unsigned int i, j = 0;
|
unsigned int i, j = 0;
|
||||||
|
|
||||||
if (!pmtmr_ioport)
|
if (!pmtmr_ioport)
|
||||||
|
|
|
@ -56,7 +56,7 @@ static int noinline arc_get_timer_clk(struct device_node *node)
|
||||||
|
|
||||||
#ifdef CONFIG_ARC_TIMERS_64BIT
|
#ifdef CONFIG_ARC_TIMERS_64BIT
|
||||||
|
|
||||||
static cycle_t arc_read_gfrc(struct clocksource *cs)
|
static u64 arc_read_gfrc(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
u32 l, h;
|
u32 l, h;
|
||||||
|
@ -71,7 +71,7 @@ static cycle_t arc_read_gfrc(struct clocksource *cs)
|
||||||
|
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
|
|
||||||
return (((cycle_t)h) << 32) | l;
|
return (((u64)h) << 32) | l;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource arc_counter_gfrc = {
|
static struct clocksource arc_counter_gfrc = {
|
||||||
|
@ -105,7 +105,7 @@ CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
|
||||||
#define AUX_RTC_LOW 0x104
|
#define AUX_RTC_LOW 0x104
|
||||||
#define AUX_RTC_HIGH 0x105
|
#define AUX_RTC_HIGH 0x105
|
||||||
|
|
||||||
static cycle_t arc_read_rtc(struct clocksource *cs)
|
static u64 arc_read_rtc(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
unsigned long status;
|
unsigned long status;
|
||||||
u32 l, h;
|
u32 l, h;
|
||||||
|
@ -122,7 +122,7 @@ static cycle_t arc_read_rtc(struct clocksource *cs)
|
||||||
status = read_aux_reg(AUX_RTC_CTRL);
|
status = read_aux_reg(AUX_RTC_CTRL);
|
||||||
} while (!(status & _BITUL(31)));
|
} while (!(status & _BITUL(31)));
|
||||||
|
|
||||||
return (((cycle_t)h) << 32) | l;
|
return (((u64)h) << 32) | l;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource arc_counter_rtc = {
|
static struct clocksource arc_counter_rtc = {
|
||||||
|
@ -166,9 +166,9 @@ CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
|
||||||
* 32bit TIMER1 to keep counting monotonically and wraparound
|
* 32bit TIMER1 to keep counting monotonically and wraparound
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static cycle_t arc_read_timer1(struct clocksource *cs)
|
static u64 arc_read_timer1(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
|
return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct clocksource arc_counter_timer1 = {
|
static struct clocksource arc_counter_timer1 = {
|
||||||
|
|
|
@ -562,12 +562,12 @@ static u64 arch_counter_get_cntvct_mem(void)
|
||||||
*/
|
*/
|
||||||
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
|
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
|
||||||
|
|
||||||
static cycle_t arch_counter_read(struct clocksource *cs)
|
static u64 arch_counter_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return arch_timer_read_counter();
|
return arch_timer_read_counter();
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
|
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
|
||||||
{
|
{
|
||||||
return arch_timer_read_counter();
|
return arch_timer_read_counter();
|
||||||
}
|
}
|
||||||
|
|
|
@ -195,7 +195,7 @@ static int gt_dying_cpu(unsigned int cpu)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t gt_clocksource_read(struct clocksource *cs)
|
static u64 gt_clocksource_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
return gt_counter_read();
|
return gt_counter_read();
|
||||||
}
|
}
|
||||||
|
|
|
@ -158,11 +158,11 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
|
||||||
*
|
*
|
||||||
* returns: Current timer counter register value
|
* returns: Current timer counter register value
|
||||||
**/
|
**/
|
||||||
static cycle_t __ttc_clocksource_read(struct clocksource *cs)
|
static u64 __ttc_clocksource_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
|
struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
|
||||||
|
|
||||||
return (cycle_t)readl_relaxed(timer->base_addr +
|
return (u64)readl_relaxed(timer->base_addr +
|
||||||
TTC_COUNT_VAL_OFFSET);
|
TTC_COUNT_VAL_OFFSET);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -30,7 +30,7 @@
|
||||||
|
|
||||||
static void __iomem *clksrc_dbx500_timer_base;
|
static void __iomem *clksrc_dbx500_timer_base;
|
||||||
|
|
||||||
static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
|
static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
void __iomem *base = clksrc_dbx500_timer_base;
|
void __iomem *base = clksrc_dbx500_timer_base;
|
||||||
u32 count, count2;
|
u32 count, count2;
|
||||||
|
|
|
@ -348,7 +348,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
|
||||||
dw_apb_clocksource_read(dw_cs);
|
dw_apb_clocksource_read(dw_cs);
|
||||||
}
|
}
|
||||||
|
|
||||||
static cycle_t __apbt_read_clocksource(struct clocksource *cs)
|
static u64 __apbt_read_clocksource(struct clocksource *cs)
|
||||||
{
|
{
|
||||||
u32 current_count;
|
u32 current_count;
|
||||||
struct dw_apb_clocksource *dw_cs =
|
struct dw_apb_clocksource *dw_cs =
|
||||||
|
@ -357,7 +357,7 @@ static cycle_t __apbt_read_clocksource(struct clocksource *cs)
|
||||||
current_count = apbt_readl_relaxed(&dw_cs->timer,
|
current_count = apbt_readl_relaxed(&dw_cs->timer,
|
||||||
APBTMR_N_CURRENT_VALUE);
|
APBTMR_N_CURRENT_VALUE);
|
||||||
|
|
||||||
return (cycle_t)~current_count;
|
return (u64)~current_count;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void apbt_restart_clocksource(struct clocksource *cs)
|
static void apbt_restart_clocksource(struct clocksource *cs)
|
||||||
|
@ -416,7 +416,7 @@ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
|
||||||
*
|
*
|
||||||
* @dw_cs: The clocksource to read.
|
* @dw_cs: The clocksource to read.
|
||||||
*/
|
*/
|
||||||
cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
|
u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
|
||||||
{
|
{
|
||||||
return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
|
+	return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
 }
@@ -110,9 +110,9 @@ static void em_sti_disable(struct em_sti_priv *p)
 	clk_disable_unprepare(p->clk);
 }

-static cycle_t em_sti_count(struct em_sti_priv *p)
+static u64 em_sti_count(struct em_sti_priv *p)
 {
-	cycle_t ticks;
+	u64 ticks;
 	unsigned long flags;

 	/* the STI hardware buffers the 48-bit count, but to
@@ -121,14 +121,14 @@ static cycle_t em_sti_count(struct em_sti_priv *p)
 	 * Always read STI_COUNT_H before STI_COUNT_L.
 	 */
 	raw_spin_lock_irqsave(&p->lock, flags);
-	ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+	ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
 	ticks |= em_sti_read(p, STI_COUNT_L);
 	raw_spin_unlock_irqrestore(&p->lock, flags);

 	return ticks;
 }

-static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+static u64 em_sti_set_next(struct em_sti_priv *p, u64 next)
 {
 	unsigned long flags;

@@ -198,7 +198,7 @@ static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
 	return container_of(cs, struct em_sti_priv, cs);
 }

-static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+static u64 em_sti_clocksource_read(struct clocksource *cs)
 {
 	return em_sti_count(cs_to_em_sti(cs));
 }
@@ -271,7 +271,7 @@ static int em_sti_clock_event_next(unsigned long delta,
 				   struct clock_event_device *ced)
 {
 	struct em_sti_priv *p = ced_to_em_sti(ced);
-	cycle_t next;
+	u64 next;
 	int safe;

 	next = em_sti_set_next(p, em_sti_count(p) + delta);
@@ -183,7 +183,7 @@ static u64 exynos4_read_count_64(void)
 		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
 	} while (hi != hi2);

-	return ((cycle_t)hi << 32) | lo;
+	return ((u64)hi << 32) | lo;
 }

 /**
@@ -199,7 +199,7 @@ static u32 notrace exynos4_read_count_32(void)
 	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
 }

-static cycle_t exynos4_frc_read(struct clocksource *cs)
+static u64 exynos4_frc_read(struct clocksource *cs)
 {
 	return exynos4_read_count_32();
 }
@@ -266,7 +266,7 @@ static void exynos4_mct_comp0_stop(void)
 static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
 {
 	unsigned int tcon;
-	cycle_t comp_cycle;
+	u64 comp_cycle;

 	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);

@@ -72,7 +72,7 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs)
 	return container_of(cs, struct timer16_priv, cs);
 }

-static cycle_t timer16_clocksource_read(struct clocksource *cs)
+static u64 timer16_clocksource_read(struct clocksource *cs)
 {
 	struct timer16_priv *p = cs_to_priv(cs);
 	unsigned long raw, value;

@@ -64,7 +64,7 @@ static inline struct tpu_priv *cs_to_priv(struct clocksource *cs)
 	return container_of(cs, struct tpu_priv, cs);
 }

-static cycle_t tpu_clocksource_read(struct clocksource *cs)
+static u64 tpu_clocksource_read(struct clocksource *cs)
 {
 	struct tpu_priv *p = cs_to_priv(cs);
 	unsigned long flags;

@@ -25,7 +25,7 @@ EXPORT_SYMBOL(i8253_lock);
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t i8253_read(struct clocksource *cs)
+static u64 i8253_read(struct clocksource *cs)
 {
 	static int old_count;
 	static u32 old_jifs;
@@ -83,7 +83,7 @@ static cycle_t i8253_read(struct clocksource *cs)

 	count = (PIT_LATCH - 1) - count;

-	return (cycle_t)(jifs * PIT_LATCH) + count;
+	return (u64)(jifs * PIT_LATCH) + count;
 }

 static struct clocksource i8253_cs = {

@@ -57,7 +57,7 @@ static notrace u64 jcore_sched_clock_read(void)
 	return seclo * NSEC_PER_SEC + nsec;
 }

-static cycle_t jcore_clocksource_read(struct clocksource *cs)
+static u64 jcore_clocksource_read(struct clocksource *cs)
 {
 	return jcore_sched_clock_read();
 }

@@ -56,7 +56,7 @@ static int metag_timer_set_next_event(unsigned long delta,
 	return 0;
 }

-static cycle_t metag_clocksource_read(struct clocksource *cs)
+static u64 metag_clocksource_read(struct clocksource *cs)
 {
 	return __core_reg_get(TXTIMER);
 }

@@ -125,7 +125,7 @@ static int gic_clockevent_init(void)
 	return 0;
 }

-static cycle_t gic_hpt_read(struct clocksource *cs)
+static u64 gic_hpt_read(struct clocksource *cs)
 {
 	return gic_read_count();
 }

@@ -20,24 +20,24 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
 	return container_of(c, struct clocksource_mmio, clksrc);
 }

-cycle_t clocksource_mmio_readl_up(struct clocksource *c)
+u64 clocksource_mmio_readl_up(struct clocksource *c)
 {
-	return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg);
+	return (u64)readl_relaxed(to_mmio_clksrc(c)->reg);
 }

-cycle_t clocksource_mmio_readl_down(struct clocksource *c)
+u64 clocksource_mmio_readl_down(struct clocksource *c)
 {
-	return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
+	return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }

-cycle_t clocksource_mmio_readw_up(struct clocksource *c)
+u64 clocksource_mmio_readw_up(struct clocksource *c)
 {
-	return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg);
+	return (u64)readw_relaxed(to_mmio_clksrc(c)->reg);
 }

-cycle_t clocksource_mmio_readw_down(struct clocksource *c)
+u64 clocksource_mmio_readw_down(struct clocksource *c)
 {
-	return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
+	return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }

 /**
@@ -51,7 +51,7 @@ cycle_t clocksource_mmio_readw_down(struct clocksource *c)
  */
 int __init clocksource_mmio_init(void __iomem *base, const char *name,
 	unsigned long hz, int rating, unsigned bits,
-	cycle_t (*read)(struct clocksource *))
+	u64 (*read)(struct clocksource *))
 {
 	struct clocksource_mmio *cs;

@@ -97,7 +97,7 @@ static void timrot_irq_acknowledge(void)
 			HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR);
 }

-static cycle_t timrotv1_get_cycles(struct clocksource *cs)
+static u64 timrotv1_get_cycles(struct clocksource *cs)
 {
 	return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
 			& 0xffff0000) >> 16);

@@ -89,7 +89,7 @@ static struct clock_event_device __percpu *msm_evt;

 static void __iomem *source_base;

-static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
+static notrace u64 msm_read_timer_count(struct clocksource *cs)
 {
 	return readl_relaxed(source_base + TIMER_COUNT_VAL);
 }

@@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs)
 	samsung_time_start(pwm.source_id, true);
 }

-static cycle_t notrace samsung_clocksource_read(struct clocksource *c)
+static u64 notrace samsung_clocksource_read(struct clocksource *c)
 {
 	return ~readl_relaxed(pwm.source_reg);
 }

@@ -43,10 +43,10 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
 /* The base timer frequency, * 27 if selected */
 #define HRT_FREQ 1000000

-static cycle_t read_hrt(struct clocksource *cs)
+static u64 read_hrt(struct clocksource *cs)
 {
 	/* Read the timer value */
-	return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
+	return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET);
 }

 static struct clocksource cs_hrt = {

@@ -612,7 +612,7 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
 	return container_of(cs, struct sh_cmt_channel, cs);
 }

-static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+static u64 sh_cmt_clocksource_read(struct clocksource *cs)
 {
 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 	unsigned long flags, raw;

@@ -255,7 +255,7 @@ static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
 	return container_of(cs, struct sh_tmu_channel, cs);
 }

-static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
+static u64 sh_tmu_clocksource_read(struct clocksource *cs)
 {
 	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

@@ -41,7 +41,7 @@

 static void __iomem *tcaddr;

-static cycle_t tc_get_cycles(struct clocksource *cs)
+static u64 tc_get_cycles(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 lower, upper;
@@ -56,7 +56,7 @@ static cycle_t tc_get_cycles(struct clocksource *cs)
 	return (upper << 16) | lower;
 }

-static cycle_t tc_get_cycles32(struct clocksource *cs)
+static u64 tc_get_cycles32(struct clocksource *cs)
 {
 	return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
 }

@@ -67,7 +67,7 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
 	writel(value, base + 0x20 * gpt_id + offset);
 }

-static cycle_t notrace
+static u64 notrace
 pistachio_clocksource_read_cycles(struct clocksource *cs)
 {
 	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
@@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs)
 	counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
 	raw_spin_unlock_irqrestore(&pcs->lock, flags);

-	return (cycle_t)~counter;
+	return (u64)~counter;
 }

 static u64 notrace pistachio_read_sched_clock(void)

@@ -85,7 +85,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
 }

 /* read 64-bit timer counter */
-static cycle_t sirfsoc_timer_read(struct clocksource *cs)
+static u64 sirfsoc_timer_read(struct clocksource *cs)
 {
 	u64 cycles;

@@ -73,7 +73,7 @@ static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsign
  * Clocksource: just a monotonic counter of MCK/16 cycles.
  * We don't care whether or not PIT irqs are enabled.
  */
-static cycle_t read_pit_clk(struct clocksource *cs)
+static u64 read_pit_clk(struct clocksource *cs)
 {
 	struct pit_data *data = clksrc_to_pit_data(cs);
 	unsigned long flags;

@@ -92,7 +92,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
 	return IRQ_NONE;
 }

-static cycle_t read_clk32k(struct clocksource *cs)
+static u64 read_clk32k(struct clocksource *cs)
 {
 	return read_CRTR();
 }

@@ -77,11 +77,11 @@ static int __init nps_get_timer_clk(struct device_node *node,
 	return 0;
 }

-static cycle_t nps_clksrc_read(struct clocksource *clksrc)
+static u64 nps_clksrc_read(struct clocksource *clksrc)
 {
 	int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;

-	return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
+	return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
 }

 static int __init nps_setup_clocksource(struct device_node *node)

@@ -72,7 +72,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
 }

 /* read 64-bit timer counter */
-static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs)
+static u64 notrace sirfsoc_timer_read(struct clocksource *cs)
 {
 	u64 cycles;

@@ -152,7 +152,7 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

-static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+static u64 sun5i_clksrc_read(struct clocksource *clksrc)
 {
 	struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);

@@ -65,11 +65,11 @@ static inline struct ti_32k *to_ti_32k(struct clocksource *cs)
 	return container_of(cs, struct ti_32k, cs);
 }

-static cycle_t notrace ti_32k_read_cycles(struct clocksource *cs)
+static u64 notrace ti_32k_read_cycles(struct clocksource *cs)
 {
 	struct ti_32k *ti = to_ti_32k(cs);

-	return (cycle_t)readl_relaxed(ti->counter);
+	return (u64)readl_relaxed(ti->counter);
 }

 static struct ti_32k ti_32k_timer = {

@@ -53,7 +53,7 @@

 static void __iomem *regbase;

-static cycle_t vt8500_timer_read(struct clocksource *cs)
+static u64 vt8500_timer_read(struct clocksource *cs)
 {
 	int loops = msecs_to_loops(10);
 	writel(3, regbase + TIMER_CTRL_VAL);
@@ -75,7 +75,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
 				    struct clock_event_device *evt)
 {
 	int loops = msecs_to_loops(10);
-	cycle_t alarm = clocksource.read(&clocksource) + cycles;
+	u64 alarm = clocksource.read(&clocksource) + cycles;
 	while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE)
 						&& --loops)
 		cpu_relax();

@@ -135,9 +135,9 @@ u64 hv_do_hypercall(u64 control, void *input, void *output)
 EXPORT_SYMBOL_GPL(hv_do_hypercall);

 #ifdef CONFIG_X86_64
-static cycle_t read_hv_clock_tsc(struct clocksource *arg)
+static u64 read_hv_clock_tsc(struct clocksource *arg)
 {
-	cycle_t current_tick;
+	u64 current_tick;
 	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

 	if (tsc_pg->tsc_sequence != 0) {
@@ -146,7 +146,7 @@ static cycle_t read_hv_clock_tsc(struct clocksource *arg)
 		 */

 		while (1) {
-			cycle_t tmp;
+			u64 tmp;
 			u32 sequence = tsc_pg->tsc_sequence;
 			u64 cur_tsc;
 			u64 scale = tsc_pg->tsc_scale;
@@ -350,7 +350,7 @@ int hv_post_message(union hv_connection_id connection_id,
 static int hv_ce_set_next_event(unsigned long delta,
 				struct clock_event_device *evt)
 {
-	cycle_t current_tick;
+	u64 current_tick;

 	WARN_ON(!clockevent_state_oneshot(evt));

@@ -152,12 +152,12 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
 }

 #ifdef CONFIG_CLKSRC_MIPS_GIC
-cycle_t gic_read_count(void)
+u64 gic_read_count(void)
 {
 	unsigned int hi, hi2, lo;

 	if (mips_cm_is64)
-		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
+		return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

 	do {
 		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
@@ -165,7 +165,7 @@ cycle_t gic_read_count(void)
 		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
 	} while (hi2 != hi);

-	return (((cycle_t) hi) << 32) + lo;
+	return (((u64) hi) << 32) + lo;
 }

 unsigned int gic_get_count_width(void)
@@ -179,7 +179,7 @@ unsigned int gic_get_count_width(void)
 	return bits;
 }

-void gic_write_compare(cycle_t cnt)
+void gic_write_compare(u64 cnt)
 {
 	if (mips_cm_is64) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
@@ -191,7 +191,7 @@ void gic_write_compare(cycle_t cnt)
 	}
 }

-void gic_write_cpu_compare(cycle_t cnt, int cpu)
+void gic_write_cpu_compare(u64 cnt, int cpu)
 {
 	unsigned long flags;

@@ -211,17 +211,17 @@ void gic_write_cpu_compare(cycle_t cnt, int cpu)
 	local_irq_restore(flags);
 }

-cycle_t gic_read_compare(void)
+u64 gic_read_compare(void)
 {
 	unsigned int hi, lo;

 	if (mips_cm_is64)
-		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
+		return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

 	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
 	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

-	return (((cycle_t) hi) << 32) + lo;
+	return (((u64) hi) << 32) + lo;
 }

 void gic_start_count(void)

@@ -122,7 +122,7 @@
 #include "xgbe.h"
 #include "xgbe-common.h"

-static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+static u64 xgbe_cc_read(const struct cyclecounter *cc)
 {
 	struct xgbe_prv_data *pdata = container_of(cc,
 						   struct xgbe_prv_data,

@@ -15223,7 +15223,7 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
 }

 /* Read the PHC */
-static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
 {
 	struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
 	int port = BP_PORT(bp);
Some files were not shown because too many files have changed in this diff.