/*
 * arch/xtensa/kernel/time.c
 *
 * Timer and clock support.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>

#include <asm/timex.h>
#include <asm/platform.h>

unsigned long ccount_freq;		/* ccount Hz */
EXPORT_SYMBOL(ccount_freq);

static u64 ccount_read(struct clocksource *cs)
{
	return (u64)get_ccount();
}

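/*
 * sched_clock backend: return the raw cycle counter. Marked notrace so
 * that the function tracer, which itself takes timestamps from
 * sched_clock, does not recurse into it.
 */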
static u64 notrace ccount_sched_clock_read(void)
{
	return get_ccount();
}

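/*
 * CCOUNT is a free-running 32-bit cycle counter; register it as a
 * continuous clocksource running at ccount_freq Hz.
 */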
static struct clocksource ccount_clocksource = {
	.name = "ccount",
	.rating = 200,
	.read = ccount_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

struct ccount_timer {
	struct clock_event_device evt;
	int irq_enabled;
	char name[24];
};

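/*
 * Arm the comparator behind set_linux_timer() to fire 'delta' cycles
 * from now. If the counter has already passed the target by the time it
 * is re-read, report -ETIME to the clockevents core.
 */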
static int ccount_timer_set_next_event(unsigned long delta,
		struct clock_event_device *dev)
{
	unsigned long flags, next;
	int ret = 0;

	local_irq_save(flags);
	next = get_ccount() + delta;
	set_linux_timer(next);
	if (next - get_ccount() > delta)
		ret = -ETIME;
	local_irq_restore(flags);

	return ret;
}

/*
 * There is no way to disable the timer interrupt at the device level,
 * only at the intenable register itself. Since enable_irq/disable_irq
 * calls are nested, we need to make sure that these calls are
 * balanced.
 */
static int ccount_timer_shutdown(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (timer->irq_enabled) {
		disable_irq_nosync(evt->irq);
		timer->irq_enabled = 0;
	}
	return 0;
}

static int ccount_timer_set_oneshot(struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	if (!timer->irq_enabled) {
		enable_irq(evt->irq);
		timer->irq_enabled = 1;
	}
	return 0;
}

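/*
 * One clock_event_device per CPU. Only one-shot operation is advertised
 * (CLOCK_EVT_FEAT_ONESHOT): the comparator fires once per programmed
 * value, so periodic ticks have to be emulated by reprogramming it.
 */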
static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = {
	.evt = {
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 300,
		.set_next_event = ccount_timer_set_next_event,
		.set_state_shutdown = ccount_timer_shutdown,
		.set_state_oneshot = ccount_timer_set_oneshot,
		.tick_resume = ccount_timer_set_oneshot,
	},
};

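/*
 * Timer interrupt handler. Writing the current comparator value back
 * (set_linux_timer(get_linux_timer())) acknowledges the pending timer
 * interrupt before the clockevent handler is invoked.
 */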
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;

	set_linux_timer(get_linux_timer());
	evt->event_handler(evt);

	/* Allow platform to do something useful (Wdog). */
	platform_heartbeat();

	return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
	.handler = timer_interrupt,
	.flags = IRQF_TIMER,
	.name = "timer",
};

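/*
 * Register this CPU's clockevent: map the hardware timer interrupt and
 * hand the device to the clockevents core at ccount_freq Hz, with a
 * minimum programmable delta of 0xf cycles and a maximum of 0xffffffff
 * (the full 32-bit counter range).
 */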
void local_timer_setup(unsigned cpu)
{
	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
	struct clock_event_device *clockevent = &timer->evt;

	timer->irq_enabled = 1;
	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
	clockevent->name = timer->name;
	clockevent->cpumask = cpumask_of(cpu);
	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
	if (WARN(!clockevent->irq, "error: can't map timer irq"))
		return;
	clockevents_config_and_register(clockevent, ccount_freq,
					0xf, 0xffffffff);
}

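/*
 * Determine the CCOUNT frequency. With CONFIG_OF, try the clock bound to
 * the "cdns,xtensa-cpu" node in the device tree first; otherwise, or if
 * that lookup fails, fall back to platform_calibrate_ccount().
 */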
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
#ifdef CONFIG_OF
static void __init calibrate_ccount(void)
{
	struct device_node *cpu;
	struct clk *clk;

	cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu");
	if (cpu) {
		clk = of_clk_get(cpu, 0);
		if (!IS_ERR(clk)) {
			ccount_freq = clk_get_rate(clk);
			return;
		} else {
			pr_warn("%s: CPU input clock not found\n",
				__func__);
		}
	} else {
		pr_warn("%s: CPU node not found in the device tree\n",
			__func__);
	}

	platform_calibrate_ccount();
}
#else
static inline void calibrate_ccount(void)
{
	platform_calibrate_ccount();
}
#endif
#endif

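/*
 * Early time init: initialize device-tree clocks, establish ccount_freq
 * (calibrated or taken from CONFIG_XTENSA_CPU_CLOCK), register the
 * ccount clocksource and sched_clock, and set up the boot CPU's tick
 * timer.
 */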
void __init time_init(void)
{
	of_clk_init(NULL);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	pr_info("Calibrating CPU frequency ");
	calibrate_ccount();
	pr_cont("%d.%02d MHz\n",
		(int)ccount_freq / 1000000,
		(int)(ccount_freq / 10000) % 100);
#else
	ccount_freq = CONFIG_XTENSA_CPU_CLOCK * 1000000UL;
#endif
	WARN(!ccount_freq,
	     "%s: CPU clock frequency is not set up correctly\n",
	     __func__);
	clocksource_register_hz(&ccount_clocksource, ccount_freq);
	local_timer_setup(0);
	setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
	timer_probe();
}

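/*
 * When the generic calibration is not used, loops_per_jiffy is derived
 * directly from ccount_freq rather than measured, so the delay loop
 * calibration step is skipped.
 */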
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void calibrate_delay(void)
{
	loops_per_jiffy = ccount_freq / HZ;
	pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
		loops_per_jiffy / (1000000 / HZ),
		(loops_per_jiffy / (10000 / HZ)) % 100);
}
#endif