/*
 * linux/arch/arm/plat-nomadik/timer.c
 *
 * Copyright (C) 2008 STMicroelectronics
 * Copyright (C) 2010 Alessandro Rubini
 * Copyright (C) 2010 Linus Walleij for ST-Ericsson
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/cnt32_to_63.h>
#include <linux/timer.h>
#include <asm/mach/time.h>

#include <plat/mtu.h>

void __iomem *mtu_base; /* Assigned by machine code */
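
/*
 * Note: the machine/board code is expected to map the MTU registers and
 * assign mtu_base before nmdk_timer_init() runs; until that happens,
 * sched_clock() below simply returns 0.
 */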

/*
 * The kernel assumes that sched_clock() can be called early,
 * but the MTU may not yet be initialized.
 */
static cycle_t nmdk_read_timer_dummy(struct clocksource *cs)
{
	return 0;
}

/* clocksource: MTU decrements, so we negate the value being read. */
static cycle_t nmdk_read_timer(struct clocksource *cs)
{
	return -readl(mtu_base + MTU_VAL(0));
}

static struct clocksource nmdk_clksrc = {
	.name = "mtu_0",
	.rating = 200,
	.read = nmdk_read_timer_dummy,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
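
/*
 * The clocksource starts out with the dummy read function; once the MTU
 * has been mapped and programmed, nmdk_timer_init() switches .read over
 * to nmdk_read_timer().
 */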

/*
 * Override the global weak sched_clock symbol with this
 * local implementation which uses the clocksource to get some
 * better resolution when scheduling the kernel.
 *
 * Because the hardware timer period may be quite short
 * (32.3 secs on the 133 MHz MTU timer selection on ux500)
 * and because cnt32_to_63() needs to be called at least once per
 * half period to work properly, a kernel keepwarm() timer is set up
 * to ensure this requirement is always met.
 *
 * Also the sched_clock timer will wrap around at some point,
 * here we set it to run continuously for a year.
 */
#define SCHED_CLOCK_MIN_WRAP 3600*24*365
static struct timer_list cnt32_to_63_keepwarm_timer;
static u32 sched_mult;
static u32 sched_shift;

unsigned long long notrace sched_clock(void)
{
	u64 cycles;

	if (unlikely(!mtu_base))
		return 0;

	cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
	/*
	 * sched_mult is guaranteed to be even so will
	 * shift out bit 63
	 */
	return (cycles * sched_mult) >> sched_shift;
}
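
/*
 * Worked example (illustrative figures, not taken from the code above):
 * at the 2.4 MHz Nomadik rate quoted in nmdk_timer_init(), one tick is
 * roughly 417 ns, so clocks_calc_mult_shift() picks sched_mult and
 * sched_shift such that sched_mult / 2^sched_shift approximates that
 * ratio without the 64-bit product overflowing within
 * SCHED_CLOCK_MIN_WRAP seconds. Forcing sched_mult even guarantees the
 * cnt32_to_63() flag held in bit 63 is shifted out of the product.
 */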

/* Just kick sched_clock every so often */
static void cnt32_to_63_keepwarm(unsigned long data)
{
	mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
	(void) sched_clock();
}

/*
 * Set up a timer to keep sched_clock()'s cnt32_to_63 algorithm warm
 * once in half a 32-bit timer wrap interval.
 */
static void __init nmdk_sched_clock_init(unsigned long rate)
{
	u32 v;
	unsigned long delta;
	u64 days;

	/* Find the appropriate mult and shift factors */
	clocks_calc_mult_shift(&sched_mult, &sched_shift,
			       rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
	/* We need to multiply by an even number to get rid of bit 63 */
	if (sched_mult & 1)
		sched_mult++;

	/* Let's see what we get: take the max counter value and scale it */
	days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
	do_div(days, NSEC_PER_SEC);
	do_div(days, (3600*24));

	pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
		(64 - sched_shift), rate, (unsigned long) days);

	/*
	 * Program a timer to kick us at half 32-bit wraparound
	 * Formula: seconds per wrap = (2^32) / f
	 */
	v = 0xFFFFFFFFUL / rate;
	/* We want half of the wrap time to keep cnt32_to_63 warm */
	v /= 2;
	pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
		 "initialize keepwarm timer every %d seconds\n", rate, v);
	/* Convert seconds to jiffies */
	delta = msecs_to_jiffies(v*1000);
	setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
	mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
}
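
/*
 * Worked example (illustrative, using the 2.4 MHz figure quoted in
 * nmdk_timer_init()): 2^32 / 2.4 MHz is roughly 1790 s per counter wrap,
 * so the keepwarm timer set up above fires about every 894 s (~15 min),
 * comfortably within the half-period that cnt32_to_63() requires.
 */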

/* Clockevent device: use one-shot mode */
static void nmdk_clkevt_mode(enum clock_event_mode mode,
			     struct clock_event_device *dev)
{
	u32 cr;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		pr_err("%s: periodic mode not supported\n", __func__);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* Load highest value, enable device, enable interrupts */
		cr = readl(mtu_base + MTU_CR(1));
		writel(0, mtu_base + MTU_LR(1));
		writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
		writel(1 << 1, mtu_base + MTU_IMSC);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		/* disable irq */
		writel(0, mtu_base + MTU_IMSC);
		/* disable timer */
		cr = readl(mtu_base + MTU_CR(1));
		cr &= ~MTU_CRn_ENA;
		writel(cr, mtu_base + MTU_CR(1));
		/* load some high default value */
		writel(0xffffffff, mtu_base + MTU_LR(1));
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
	/* writing the value has immediate effect */
	writel(evt, mtu_base + MTU_LR(1));
	return 0;
}

static struct clock_event_device nmdk_clkevt = {
	.name = "mtu_1",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.rating = 200,
	.set_mode = nmdk_clkevt_mode,
	.set_next_event = nmdk_clkevt_next,
};
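
/*
 * max_delta_ns, min_delta_ns and cpumask are filled in at runtime by
 * nmdk_timer_init() below, once the (possibly prescaled) clock rate is
 * known.
 */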

/*
 * IRQ Handler for timer 1 of the MTU block.
 */
static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = dev_id;

	writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
	evdev->event_handler(evdev);
	return IRQ_HANDLED;
}

static struct irqaction nmdk_timer_irq = {
	.name = "Nomadik Timer Tick",
	.flags = IRQF_DISABLED | IRQF_TIMER,
	.handler = nmdk_timer_interrupt,
	.dev_id = &nmdk_clkevt,
};
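
/*
 * dev_id carries the clockevent device, so nmdk_timer_interrupt() can
 * acknowledge timer 1 and forward the tick to its event_handler.
 */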

void __init nmdk_timer_init(void)
{
	unsigned long rate;
	struct clk *clk0;
	u32 cr = MTU_CRn_32BITS;

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));

	clk_enable(clk0);

	/*
	 * The tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or
	 * 133 MHz for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32 MHz.
	 * At 32 MHz, the timer (with its 32-bit counter) can be programmed
	 * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer
	 * by 16 would give too low a timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		cr |= MTU_CRn_PRESCALE_16;
	} else {
		cr |= MTU_CRn_PRESCALE_1;
	}
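
	/*
	 * Illustrative figures: a 133 MHz ux500 clock ends up counting at
	 * 133 / 16 = 8.3125 MHz after prescaling, while the 2.4 MHz Nomadik
	 * clock is used unprescaled.
	 */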
	clocksource_calc_mult_shift(&nmdk_clksrc, rate, MTU_MIN_RANGE);

	/* Timer 0 is the free running clocksource */
	writel(cr, mtu_base + MTU_CR(0));
	writel(0, mtu_base + MTU_LR(0));
	writel(0, mtu_base + MTU_BGLR(0));
	writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));

	/* Now the clock source is ready */
	nmdk_clksrc.read = nmdk_read_timer;

	if (clocksource_register(&nmdk_clksrc))
		pr_err("timer: failed to initialize clock source %s\n",
		       nmdk_clksrc.name);

	nmdk_sched_clock_init(rate);

	/* Timer 1 is used for events */

	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);

	writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */

	nmdk_clkevt.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
	nmdk_clkevt.min_delta_ns =
		clockevent_delta2ns(0x00000002, &nmdk_clkevt);
	nmdk_clkevt.cpumask = cpumask_of(0);

	/* Register irq and clockevents */
	setup_irq(IRQ_MTU0, &nmdk_timer_irq);
	clockevents_register_device(&nmdk_clkevt);
}
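
/*
 * Typical usage (an assumption about the surrounding platform code, not
 * something defined in this file): the board/machine code maps the MTU
 * registers into mtu_base and calls nmdk_timer_init() from its ARM
 * sys_timer .init hook.
 */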