arch/tile: Use separate, better minsec values for clocksource and sched_clock.

We were using the same 5-sec minsec for the clocksource and sched_clock
that we were using for the clock_event_device.  For the clock_event_device
that's exactly right since it has a short maximum countdown time.
But for sched_clock we want to avoid wraparound when converting from
ticks to nsec over a much longer window, so we force a shift of 10.
And for clocksource it seems dodgy to use a 5-sec minsec as well, so we
copy some other platforms and force a shift of 22.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
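
For readers who want to see the arithmetic behind the commit message, below is a small standalone userspace sketch (not part of the patch, and not kernel code) that mirrors the math of clocksource_hz2mult() and clocksource_cyc2ns() and shows how long the 64-bit cycles*mult product lasts before it overflows for each shift. The 700 MHz clock rate is an assumed example, not a value taken from the patch.

/*
 * Userspace sketch: why sched_clock() gets a small shift (10) while
 * the clocksource keeps the conventional shift of 22.  A larger shift
 * means a larger mult, so the 64-bit product cycles * mult overflows
 * sooner when converting the free-running cycle counter to nsec.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Compute mult such that ns ~= (cycles * mult) >> shift, as hz2mult does. */
static uint32_t hz2mult(uint64_t hz, unsigned int shift)
{
	return (uint32_t)(((NSEC_PER_SEC << shift) + hz / 2) / hz);
}

int main(void)
{
	uint64_t hz = 700000000ULL;		/* assumed cycle counter rate */
	unsigned int shifts[] = { 10, 22 };

	for (int i = 0; i < 2; i++) {
		unsigned int shift = shifts[i];
		uint32_t mult = hz2mult(hz, shift);
		/* The product cycles * mult must stay below 2^64. */
		uint64_t max_cycles = UINT64_MAX / mult;
		uint64_t max_secs = max_cycles / hz;

		printf("shift %2u: mult=%8u, 64-bit product wraps after %llu s (~%llu hours)\n",
		       shift, mult,
		       (unsigned long long)max_secs,
		       (unsigned long long)(max_secs / 3600));
	}
	return 0;
}

With these assumed numbers the conversion wraps after roughly an hour at shift 22 but only after several months at shift 10, which is why sched_clock(), which converts the raw 64-bit counter directly, gets its own smaller shift, while the clocksource (whose framework never converts over such long windows in one step) can use the typical value of 22.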
Chris Metcalf, 2010-08-13 08:24:22 -04:00
commit 749dc6f252 (parent bc63de7c5b)
1 changed file with 19 additions and 14 deletions

arch/tile/kernel/time.c

@@ -36,16 +36,6 @@
 /* How many cycles per second we are running at. */
 static cycles_t cycles_per_sec __write_once;
 
-/*
- * We set up shift and multiply values with a minsec of five seconds,
- * since our timer counter counts down 31 bits at a frequency of
- * no less than 500 MHz. See @minsec for clocks_calc_mult_shift().
- * We could use a different value for the 64-bit free-running
- * cycle counter, but we use the same one for consistency, and since
- * we will be reasonably precise with this value anyway.
- */
-#define TILE_MINSEC 5
-
 cycles_t get_clock_rate(void)
 {
 	return cycles_per_sec;
@@ -68,6 +58,14 @@ cycles_t get_cycles(void)
 }
 #endif
 
+/*
+ * We use a relatively small shift value so that sched_clock()
+ * won't wrap around very often.
+ */
+#define SCHED_CLOCK_SHIFT 10
+
+static unsigned long sched_clock_mult __write_once;
+
 static cycles_t clocksource_get_cycles(struct clocksource *cs)
 {
 	return get_cycles();
@@ -78,6 +76,7 @@ static struct clocksource cycle_counter_cs = {
 	.rating = 300,
 	.read = clocksource_get_cycles,
 	.mask = CLOCKSOURCE_MASK(64),
+	.shift = 22,   /* typical value, e.g. x86 tsc uses this */
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -88,8 +87,10 @@ static struct clocksource cycle_counter_cs = {
 void __init setup_clock(void)
 {
 	cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
-	clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec,
-				    TILE_MINSEC);
+	sched_clock_mult =
+		clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
+	cycle_counter_cs.mult =
+		clocksource_hz2mult(cycles_per_sec, cycle_counter_cs.shift);
 }
 
 void __init calibrate_delay(void)
@@ -117,9 +118,14 @@ void __init time_init(void)
  * counter, plus bit 31, which signifies that the counter has wrapped
  * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be
  * raised as long as bit 31 is set.
+ *
+ * The TILE_MINSEC value represents the largest range of real-time
+ * we can possibly cover with the timer, based on MAX_TICK combined
+ * with the slowest reasonable clock rate we might run at.
  */
 #define MAX_TICK 0x7fffffff   /* we have 31 bits of countdown timer */
 
+#define TILE_MINSEC 5   /* timer covers no more than 5 seconds */
+
 static int tile_timer_set_next_event(unsigned long ticks,
 				     struct clock_event_device *evt)
@@ -211,8 +217,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 unsigned long long sched_clock(void)
 {
 	return clocksource_cyc2ns(get_cycles(),
-				  cycle_counter_cs.mult,
-				  cycle_counter_cs.shift);
+				  sched_clock_mult, SCHED_CLOCK_SHIFT);
 }
 
 int setup_profiling_timer(unsigned int multiplier)