linux/arch/um/kernel/time.c

/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include "linux/interrupt.h"
#include "linux/jiffies.h"
#include "linux/threads.h"
#include "asm/irq.h"
#include "asm/param.h"
#include "kern_util.h"
#include "os.h"

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
        return (unsigned long long)jiffies_64 * (1000000000 / HZ);
}
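
/*
 * sched_clock() above is jiffies based, so it advances only once per
 * tick; the multiplication simply converts jiffies into nanoseconds
 * (1000000000 / HZ nanoseconds per tick).
 */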

#ifdef CONFIG_UML_REAL_TIME_CLOCK
static unsigned long long prev_nsecs[NR_CPUS];
static long long delta[NR_CPUS];        /* Deviation per interval */
#endif
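
/*
 * Deliver timer ticks.  With CONFIG_UML_REAL_TIME_CLOCK the nanoseconds
 * that elapsed on the host since the last interrupt are accumulated in
 * delta[] and converted into whole ticks; the sub-tick remainder stays
 * in delta[] so no time is lost across interrupts.  For example, with
 * HZ == 100, 25 ms of elapsed host time yields 2 ticks and leaves 5 ms
 * in delta[].  Without the option, every interrupt counts as one tick.
 */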
void timer_irq(struct uml_pt_regs *regs)
{
        unsigned long long ticks = 0;
#ifdef CONFIG_UML_REAL_TIME_CLOCK
        int c = cpu();
        if (prev_nsecs[c]) {
                /* We've had 1 tick */
                unsigned long long nsecs = os_nsecs();

                delta[c] += nsecs - prev_nsecs[c];
                prev_nsecs[c] = nsecs;

                /* Protect against the host clock being set backwards */
                if (delta[c] < 0)
                        delta[c] = 0;

                ticks += (delta[c] * HZ) / BILLION;
                delta[c] -= (ticks * BILLION) / HZ;
        }
        else prev_nsecs[c] = os_nsecs();
#else
        ticks = 1;
#endif
        while (ticks > 0) {
                do_IRQ(TIMER_IRQ, regs);
                ticks--;
        }
}

/* Protects local_offset */
static DEFINE_SPINLOCK(timer_spinlock);
static unsigned long long local_offset = 0;

static inline unsigned long long get_time(void)
{
        unsigned long long nsecs;
        unsigned long flags;

        spin_lock_irqsave(&timer_spinlock, flags);
        nsecs = os_nsecs();
        nsecs += local_offset;
        spin_unlock_irqrestore(&timer_spinlock, flags);

        return nsecs;
}
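
/*
 * Interrupt handler for the timer - runs once per tick under xtime_lock.
 * It advances jiffies via do_timer(1) and then rewrites xtime: with
 * CONFIG_UML_REAL_TIME_CLOCK from the offset-adjusted host clock,
 * otherwise by adding one tick period (BILLION / HZ) to the old value.
 */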
irqreturn_t um_timer(int irq, void *dev)
{
        unsigned long long nsecs;
        unsigned long flags;

        write_seqlock_irqsave(&xtime_lock, flags);

        do_timer(1);

#ifdef CONFIG_UML_REAL_TIME_CLOCK
        nsecs = get_time();
#else
        nsecs = (unsigned long long) xtime.tv_sec * BILLION + xtime.tv_nsec +
                BILLION / HZ;
#endif
        xtime.tv_sec = nsecs / NSEC_PER_SEC;
        xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;

        write_sequnlock_irqrestore(&xtime_lock, flags);

        return IRQ_HANDLED;
}
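
/*
 * Runs as late_time_init (see time_init() below): it initializes the
 * host timer, wires TIMER_IRQ to um_timer() and programs the tick
 * interval.
 */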
static void register_timer(void)
{
        int err;

        timer_init();

        err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL);
        if (err != 0)
                printk(KERN_ERR "register_timer : request_irq failed - "
                       "errno = %d\n", -err);

        err = set_interval();
        if (err != 0)
                printk(KERN_ERR "register_timer : set_interval failed - "
                       "errno = %d\n", -err);
}

extern void (*late_time_init)(void);

void time_init(void)
{
        long long nsecs;

        nsecs = os_nsecs();
        set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
                                -nsecs % BILLION);
        set_normalized_timespec(&xtime, nsecs / BILLION, nsecs % BILLION);
        late_time_init = register_timer;
}
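
/*
 * Entry point from the timer signal.  Only CPU 0 drives global
 * timekeeping through timer_irq(); every CPU accounts its own process
 * times inside an irq_enter()/irq_exit() section.
 */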
void timer_handler(int sig, struct uml_pt_regs *regs)
{
        if (current_thread->cpu == 0)
                timer_irq(regs);

        local_irq_disable();
        irq_enter();
        update_process_times(regs->is_user);
        irq_exit();
        local_irq_enable();
}