2007-07-21 23:10:01 +08:00
|
|
|
/*
|
|
|
|
* Copyright 2006 Andi Kleen, SUSE Labs.
|
|
|
|
* Subject to the GNU Public License, v.2
|
|
|
|
*
|
2011-05-23 21:31:30 +08:00
|
|
|
* Fast user context implementation of clock_gettime, gettimeofday, and time.
|
2007-07-21 23:10:01 +08:00
|
|
|
*
|
2014-03-18 06:22:09 +08:00
|
|
|
* 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
|
|
|
|
* sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
|
|
|
|
*
|
2007-07-21 23:10:01 +08:00
|
|
|
* The code should have no internal unresolved relocations.
|
|
|
|
* Check with readelf after changing.
|
|
|
|
*/
|
|
|
|
|
2014-03-18 06:22:09 +08:00
|
|
|
#include <uapi/linux/time.h>
|
2007-07-21 23:10:01 +08:00
|
|
|
#include <asm/vgtod.h>
|
|
|
|
#include <asm/hpet.h>
|
2014-03-18 06:22:10 +08:00
|
|
|
#include <asm/vvar.h>
|
2007-07-21 23:10:01 +08:00
|
|
|
#include <asm/unistd.h>
|
2014-03-18 06:22:10 +08:00
|
|
|
#include <asm/msr.h>
|
|
|
|
#include <linux/math64.h>
|
|
|
|
#include <linux/time.h>
|
2007-07-21 23:10:01 +08:00
|
|
|
|
2011-05-23 21:31:24 +08:00
|
|
|
#define gtod (&VVAR(vsyscall_gtod_data))
|
2007-07-21 23:10:01 +08:00
|
|
|
|
2014-03-18 06:22:09 +08:00
|
|
|
/* Prototypes for the vDSO entry points defined later in this file. */
extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);
|
|
|
|
|
2014-03-18 06:22:10 +08:00
|
|
|
#ifdef CONFIG_HPET_TIMER
|
2014-05-06 03:19:36 +08:00
|
|
|
extern u8 hpet_page
|
|
|
|
__attribute__((visibility("hidden")));
|
|
|
|
|
|
|
|
static notrace cycle_t vread_hpet(void)
|
2014-03-18 06:22:10 +08:00
|
|
|
{
|
2014-05-06 03:19:36 +08:00
|
|
|
return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
|
2014-03-18 06:22:10 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-03-18 06:22:09 +08:00
|
|
|
#ifndef BUILD_VDSO32
|
|
|
|
|
2014-03-18 06:22:10 +08:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <asm/vsyscall.h>
|
|
|
|
#include <asm/fixmap.h>
|
|
|
|
#include <asm/pvclock.h>
|
|
|
|
|
2014-03-18 06:22:03 +08:00
|
|
|
/*
 * Fall back to the real clock_gettime() system call when the requested
 * clock cannot be read in user context (e.g. vclock_mode is VCLOCK_NONE).
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}
|
|
|
|
|
2014-03-18 06:22:03 +08:00
|
|
|
/*
 * Fall back to the real gettimeofday() system call when the fast path
 * cannot be used.  "memory" clobber because the kernel writes *tv/*tz.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
|
|
|
|
|
2012-11-28 09:28:57 +08:00
|
|
|
#ifdef CONFIG_PARAVIRT_CLOCK
|
|
|
|
|
|
|
|
static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
|
|
|
|
{
|
|
|
|
const struct pvclock_vsyscall_time_info *pvti_base;
|
|
|
|
int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
|
|
|
|
int offset = cpu % (PAGE_SIZE/PVTI_SIZE);
|
|
|
|
|
|
|
|
BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);
|
|
|
|
|
|
|
|
pvti_base = (struct pvclock_vsyscall_time_info *)
|
|
|
|
__fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);
|
|
|
|
|
|
|
|
return &pvti_base[offset];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Read the paravirtualized clock.  Returns the current cycle count;
 * sets *mode to VCLOCK_NONE if the host does not guarantee a stable
 * TSC (PVCLOCK_TSC_STABLE_BIT clear), forcing callers to fall back.
 */
static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vsyscall_time_info *pvti;
	cycle_t ret;
	u64 last;
	u32 version;
	u8 flags;
	unsigned cpu, cpu1;


	/*
	 * Note: hypervisor must guarantee that:
	 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
	 * 2. that per-CPU pvclock time info is updated if the
	 *    underlying CPU changes.
	 * 3. that version is increased whenever underlying CPU
	 *    changes.
	 */
	do {
		cpu = __getcpu() & VGETCPU_CPU_MASK;
		/* TODO: We can put vcpu id into higher bits of pvti.version.
		 * This will save a couple of cycles by getting rid of
		 * __getcpu() calls (Gleb).
		 */

		pvti = get_pvti(cpu);

		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

		/*
		 * Test we're still on the cpu as well as the version.
		 * We could have been migrated just after the first
		 * vgetcpu but before fetching the version, so we
		 * wouldn't notice a version change.
		 */
		cpu1 = __getcpu() & VGETCPU_CPU_MASK;
	} while (unlikely(cpu != cpu1 ||
			  (pvti->pvti.version & 1) ||
			  pvti->pvti.version != version));

	if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
		*mode = VCLOCK_NONE;

	/* refer to tsc.c read_tsc() comment for rationale */
	last = gtod->cycle_last;

	/* Clamp to cycle_last so time never appears to go backwards. */
	if (likely(ret >= last))
		return ret;

	return last;
}
|
|
|
|
#endif
|
|
|
|
|
2014-03-18 06:22:09 +08:00
|
|
|
#else
|
|
|
|
|
|
|
|
/*
 * 32-bit fallback to the real clock_gettime() via __kernel_vsyscall.
 * %ebx is the PIC register on x86-32, so it is saved in %edx around
 * the call instead of being listed as a clobber.
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * 32-bit fallback to the real gettimeofday() via __kernel_vsyscall.
 * Same %ebx save/restore dance as vdso_fallback_gettime() above.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_PARAVIRT_CLOCK
|
|
|
|
|
|
|
|
static notrace cycle_t vread_pvclock(int *mode)
|
|
|
|
{
|
|
|
|
*mode = VCLOCK_NONE;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2014-03-18 06:22:03 +08:00
|
|
|
/*
 * Read the TSC, clamped to the clocksource's last update so successive
 * readers never observe time going backwards across an update.
 */
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)__native_read_tsc();

	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
|
2012-03-02 14:11:09 +08:00
|
|
|
|
2012-11-28 09:28:57 +08:00
|
|
|
/*
 * Return the shifted-nanosecond delta since gtod->cycle_last for the
 * active vclock mode.  Returns 0 for an unusable mode; vread_pvclock()
 * may additionally flip *mode to VCLOCK_NONE when the pv TSC is unstable.
 */
notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_HPET_TIMER
	else if (gtod->vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	/* Cycle delta, masked to the counter width, scaled by mult.
	 * The result is still left-shifted by gtod->shift. */
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}
|
|
|
|
|
2012-03-23 12:15:52 +08:00
|
|
|
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
/*
 * Fill *ts with CLOCK_REALTIME.  Returns the vclock mode used;
 * VCLOCK_NONE tells the caller the read failed and a syscall is needed.
 */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	/* Seqcount read loop: retry if the timekeeper updated gtod. */
	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	/* Fold whole seconds out of the nanosecond accumulator. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}
|
|
|
|
|
2014-03-18 06:22:09 +08:00
|
|
|
/*
 * Fill *ts with CLOCK_MONOTONIC.  Same seqcount/read/normalize pattern
 * as do_realtime(); returns the vclock mode (VCLOCK_NONE on failure).
 */
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	/* Fold whole seconds out of the nanosecond accumulator. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}
|
|
|
|
|
2014-03-18 06:22:04 +08:00
|
|
|
notrace static void do_realtime_coarse(struct timespec *ts)
|
2009-08-20 10:13:34 +08:00
|
|
|
{
|
|
|
|
unsigned long seq;
|
|
|
|
do {
|
2014-03-18 06:22:10 +08:00
|
|
|
seq = gtod_read_begin(gtod);
|
|
|
|
ts->tv_sec = gtod->wall_time_coarse_sec;
|
|
|
|
ts->tv_nsec = gtod->wall_time_coarse_nsec;
|
|
|
|
} while (unlikely(gtod_read_retry(gtod, seq)));
|
2009-08-20 10:13:34 +08:00
|
|
|
}
|
|
|
|
|
2014-03-18 06:22:04 +08:00
|
|
|
notrace static void do_monotonic_coarse(struct timespec *ts)
|
2009-08-20 10:13:34 +08:00
|
|
|
{
|
2012-03-23 12:15:51 +08:00
|
|
|
unsigned long seq;
|
2009-08-20 10:13:34 +08:00
|
|
|
do {
|
2014-03-18 06:22:10 +08:00
|
|
|
seq = gtod_read_begin(gtod);
|
|
|
|
ts->tv_sec = gtod->monotonic_time_coarse_sec;
|
|
|
|
ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
|
|
|
|
} while (unlikely(gtod_read_retry(gtod, seq)));
|
2009-08-20 10:13:34 +08:00
|
|
|
}
|
|
|
|
|
2008-05-13 03:20:41 +08:00
|
|
|
/*
 * clock_gettime() vDSO entry point.  Serves the clocks readable in
 * user context; unknown clock IDs and unusable clocksources fall
 * back to the real system call.
 */
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
|
|
|
|
/* Export the entry point under its libc name as a weak dynamic symbol. */
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
|
|
|
|
|
2008-05-13 03:20:41 +08:00
|
|
|
/*
 * gettimeofday() vDSO entry point.  Reuses do_realtime() by aliasing
 * the timeval as a timespec, then converts nanoseconds to microseconds.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		/* tv_usec temporarily holds nanoseconds after this call. */
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
|
|
|
|
/* Export the entry point under its libc name as a weak dynamic symbol. */
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
|
2011-05-23 21:31:30 +08:00
|
|
|
|
2011-06-06 01:50:20 +08:00
|
|
|
/*
|
|
|
|
* This will break when the xtime seconds get inaccurate, but that is
|
|
|
|
* unlikely
|
|
|
|
*/
|
2011-05-23 21:31:30 +08:00
|
|
|
notrace time_t __vdso_time(time_t *t)
{
	/*
	 * A single aligned word load is atomic on x86, so the seconds
	 * field can be read without taking the gtod seqcount.
	 */
	time_t now = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = now;
	return now;
}
|
|
|
|
/* Export the entry point under its libc name as a weak dynamic symbol. */
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));
|