/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>

#define gtod (&VVAR(vsyscall_gtod_data))
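
/*
 * vsyscall_gtod_data lives in the vvar page, which is mapped read-only
 * into every process next to the vDSO text and is updated by the kernel's
 * timekeeping code.  Everything below reads it through the
 * gtod_read_begin()/gtod_read_retry() seqcount to get a consistent
 * snapshot.
 */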

extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page
        __attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern u8 hvclock_page
        __attribute__((visibility("hidden")));
#endif

#ifndef BUILD_VDSO32
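
/*
 * Syscall fallbacks, used when the selected vclock mode is VCLOCK_NONE or
 * the clock id has no fast path.  On 64-bit the arguments are already in
 * %rdi/%rsi, so a plain SYSCALL works; %rcx and %r11 are clobbered by the
 * instruction itself, and the "=m" outputs tell GCC that the pointed-to
 * structures are written by the kernel.
 */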

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;

        asm ("syscall" : "=a" (ret), "=m" (*ts) :
             "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
             "memory", "rcx", "r11");
        return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
        long ret;

        asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
             "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
             "memory", "rcx", "r11");
        return ret;
}

#else
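
/*
 * The 32-bit vDSO build goes through __kernel_vsyscall instead.  The
 * syscall number is passed in %eax and the arguments in %ebx/%ecx, but
 * %ebx may be in use as the GOT pointer in position-independent code, so
 * it is saved in %edx around the call rather than handed to the compiler
 * as an operand or clobber.
 */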

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;

        asm (
                "mov %%ebx, %%edx \n"
                "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret), "=m" (*ts)
                : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
                : "memory", "edx");
        return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
        long ret;

        asm (
                "mov %%ebx, %%edx \n"
                "mov %[tv], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret), "=m" (*tv), "=m" (*tz)
                : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
                : "memory", "edx");
        return ret;
}

#endif
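
/*
 * The fast paths read one of up to three hardware clock sources, selected
 * by gtod->vclock_mode: the raw TSC (VCLOCK_TSC), the paravirtualized
 * kvmclock/Xen pvclock page (VCLOCK_PVCLOCK), or the Hyper-V reference
 * TSC page (VCLOCK_HVCLOCK).  Each reader returns a cycle count that
 * vgetsns() turns into shifted nanoseconds relative to gtod->cycle_last.
 */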

#ifdef CONFIG_PARAVIRT_CLOCK
static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
        return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}

static notrace u64 vread_pvclock(int *mode)
{
        const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
        u64 ret;
        u64 last;
        u32 version;

        /*
         * Note: The kernel and hypervisor must guarantee that cpu ID
         * number maps 1:1 to per-CPU pvclock time info.
         *
         * Because the hypervisor is entirely unaware of guest userspace
         * preemption, it cannot guarantee that per-CPU pvclock time
         * info is updated if the underlying CPU changes or that the
         * version is increased whenever the underlying CPU changes.
         *
         * On KVM, we are guaranteed that pvti updates for any vCPU are
         * atomic as seen by *all* vCPUs.  This is an even stronger
         * guarantee than we get with a normal seqlock.
         *
         * On Xen, we don't appear to have that guarantee, but Xen still
         * supplies a valid seqlock using the version field.
         *
         * We only do pvclock vdso timing at all if
         * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
         * mean that all vCPUs have matching pvti and that the TSC is
         * synced, so we can just look at vCPU 0's pvti.
         */

        do {
                version = pvclock_read_begin(pvti);

                if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
                        *mode = VCLOCK_NONE;
                        return 0;
                }

                ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
        } while (pvclock_read_retry(pvti, version));

        /* refer to vread_tsc() comment for rationale */
        last = gtod->cycle_last;

        if (likely(ret >= last))
                return ret;

        return last;
}
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
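
/*
 * Hyper-V publishes a reference TSC page; hv_read_tsc_page() (from
 * asm/mshyperv.h) converts the current TSC reading through the scale and
 * offset in that page and is expected to return U64_MAX while the page is
 * not valid, in which case we have to punt to the fallback syscall.
 */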
static notrace u64 vread_hvclock(int *mode)
{
        const struct ms_hyperv_tsc_page *tsc_pg =
                (const struct ms_hyperv_tsc_page *)&hvclock_page;
        u64 current_tick = hv_read_tsc_page(tsc_pg);

        if (current_tick != U64_MAX)
                return current_tick;

        *mode = VCLOCK_NONE;
        return 0;
}
#endif

notrace static u64 vread_tsc(void)
{
        u64 ret = (u64)rdtsc_ordered();
        u64 last = gtod->cycle_last;

        if (likely(ret >= last))
                return ret;

        /*
         * GCC likes to generate cmov here, but this branch is extremely
         * predictable (it's just a function of time and the likely is
         * very likely) and there's a data dependence, so force GCC
         * to generate a branch instead.  I don't barrier() because
         * we don't actually need a barrier, and if this function
         * ever gets inlined it will generate worse code.
         */
        asm volatile ("");
        return last;
}
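
/*
 * Cycles-to-nanoseconds conversion used by the readers below, in shifted
 * (2^shift) nanosecond units:
 *
 *      ns = base_snsec + (cycles - gtod->cycle_last) * gtod->mult;
 *      ns >>= gtod->shift;
 *
 * where base_snsec is the wall/monotonic snapshot taken under the gtod
 * seqcount.  vgetsns() supplies the "(cycles - cycle_last) * mult" part.
 */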

notrace static inline u64 vgetsns(int *mode)
{
        u64 v;
        cycles_t cycles;

        if (gtod->vclock_mode == VCLOCK_TSC)
                cycles = vread_tsc();
#ifdef CONFIG_PARAVIRT_CLOCK
        else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
                cycles = vread_pvclock(mode);
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
        else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
                cycles = vread_hvclock(mode);
#endif
        else
                return 0;
        v = cycles - gtod->cycle_last;
        return v * gtod->mult;
}
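
/*
 * do_realtime()/do_monotonic() below retry the whole read if the kernel
 * updated vsyscall_gtod_data in the middle: gtod_read_begin() spins until
 * the sequence count is even and gtod_read_retry() detects a concurrent
 * update.  The *_snsec fields hold nanoseconds left-shifted by
 * gtod->shift, hence the final ">>= gtod->shift" before splitting the
 * result into tv_sec/tv_nsec.
 */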

/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
        unsigned int seq;
        u64 ns;
        int mode;

        do {
                seq = gtod_read_begin(gtod);
                mode = gtod->vclock_mode;
                ts->tv_sec = gtod->wall_time_sec;
                ns = gtod->wall_time_snsec;
                ns += vgetsns(&mode);
                ns >>= gtod->shift;
        } while (unlikely(gtod_read_retry(gtod, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return mode;
}

notrace static int __always_inline do_monotonic(struct timespec *ts)
{
        unsigned int seq;
        u64 ns;
        int mode;

        do {
                seq = gtod_read_begin(gtod);
                mode = gtod->vclock_mode;
                ts->tv_sec = gtod->monotonic_time_sec;
                ns = gtod->monotonic_time_snsec;
                ns += vgetsns(&mode);
                ns >>= gtod->shift;
        } while (unlikely(gtod_read_retry(gtod, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return mode;
}

notrace static void do_realtime_coarse(struct timespec *ts)
{
        unsigned int seq;
        do {
                seq = gtod_read_begin(gtod);
                ts->tv_sec = gtod->wall_time_coarse_sec;
                ts->tv_nsec = gtod->wall_time_coarse_nsec;
        } while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace static void do_monotonic_coarse(struct timespec *ts)
{
        unsigned int seq;
        do {
                seq = gtod_read_begin(gtod);
                ts->tv_sec = gtod->monotonic_time_coarse_sec;
                ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
        } while (unlikely(gtod_read_retry(gtod, seq)));
}
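
/*
 * Entry points.  Userspace normally reaches these through libc: an
 * ordinary
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_MONOTONIC, &ts);
 *
 * is satisfied by __vdso_clock_gettime() below without entering the
 * kernel, as long as the active vclock mode has a fast path; otherwise it
 * drops into the real syscall via the fallbacks above.
 */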

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        switch (clock) {
        case CLOCK_REALTIME:
                if (do_realtime(ts) == VCLOCK_NONE)
                        goto fallback;
                break;
        case CLOCK_MONOTONIC:
                if (do_monotonic(ts) == VCLOCK_NONE)
                        goto fallback;
                break;
        case CLOCK_REALTIME_COARSE:
                do_realtime_coarse(ts);
                break;
        case CLOCK_MONOTONIC_COARSE:
                do_monotonic_coarse(ts);
                break;
        default:
                goto fallback;
        }

        return 0;
fallback:
        return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));
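
/*
 * gettimeofday() reuses do_realtime() on the timeval, which has the same
 * two-long layout as a timespec here, so nanoseconds land in tv_usec and
 * the division by 1000 then converts them to microseconds.  The timezone
 * fields, if requested, are copied straight from the gtod data.
 */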

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        if (likely(tv != NULL)) {
                if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
                        return vdso_fallback_gtod(tv, tz);
                tv->tv_usec /= 1000;
        }
        if (unlikely(tz != NULL)) {
                tz->tz_minuteswest = gtod->tz_minuteswest;
                tz->tz_dsttime = gtod->tz_dsttime;
        }

        return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
        /* This is atomic on x86 so we don't need any locks. */
        time_t result = READ_ONCE(gtod->wall_time_sec);

        if (t)
                *t = result;
        return result;
}
time_t time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));