mirror of https://gitee.com/openkylin/linux.git
Merge branch 'fortglx/4.15/time' of https://git.linaro.org/people/john.stultz/linux into timers/core
Pull timekeeping updates from John Stultz:
- More y2038 work from Arnd Bergmann
- A new mechanism to allow RTC drivers to specify the resolution of the RTC so the suspend/resume code can make informed decisions whether to inject the suspended time or not in case of fast suspend/resume cycles.
commit fb56d689fb
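The mechanism is the new per-device set_offset_nsec field shown in the hunks below. As a rough usage sketch (a hypothetical foo_rtc driver, not part of this commit, assuming the devm_rtc_allocate_device()/rtc_register_device() registration API), a driver whose hardware takes about one full second to latch a newly written time could override the 0.5 s default right after allocating the device:

#include <linux/platform_device.h>
#include <linux/rtc.h>

/* Hypothetical probe function, for illustration only. */
static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* This hardware needs roughly 1 s from the set call until the new
	 * time is latched, so ask the core to call the set op one second
	 * before the target wall-clock second instead of the 0.5 s default. */
	rtc->set_offset_nsec = NSEC_PER_SEC;

	/* rtc->ops setup omitted for brevity. */
	return rtc_register_device(rtc);
}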
@@ -161,6 +161,9 @@ static struct rtc_device *rtc_allocate_device(void)

device_initialize(&rtc->dev);

/* Drivers can revise this default after allocating the device. */
rtc->set_offset_nsec = NSEC_PER_SEC / 2;

rtc->irq_freq = 1;
rtc->max_user_freq = 64;
rtc->dev.class = rtc_class;
@@ -10,6 +10,7 @@

/**
* rtc_set_ntp_time - Save NTP synchronized time to the RTC
* @now: Current time of day
* @target_nsec: pointer for desired now->tv_nsec value
*
* Replacement for the NTP platform function update_persistent_clock64
* that stores time for later retrieval by rtc_hctosys.
@@ -18,30 +19,52 @@

* possible at all, and various other -errno for specific temporary failure
* cases.
*
* -EPROTO is returned if now.tv_nsec is not close enough to *target_nsec.
*
* If temporary failure is indicated the caller should try again 'soon'
*/
int rtc_set_ntp_time(struct timespec64 now)
int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec)
{
struct rtc_device *rtc;
struct rtc_time tm;
struct timespec64 to_set;
int err = -ENODEV;

if (now.tv_nsec < (NSEC_PER_SEC >> 1))
rtc_time64_to_tm(now.tv_sec, &tm);
else
rtc_time64_to_tm(now.tv_sec + 1, &tm);
bool ok;

rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
if (rtc) {
/* rtc_hctosys exclusively uses UTC, so we call set_time here,
* not set_mmss. */
if (rtc->ops &&
(rtc->ops->set_time ||
rtc->ops->set_mmss64 ||
rtc->ops->set_mmss))
err = rtc_set_time(rtc, &tm);
rtc_class_close(rtc);
if (!rtc)
goto out_err;

if (!rtc->ops || (!rtc->ops->set_time && !rtc->ops->set_mmss64 &&
!rtc->ops->set_mmss))
goto out_close;

/* Compute the value of tv_nsec we require the caller to supply in
* now.tv_nsec. This is the value such that (now +
* set_offset_nsec).tv_nsec == 0.
*/
set_normalized_timespec64(&to_set, 0, -rtc->set_offset_nsec);
*target_nsec = to_set.tv_nsec;

/* The ntp code must call this with the correct value in tv_nsec, if
* it does not we update target_nsec and return EPROTO to make the ntp
* code try again later.
*/
ok = rtc_tv_nsec_ok(rtc->set_offset_nsec, &to_set, &now);
if (!ok) {
err = -EPROTO;
goto out_close;
}

rtc_time64_to_tm(to_set.tv_sec, &tm);

/* rtc_hctosys exclusively uses UTC, so we call set_time here, not
* set_mmss.
*/
err = rtc_set_time(rtc, &tm);

out_close:
rtc_class_close(rtc);
out_err:
return err;
}
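The rework above changes the calling convention: the caller passes where it wants target_nsec stored, and a call made at the wrong fraction of a second fails with -EPROTO so it can be retried at the right moment. A minimal caller sketch (hypothetical; the kernel's real caller is the workqueue-driven code in ntp.c further down):

/* Illustrative only: read the current time, try to push it to the RTC,
 * and learn the tv_nsec phase the RTC wants for the next attempt. */
static int try_sync_rtc(unsigned long *target_nsec)
{
	struct timespec64 now;
	int err;

	getnstimeofday64(&now);
	err = rtc_set_ntp_time(now, target_nsec);
	if (err == -EPROTO) {
		/* Phase was wrong: *target_nsec now holds the desired
		 * tv_nsec; reschedule so the next call lands close to it. */
	}
	return err;
}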
@@ -270,5 +270,6 @@ static inline ktime_t ms_to_ktime(u64 ms)

}

# include <linux/timekeeping.h>
# include <linux/timekeeping32.h>

#endif
@@ -135,6 +135,14 @@ struct rtc_device {

/* Some hardware can't support UIE mode */
int uie_unsupported;

/* Number of nsec it takes to set the RTC clock. This influences when
* the set ops are called. An offset:
* - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s
* - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s
* - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s
*/
long set_offset_nsec;

bool registered;

struct nvmem_config *nvmem_config;
@@ -172,7 +180,7 @@ extern void devm_rtc_device_unregister(struct device *dev,

extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_ntp_time(struct timespec64 now);
extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
@@ -221,6 +229,39 @@ static inline bool is_leap_year(unsigned int year)

return (!(year % 4) && (year % 100)) || !(year % 400);
}

/* Determine if we can call the driver to set the time. Drivers can only be
* called to set a second aligned time value, and the field set_offset_nsec
* specifies how far away from the second aligned time to call the driver.
*
* This also computes 'to_set' which is the time we are trying to set, and has
* a zero in tv_nsecs, such that:
* to_set - set_delay_nsec == now +/- FUZZ
*
*/
static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec,
struct timespec64 *to_set,
const struct timespec64 *now)
{
/* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
struct timespec64 delay = {.tv_sec = 0,
.tv_nsec = set_offset_nsec};

*to_set = timespec64_add(*now, delay);

if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
to_set->tv_nsec = 0;
return true;
}

if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
to_set->tv_sec++;
to_set->tv_nsec = 0;
return true;
}
return false;
}

#define rtc_register_device(device) \
__rtc_register_device(THIS_MODULE, device)
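A worked example of the helper, under the assumption HZ = 250 (so TICK_NSEC = 4 ms and TIME_SET_NSEC_FUZZ = 20 ms) and set_offset_nsec = 0.5 s: for now = 9.490 s, to_set becomes 9.990 s, whose tv_nsec is within 20 ms of the next second, so it is rounded up to 10.000 s and the function returns true; for now = 9.510 s, to_set = 10.010 s is truncated down to 10.000 s and also accepted; for now = 9.600 s, to_set = 10.100 s is more than 20 ms away from a second boundary, so the function returns false and the caller must retry at a better phase.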
@@ -17,149 +17,10 @@ int get_itimerspec64(struct itimerspec64 *it,
|
|||
int put_itimerspec64(const struct itimerspec64 *it,
|
||||
struct itimerspec __user *uit);
|
||||
|
||||
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
|
||||
|
||||
static inline int timespec_equal(const struct timespec *a,
|
||||
const struct timespec *b)
|
||||
{
|
||||
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
|
||||
}
|
||||
|
||||
/*
|
||||
* lhs < rhs: return <0
|
||||
* lhs == rhs: return 0
|
||||
* lhs > rhs: return >0
|
||||
*/
|
||||
static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
|
||||
{
|
||||
if (lhs->tv_sec < rhs->tv_sec)
|
||||
return -1;
|
||||
if (lhs->tv_sec > rhs->tv_sec)
|
||||
return 1;
|
||||
return lhs->tv_nsec - rhs->tv_nsec;
|
||||
}
|
||||
|
||||
static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
|
||||
{
|
||||
if (lhs->tv_sec < rhs->tv_sec)
|
||||
return -1;
|
||||
if (lhs->tv_sec > rhs->tv_sec)
|
||||
return 1;
|
||||
return lhs->tv_usec - rhs->tv_usec;
|
||||
}
|
||||
|
||||
extern time64_t mktime64(const unsigned int year, const unsigned int mon,
|
||||
const unsigned int day, const unsigned int hour,
|
||||
const unsigned int min, const unsigned int sec);
|
||||
|
||||
/**
|
||||
* Deprecated. Use mktime64().
|
||||
*/
|
||||
static inline unsigned long mktime(const unsigned int year,
|
||||
const unsigned int mon, const unsigned int day,
|
||||
const unsigned int hour, const unsigned int min,
|
||||
const unsigned int sec)
|
||||
{
|
||||
return mktime64(year, mon, day, hour, min, sec);
|
||||
}
|
||||
|
||||
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
|
||||
|
||||
/*
|
||||
* timespec_add_safe assumes both values are positive and checks
|
||||
* for overflow. It will return TIME_T_MAX if the reutrn would be
|
||||
* smaller then either of the arguments.
|
||||
*/
|
||||
extern struct timespec timespec_add_safe(const struct timespec lhs,
|
||||
const struct timespec rhs);
|
||||
|
||||
|
||||
static inline struct timespec timespec_add(struct timespec lhs,
|
||||
struct timespec rhs)
|
||||
{
|
||||
struct timespec ts_delta;
|
||||
set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
|
||||
lhs.tv_nsec + rhs.tv_nsec);
|
||||
return ts_delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* sub = lhs - rhs, in normalized form
|
||||
*/
|
||||
static inline struct timespec timespec_sub(struct timespec lhs,
|
||||
struct timespec rhs)
|
||||
{
|
||||
struct timespec ts_delta;
|
||||
set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
|
||||
lhs.tv_nsec - rhs.tv_nsec);
|
||||
return ts_delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the timespec is norm, false if denorm:
|
||||
*/
|
||||
static inline bool timespec_valid(const struct timespec *ts)
|
||||
{
|
||||
/* Dates before 1970 are bogus */
|
||||
if (ts->tv_sec < 0)
|
||||
return false;
|
||||
/* Can't have more nanoseconds then a second */
|
||||
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool timespec_valid_strict(const struct timespec *ts)
|
||||
{
|
||||
if (!timespec_valid(ts))
|
||||
return false;
|
||||
/* Disallow values that could overflow ktime_t */
|
||||
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool timeval_valid(const struct timeval *tv)
|
||||
{
|
||||
/* Dates before 1970 are bogus */
|
||||
if (tv->tv_sec < 0)
|
||||
return false;
|
||||
|
||||
/* Can't have more microseconds then a second */
|
||||
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
|
||||
|
||||
/*
|
||||
* Validates if a timespec/timeval used to inject a time offset is valid.
|
||||
* Offsets can be postive or negative. The value of the timeval/timespec
|
||||
* is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must
|
||||
* always be non-negative.
|
||||
*/
|
||||
static inline bool timeval_inject_offset_valid(const struct timeval *tv)
|
||||
{
|
||||
/* We don't check the tv_sec as it can be positive or negative */
|
||||
|
||||
/* Can't have more microseconds then a second */
|
||||
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool timespec_inject_offset_valid(const struct timespec *ts)
|
||||
{
|
||||
/* We don't check the tv_sec as it can be positive or negative */
|
||||
|
||||
/* Can't have more nanoseconds then a second */
|
||||
if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Some architectures do not supply their own clocksource.
|
||||
* This is mainly the case in architectures that get their
|
||||
* inter-tick times by reading the counter on their interval
|
||||
|
@@ -208,73 +69,7 @@ struct tm {
|
|||
|
||||
void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
|
||||
|
||||
/**
|
||||
* time_to_tm - converts the calendar time to local broken-down time
|
||||
*
|
||||
* @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
|
||||
* Coordinated Universal Time (UTC).
|
||||
* @offset offset seconds adding to totalsecs.
|
||||
* @result pointer to struct tm variable to receive broken-down time
|
||||
*/
|
||||
static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
|
||||
{
|
||||
time64_to_tm(totalsecs, offset, result);
|
||||
}
|
||||
|
||||
/**
|
||||
* timespec_to_ns - Convert timespec to nanoseconds
|
||||
* @ts: pointer to the timespec variable to be converted
|
||||
*
|
||||
* Returns the scalar nanosecond representation of the timespec
|
||||
* parameter.
|
||||
*/
|
||||
static inline s64 timespec_to_ns(const struct timespec *ts)
|
||||
{
|
||||
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
|
||||
}
|
||||
|
||||
/**
|
||||
* timeval_to_ns - Convert timeval to nanoseconds
|
||||
* @ts: pointer to the timeval variable to be converted
|
||||
*
|
||||
* Returns the scalar nanosecond representation of the timeval
|
||||
* parameter.
|
||||
*/
|
||||
static inline s64 timeval_to_ns(const struct timeval *tv)
|
||||
{
|
||||
return ((s64) tv->tv_sec * NSEC_PER_SEC) +
|
||||
tv->tv_usec * NSEC_PER_USEC;
|
||||
}
|
||||
|
||||
/**
|
||||
* ns_to_timespec - Convert nanoseconds to timespec
|
||||
* @nsec: the nanoseconds value to be converted
|
||||
*
|
||||
* Returns the timespec representation of the nsec parameter.
|
||||
*/
|
||||
extern struct timespec ns_to_timespec(const s64 nsec);
|
||||
|
||||
/**
|
||||
* ns_to_timeval - Convert nanoseconds to timeval
|
||||
* @nsec: the nanoseconds value to be converted
|
||||
*
|
||||
* Returns the timeval representation of the nsec parameter.
|
||||
*/
|
||||
extern struct timeval ns_to_timeval(const s64 nsec);
|
||||
|
||||
/**
|
||||
* timespec_add_ns - Adds nanoseconds to a timespec
|
||||
* @a: pointer to timespec to be incremented
|
||||
* @ns: unsigned nanoseconds value to be added
|
||||
*
|
||||
* This must always be inlined because its used from the x86-64 vdso,
|
||||
* which cannot call other kernel functions.
|
||||
*/
|
||||
static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
|
||||
{
|
||||
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
|
||||
a->tv_nsec = ns;
|
||||
}
|
||||
# include <linux/time32.h>
|
||||
|
||||
static inline bool itimerspec64_valid(const struct itimerspec64 *its)
|
||||
{
|
||||
|
|
|
@@ -0,0 +1,221 @@
|
|||
#ifndef _LINUX_TIME32_H
|
||||
#define _LINUX_TIME32_H
|
||||
/*
|
||||
* These are all interfaces based on the old time_t definition
|
||||
* that overflows in 2038 on 32-bit architectures. New code
|
||||
* should use the replacements based on time64_t and timespec64.
|
||||
*
|
||||
* Any interfaces in here that become unused as we migrate
|
||||
* code to time64_t should get removed.
|
||||
*/
|
||||
|
||||
#include <linux/time64.h>
|
||||
|
||||
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
|
||||
|
||||
#if __BITS_PER_LONG == 64
|
||||
|
||||
/* timespec64 is defined as timespec here */
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
return ts64;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
return ts;
|
||||
}
|
||||
|
||||
# define timespec_equal timespec64_equal
|
||||
# define timespec_compare timespec64_compare
|
||||
# define set_normalized_timespec set_normalized_timespec64
|
||||
# define timespec_add timespec64_add
|
||||
# define timespec_sub timespec64_sub
|
||||
# define timespec_valid timespec64_valid
|
||||
# define timespec_valid_strict timespec64_valid_strict
|
||||
# define timespec_to_ns timespec64_to_ns
|
||||
# define ns_to_timespec ns_to_timespec64
|
||||
# define timespec_add_ns timespec64_add_ns
|
||||
|
||||
#else
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
struct timespec ret;
|
||||
|
||||
ret.tv_sec = (time_t)ts64.tv_sec;
|
||||
ret.tv_nsec = ts64.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
struct timespec64 ret;
|
||||
|
||||
ret.tv_sec = ts.tv_sec;
|
||||
ret.tv_nsec = ts.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int timespec_equal(const struct timespec *a,
|
||||
const struct timespec *b)
|
||||
{
|
||||
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
|
||||
}
|
||||
|
||||
/*
|
||||
* lhs < rhs: return <0
|
||||
* lhs == rhs: return 0
|
||||
* lhs > rhs: return >0
|
||||
*/
|
||||
static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
|
||||
{
|
||||
if (lhs->tv_sec < rhs->tv_sec)
|
||||
return -1;
|
||||
if (lhs->tv_sec > rhs->tv_sec)
|
||||
return 1;
|
||||
return lhs->tv_nsec - rhs->tv_nsec;
|
||||
}
|
||||
|
||||
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
|
||||
|
||||
static inline struct timespec timespec_add(struct timespec lhs,
|
||||
struct timespec rhs)
|
||||
{
|
||||
struct timespec ts_delta;
|
||||
|
||||
set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
|
||||
lhs.tv_nsec + rhs.tv_nsec);
|
||||
return ts_delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* sub = lhs - rhs, in normalized form
|
||||
*/
|
||||
static inline struct timespec timespec_sub(struct timespec lhs,
|
||||
struct timespec rhs)
|
||||
{
|
||||
struct timespec ts_delta;
|
||||
|
||||
set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
|
||||
lhs.tv_nsec - rhs.tv_nsec);
|
||||
return ts_delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the timespec is norm, false if denorm:
|
||||
*/
|
||||
static inline bool timespec_valid(const struct timespec *ts)
|
||||
{
|
||||
/* Dates before 1970 are bogus */
|
||||
if (ts->tv_sec < 0)
|
||||
return false;
|
||||
/* Can't have more nanoseconds then a second */
|
||||
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool timespec_valid_strict(const struct timespec *ts)
|
||||
{
|
||||
if (!timespec_valid(ts))
|
||||
return false;
|
||||
/* Disallow values that could overflow ktime_t */
|
||||
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* timespec_to_ns - Convert timespec to nanoseconds
|
||||
* @ts: pointer to the timespec variable to be converted
|
||||
*
|
||||
* Returns the scalar nanosecond representation of the timespec
|
||||
* parameter.
|
||||
*/
|
||||
static inline s64 timespec_to_ns(const struct timespec *ts)
|
||||
{
|
||||
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
|
||||
}
|
||||
|
||||
/**
|
||||
* ns_to_timespec - Convert nanoseconds to timespec
|
||||
* @nsec: the nanoseconds value to be converted
|
||||
*
|
||||
* Returns the timespec representation of the nsec parameter.
|
||||
*/
|
||||
extern struct timespec ns_to_timespec(const s64 nsec);
|
||||
|
||||
/**
|
||||
* timespec_add_ns - Adds nanoseconds to a timespec
|
||||
* @a: pointer to timespec to be incremented
|
||||
* @ns: unsigned nanoseconds value to be added
|
||||
*
|
||||
* This must always be inlined because its used from the x86-64 vdso,
|
||||
* which cannot call other kernel functions.
|
||||
*/
|
||||
static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
|
||||
{
|
||||
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
|
||||
a->tv_nsec = ns;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* time_to_tm - converts the calendar time to local broken-down time
|
||||
*
|
||||
* @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
|
||||
* Coordinated Universal Time (UTC).
|
||||
* @offset offset seconds adding to totalsecs.
|
||||
* @result pointer to struct tm variable to receive broken-down time
|
||||
*/
|
||||
static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
|
||||
{
|
||||
time64_to_tm(totalsecs, offset, result);
|
||||
}
|
||||
|
||||
static inline unsigned long mktime(const unsigned int year,
|
||||
const unsigned int mon, const unsigned int day,
|
||||
const unsigned int hour, const unsigned int min,
|
||||
const unsigned int sec)
|
||||
{
|
||||
return mktime64(year, mon, day, hour, min, sec);
|
||||
}
|
||||
|
||||
static inline bool timeval_valid(const struct timeval *tv)
|
||||
{
|
||||
/* Dates before 1970 are bogus */
|
||||
if (tv->tv_sec < 0)
|
||||
return false;
|
||||
|
||||
/* Can't have more microseconds then a second */
|
||||
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
extern struct timespec timespec_trunc(struct timespec t, unsigned int gran);
|
||||
|
||||
/**
|
||||
* timeval_to_ns - Convert timeval to nanoseconds
|
||||
* @ts: pointer to the timeval variable to be converted
|
||||
*
|
||||
* Returns the scalar nanosecond representation of the timeval
|
||||
* parameter.
|
||||
*/
|
||||
static inline s64 timeval_to_ns(const struct timeval *tv)
|
||||
{
|
||||
return ((s64) tv->tv_sec * NSEC_PER_SEC) +
|
||||
tv->tv_usec * NSEC_PER_USEC;
|
||||
}
|
||||
|
||||
/**
|
||||
* ns_to_timeval - Convert nanoseconds to timeval
|
||||
* @nsec: the nanoseconds value to be converted
|
||||
*
|
||||
* Returns the timeval representation of the nsec parameter.
|
||||
*/
|
||||
extern struct timeval ns_to_timeval(const s64 nsec);
|
||||
|
||||
#endif
|
|
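The new time32.h above mostly moves existing 32-bit time_t helpers out of time.h. A hedged sketch of the intended boundary conversion while callers migrate (hypothetical function, not from this commit):

#include <linux/printk.h>
#include <linux/time32.h>

/* Bridge a legacy struct timespec into the y2038-safe timespec64 world. */
static void legacy_report(struct timespec ts_old)
{
	struct timespec64 ts = timespec_to_timespec64(ts_old);

	pr_info("sec=%lld nsec=%ld\n", (long long)ts.tv_sec, ts.tv_nsec);
}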
@@ -7,11 +7,8 @@
|
|||
typedef __s64 time64_t;
|
||||
typedef __u64 timeu64_t;
|
||||
|
||||
/*
|
||||
* This wants to go into uapi/linux/time.h once we agreed about the
|
||||
* userspace interfaces.
|
||||
*/
|
||||
#if __BITS_PER_LONG == 64
|
||||
/* this trick allows us to optimize out timespec64_to_timespec */
|
||||
# define timespec64 timespec
|
||||
#define itimerspec64 itimerspec
|
||||
#else
|
||||
|
@@ -41,77 +38,6 @@ struct itimerspec64 {
|
|||
#define KTIME_MAX ((s64)~((u64)1 << 63))
|
||||
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
|
||||
|
||||
#if __BITS_PER_LONG == 64
|
||||
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
return ts64;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
return ts;
|
||||
}
|
||||
|
||||
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
|
||||
{
|
||||
return *its64;
|
||||
}
|
||||
|
||||
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
|
||||
{
|
||||
return *its;
|
||||
}
|
||||
|
||||
# define timespec64_equal timespec_equal
|
||||
# define timespec64_compare timespec_compare
|
||||
# define set_normalized_timespec64 set_normalized_timespec
|
||||
# define timespec64_add timespec_add
|
||||
# define timespec64_sub timespec_sub
|
||||
# define timespec64_valid timespec_valid
|
||||
# define timespec64_valid_strict timespec_valid_strict
|
||||
# define timespec64_to_ns timespec_to_ns
|
||||
# define ns_to_timespec64 ns_to_timespec
|
||||
# define timespec64_add_ns timespec_add_ns
|
||||
|
||||
#else
|
||||
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
struct timespec ret;
|
||||
|
||||
ret.tv_sec = (time_t)ts64.tv_sec;
|
||||
ret.tv_nsec = ts64.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
struct timespec64 ret;
|
||||
|
||||
ret.tv_sec = ts.tv_sec;
|
||||
ret.tv_nsec = ts.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
|
||||
{
|
||||
struct itimerspec ret;
|
||||
|
||||
ret.it_interval = timespec64_to_timespec(its64->it_interval);
|
||||
ret.it_value = timespec64_to_timespec(its64->it_value);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
|
||||
{
|
||||
struct itimerspec64 ret;
|
||||
|
||||
ret.it_interval = timespec_to_timespec64(its->it_interval);
|
||||
ret.it_value = timespec_to_timespec64(its->it_value);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int timespec64_equal(const struct timespec64 *a,
|
||||
const struct timespec64 *b)
|
||||
{
|
||||
|
@@ -213,8 +139,6 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
|
|||
a->tv_nsec = ns;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* timespec64_add_safe assumes both values are positive and checks for
|
||||
* overflow. It will return TIME64_MAX in case of overflow.
|
||||
|
|
|
@@ -15,27 +15,16 @@ extern void xtime_update(unsigned long ticks);
|
|||
/*
|
||||
* Get and set timeofday
|
||||
*/
|
||||
extern void do_gettimeofday(struct timeval *tv);
|
||||
extern int do_settimeofday64(const struct timespec64 *ts);
|
||||
extern int do_sys_settimeofday64(const struct timespec64 *tv,
|
||||
const struct timezone *tz);
|
||||
/*
|
||||
* Kernel time accessors
|
||||
*/
|
||||
unsigned long get_seconds(void);
|
||||
struct timespec64 current_kernel_time64(void);
|
||||
/* does not take xtime_lock */
|
||||
struct timespec __current_kernel_time(void);
|
||||
|
||||
static inline struct timespec current_kernel_time(void)
|
||||
{
|
||||
struct timespec64 now = current_kernel_time64();
|
||||
|
||||
return timespec64_to_timespec(now);
|
||||
}
|
||||
|
||||
/*
|
||||
* timespec based interfaces
|
||||
* timespec64 based interfaces
|
||||
*/
|
||||
struct timespec64 get_monotonic_coarse64(void);
|
||||
extern void getrawmonotonic64(struct timespec64 *ts);
|
||||
|
@@ -47,116 +36,6 @@ extern int __getnstimeofday64(struct timespec64 *tv);
|
|||
extern void getnstimeofday64(struct timespec64 *tv);
|
||||
extern void getboottime64(struct timespec64 *ts);
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
/**
|
||||
* Deprecated. Use do_settimeofday64().
|
||||
*/
|
||||
static inline int do_settimeofday(const struct timespec *ts)
|
||||
{
|
||||
return do_settimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline int __getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
return __getnstimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline void getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
getnstimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline void ktime_get_ts(struct timespec *ts)
|
||||
{
|
||||
ktime_get_ts64(ts);
|
||||
}
|
||||
|
||||
static inline void ktime_get_real_ts(struct timespec *ts)
|
||||
{
|
||||
getnstimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline void getrawmonotonic(struct timespec *ts)
|
||||
{
|
||||
getrawmonotonic64(ts);
|
||||
}
|
||||
|
||||
static inline struct timespec get_monotonic_coarse(void)
|
||||
{
|
||||
return get_monotonic_coarse64();
|
||||
}
|
||||
|
||||
static inline void getboottime(struct timespec *ts)
|
||||
{
|
||||
return getboottime64(ts);
|
||||
}
|
||||
#else
|
||||
/**
|
||||
* Deprecated. Use do_settimeofday64().
|
||||
*/
|
||||
static inline int do_settimeofday(const struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
ts64 = timespec_to_timespec64(*ts);
|
||||
return do_settimeofday64(&ts64);
|
||||
}
|
||||
|
||||
static inline int __getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
int ret = __getnstimeofday64(&ts64);
|
||||
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getnstimeofday64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void ktime_get_ts(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
ktime_get_ts64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void ktime_get_real_ts(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getnstimeofday64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void getrawmonotonic(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getrawmonotonic64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline struct timespec get_monotonic_coarse(void)
|
||||
{
|
||||
return timespec64_to_timespec(get_monotonic_coarse64());
|
||||
}
|
||||
|
||||
static inline void getboottime(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getboottime64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
#endif
|
||||
|
||||
#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
|
||||
|
||||
/*
|
||||
|
@@ -242,23 +121,13 @@ extern u64 ktime_get_boot_fast_ns(void);
|
|||
extern u64 ktime_get_real_fast_ns(void);
|
||||
|
||||
/*
|
||||
* Timespec interfaces utilizing the ktime based ones
|
||||
* timespec64 interfaces utilizing the ktime based ones
|
||||
*/
|
||||
static inline void get_monotonic_boottime(struct timespec *ts)
|
||||
{
|
||||
*ts = ktime_to_timespec(ktime_get_boottime());
|
||||
}
|
||||
|
||||
static inline void get_monotonic_boottime64(struct timespec64 *ts)
|
||||
{
|
||||
*ts = ktime_to_timespec64(ktime_get_boottime());
|
||||
}
|
||||
|
||||
static inline void timekeeping_clocktai(struct timespec *ts)
|
||||
{
|
||||
*ts = ktime_to_timespec(ktime_get_clocktai());
|
||||
}
|
||||
|
||||
static inline void timekeeping_clocktai64(struct timespec64 *ts)
|
||||
{
|
||||
*ts = ktime_to_timespec64(ktime_get_clocktai());
|
||||
|
@@ -341,10 +210,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
|
|||
*/
|
||||
extern int persistent_clock_is_local;
|
||||
|
||||
extern void read_persistent_clock(struct timespec *ts);
|
||||
extern void read_persistent_clock64(struct timespec64 *ts);
|
||||
extern void read_boot_clock64(struct timespec64 *ts);
|
||||
extern int update_persistent_clock(struct timespec now);
|
||||
extern int update_persistent_clock64(struct timespec64 now);
|
||||
|
||||
|
||||
|
|
|
@@ -0,0 +1,151 @@
|
|||
#ifndef _LINUX_TIMEKEEPING32_H
|
||||
#define _LINUX_TIMEKEEPING32_H
|
||||
/*
|
||||
* These interfaces are all based on the old timespec type
|
||||
* and should get replaced with the timespec64 based versions
|
||||
* over time so we can remove the file here.
|
||||
*/
|
||||
|
||||
extern void do_gettimeofday(struct timeval *tv);
|
||||
unsigned long get_seconds(void);
|
||||
|
||||
/* does not take xtime_lock */
|
||||
struct timespec __current_kernel_time(void);
|
||||
|
||||
static inline struct timespec current_kernel_time(void)
|
||||
{
|
||||
struct timespec64 now = current_kernel_time64();
|
||||
|
||||
return timespec64_to_timespec(now);
|
||||
}
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
/**
|
||||
* Deprecated. Use do_settimeofday64().
|
||||
*/
|
||||
static inline int do_settimeofday(const struct timespec *ts)
|
||||
{
|
||||
return do_settimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline int __getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
return __getnstimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline void getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
getnstimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline void ktime_get_ts(struct timespec *ts)
|
||||
{
|
||||
ktime_get_ts64(ts);
|
||||
}
|
||||
|
||||
static inline void ktime_get_real_ts(struct timespec *ts)
|
||||
{
|
||||
getnstimeofday64(ts);
|
||||
}
|
||||
|
||||
static inline void getrawmonotonic(struct timespec *ts)
|
||||
{
|
||||
getrawmonotonic64(ts);
|
||||
}
|
||||
|
||||
static inline struct timespec get_monotonic_coarse(void)
|
||||
{
|
||||
return get_monotonic_coarse64();
|
||||
}
|
||||
|
||||
static inline void getboottime(struct timespec *ts)
|
||||
{
|
||||
return getboottime64(ts);
|
||||
}
|
||||
#else
|
||||
/**
|
||||
* Deprecated. Use do_settimeofday64().
|
||||
*/
|
||||
static inline int do_settimeofday(const struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
ts64 = timespec_to_timespec64(*ts);
|
||||
return do_settimeofday64(&ts64);
|
||||
}
|
||||
|
||||
static inline int __getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
int ret = __getnstimeofday64(&ts64);
|
||||
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void getnstimeofday(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getnstimeofday64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void ktime_get_ts(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
ktime_get_ts64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void ktime_get_real_ts(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getnstimeofday64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline void getrawmonotonic(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getrawmonotonic64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
|
||||
static inline struct timespec get_monotonic_coarse(void)
|
||||
{
|
||||
return timespec64_to_timespec(get_monotonic_coarse64());
|
||||
}
|
||||
|
||||
static inline void getboottime(struct timespec *ts)
|
||||
{
|
||||
struct timespec64 ts64;
|
||||
|
||||
getboottime64(&ts64);
|
||||
*ts = timespec64_to_timespec(ts64);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Timespec interfaces utilizing the ktime based ones
|
||||
*/
|
||||
static inline void get_monotonic_boottime(struct timespec *ts)
|
||||
{
|
||||
*ts = ktime_to_timespec(ktime_get_boottime());
|
||||
}
|
||||
|
||||
static inline void timekeeping_clocktai(struct timespec *ts)
|
||||
{
|
||||
*ts = ktime_to_timespec(ktime_get_clocktai());
|
||||
}
|
||||
|
||||
/*
|
||||
* Persistent clock related interfaces
|
||||
*/
|
||||
extern void read_persistent_clock(struct timespec *ts);
|
||||
extern int update_persistent_clock(struct timespec now);
|
||||
|
||||
#endif
|
|
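As the header comment above states, these wrappers exist only until callers are converted. A minimal before/after sketch of that conversion (illustrative, not taken from this commit):

#include <linux/timekeeping.h>

static void sample_wallclock(struct timespec64 *ts64)
{
	/* Deprecated 32-bit pattern being phased out:
	 *	struct timespec ts;
	 *	getnstimeofday(&ts);
	 * y2038-safe replacement: */
	getnstimeofday64(ts64);
}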
@@ -492,6 +492,67 @@ int second_overflow(time64_t secs)

return leap;
}

static void sync_hw_clock(struct work_struct *work);
static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock);

static void sched_sync_hw_clock(struct timespec64 now,
unsigned long target_nsec, bool fail)

{
struct timespec64 next;

getnstimeofday64(&next);
if (!fail)
next.tv_sec = 659;
else {
/*
* Try again as soon as possible. Delaying long periods
* decreases the accuracy of the work queue timer. Due to this
* the algorithm is very likely to require a short-sleep retry
* after the above long sleep to synchronize ts_nsec.
*/
next.tv_sec = 0;
}

/* Compute the needed delay that will get to tv_nsec == target_nsec */
next.tv_nsec = target_nsec - next.tv_nsec;
if (next.tv_nsec <= 0)
next.tv_nsec += NSEC_PER_SEC;
if (next.tv_nsec >= NSEC_PER_SEC) {
next.tv_sec++;
next.tv_nsec -= NSEC_PER_SEC;
}

queue_delayed_work(system_power_efficient_wq, &sync_work,
timespec64_to_jiffies(&next));
}

static void sync_rtc_clock(void)
{
unsigned long target_nsec;
struct timespec64 adjust, now;
int rc;

if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
return;

getnstimeofday64(&now);

adjust = now;
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);

/*
* The current RTC in use will provide the target_nsec it wants to be
* called at, and does rtc_tv_nsec_ok internally.
*/
rc = rtc_set_ntp_time(adjust, &target_nsec);
if (rc == -ENODEV)
return;

sched_sync_hw_clock(now, target_nsec, rc);
}

#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock(struct timespec now)
{
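To make the scheduling arithmetic concrete: on success the next synchronization is scheduled 659 seconds out, just under the traditional ~11 minute cadence, and the nanosecond part of the delay is chosen so the work item fires when tv_nsec equals target_nsec. For example, with target_nsec = 500000000 and a current tv_nsec of 700000000, next.tv_nsec is -200000000, which normalizes to 800000000, so the work runs 659.8 s later and again lands on a .5 s fraction of the second.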
@@ -507,77 +568,76 @@ int __weak update_persistent_clock64(struct timespec64 now64)

}
#endif

#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_cmos_clock(struct work_struct *work);

static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);

static void sync_cmos_clock(struct work_struct *work)
static bool sync_cmos_clock(void)
{
static bool no_cmos;
struct timespec64 now;
struct timespec64 next;
int fail = 1;
struct timespec64 adjust;
int rc = -EPROTO;
long target_nsec = NSEC_PER_SEC / 2;

if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE))
return false;

if (no_cmos)
return false;

/*
* If we have an externally synchronized Linux clock, then update
* CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
* called as close as possible to 500 ms before the new second starts.
* This code is run on a timer. If the clock is set, that timer
* may not expire at the correct time. Thus, we adjust...
* We want the clock to be within a couple of ticks from the target.
* Historically update_persistent_clock64() has followed x86
* semantics, which match the MC146818A/etc RTC. This RTC will store
* 'adjust' and then in .5s it will advance once second.
*
* Architectures are strongly encouraged to use rtclib and not
* implement this legacy API.
*/
if (!ntp_synced()) {
/*
* Not synced, exit, do not restart a timer (if one is
* running, let it run out).
*/
return;
}

getnstimeofday64(&now);
if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
struct timespec64 adjust = now;

fail = -ENODEV;
if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
if (persistent_clock_is_local)
adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
#ifdef CONFIG_GENERIC_CMOS_UPDATE
fail = update_persistent_clock64(adjust);
#endif

#ifdef CONFIG_RTC_SYSTOHC
if (fail == -ENODEV)
fail = rtc_set_ntp_time(adjust);
#endif
rc = update_persistent_clock64(adjust);
/*
* The machine does not support update_persistent_clock64 even
* though it defines CONFIG_GENERIC_CMOS_UPDATE.
*/
if (rc == -ENODEV) {
no_cmos = true;
return false;
}
}

next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
if (next.tv_nsec <= 0)
next.tv_nsec += NSEC_PER_SEC;
sched_sync_hw_clock(now, target_nsec, rc);
return true;
}

if (!fail || fail == -ENODEV)
next.tv_sec = 659;
else
next.tv_sec = 0;
/*
* If we have an externally synchronized Linux clock, then update RTC clock
* accordingly every ~11 minutes. Generally RTCs can only store second
* precision, but many RTCs will adjust the phase of their second tick to
* match the moment of update. This infrastructure arranges to call to the RTC
* set at the correct moment to phase synchronize the RTC second tick over
* with the kernel clock.
*/
static void sync_hw_clock(struct work_struct *work)
{
if (!ntp_synced())
return;

if (next.tv_nsec >= NSEC_PER_SEC) {
next.tv_sec++;
next.tv_nsec -= NSEC_PER_SEC;
}
queue_delayed_work(system_power_efficient_wq,
&sync_cmos_work, timespec64_to_jiffies(&next));
if (sync_cmos_clock())
return;

sync_rtc_clock();
}

void ntp_notify_cmos_timer(void)
{
queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
if (!ntp_synced())
return;

if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) ||
IS_ENABLED(CONFIG_RTC_SYSTOHC))
queue_delayed_work(system_power_efficient_wq, &sync_work, 0);
}

#else
void ntp_notify_cmos_timer(void) { }
#endif

/*
* Propagate a new txc->status value into the NTP state:
*/
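A worked example of the legacy CMOS path above: update_persistent_clock64() follows MC146818A-style semantics, where the RTC stores the written value and advances it 0.5 s later, so target_nsec is fixed at NSEC_PER_SEC / 2 and rtc_tv_nsec_ok() is called with the negated offset. If now is 10.499 s, adjust becomes 9.999 s, which is within the fuzz window of the next second and rounds up to 10 s; the RTC is written with 10 and ticks over to 11 at roughly wall-clock 11.0 s, keeping the two clocks phase aligned.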
@@ -653,67 +713,6 @@ static inline void process_adjtimex_modes(struct timex *txc,
|
|||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* ntp_validate_timex - Ensures the timex is ok for use in do_adjtimex
|
||||
*/
|
||||
int ntp_validate_timex(struct timex *txc)
|
||||
{
|
||||
if (txc->modes & ADJ_ADJTIME) {
|
||||
/* singleshot must not be used with any other mode bits */
|
||||
if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
|
||||
return -EINVAL;
|
||||
if (!(txc->modes & ADJ_OFFSET_READONLY) &&
|
||||
!capable(CAP_SYS_TIME))
|
||||
return -EPERM;
|
||||
} else {
|
||||
/* In order to modify anything, you gotta be super-user! */
|
||||
if (txc->modes && !capable(CAP_SYS_TIME))
|
||||
return -EPERM;
|
||||
/*
|
||||
* if the quartz is off by more than 10% then
|
||||
* something is VERY wrong!
|
||||
*/
|
||||
if (txc->modes & ADJ_TICK &&
|
||||
(txc->tick < 900000/USER_HZ ||
|
||||
txc->tick > 1100000/USER_HZ))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (txc->modes & ADJ_SETOFFSET) {
|
||||
/* In order to inject time, you gotta be super-user! */
|
||||
if (!capable(CAP_SYS_TIME))
|
||||
return -EPERM;
|
||||
|
||||
if (txc->modes & ADJ_NANO) {
|
||||
struct timespec ts;
|
||||
|
||||
ts.tv_sec = txc->time.tv_sec;
|
||||
ts.tv_nsec = txc->time.tv_usec;
|
||||
if (!timespec_inject_offset_valid(&ts))
|
||||
return -EINVAL;
|
||||
|
||||
} else {
|
||||
if (!timeval_inject_offset_valid(&txc->time))
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for potential multiplication overflows that can
|
||||
* only happen on 64-bit systems:
|
||||
*/
|
||||
if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
|
||||
if (LLONG_MIN / PPM_SCALE > txc->freq)
|
||||
return -EINVAL;
|
||||
if (LLONG_MAX / PPM_SCALE < txc->freq)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* adjtimex mainly allows reading (and writing, if superuser) of
|
||||
* kernel time-keeping variables. used by xntpd.
|
||||
|
|
|
@@ -7,7 +7,6 @@ extern void ntp_clear(void);

extern u64 ntp_tick_length(void);
extern ktime_t ntp_get_next_leap(void);
extern int second_overflow(time64_t secs);
extern int ntp_validate_timex(struct timex *);
extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
#endif /* _LINUX_NTP_INTERNAL_H */
@@ -157,40 +157,6 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Indicates if there is an offset between the system clock and the hardware
|
||||
* clock/persistent clock/rtc.
|
||||
*/
|
||||
int persistent_clock_is_local;
|
||||
|
||||
/*
|
||||
* Adjust the time obtained from the CMOS to be UTC time instead of
|
||||
* local time.
|
||||
*
|
||||
* This is ugly, but preferable to the alternatives. Otherwise we
|
||||
* would either need to write a program to do it in /etc/rc (and risk
|
||||
* confusion if the program gets run more than once; it would also be
|
||||
* hard to make the program warp the clock precisely n hours) or
|
||||
* compile in the timezone information into the kernel. Bad, bad....
|
||||
*
|
||||
* - TYT, 1992-01-01
|
||||
*
|
||||
* The best thing to do is to keep the CMOS clock in universal time (UTC)
|
||||
* as real UNIX machines always do it. This avoids all headaches about
|
||||
* daylight saving times and warping kernel clocks.
|
||||
*/
|
||||
static inline void warp_clock(void)
|
||||
{
|
||||
if (sys_tz.tz_minuteswest != 0) {
|
||||
struct timespec adjust;
|
||||
|
||||
persistent_clock_is_local = 1;
|
||||
adjust.tv_sec = sys_tz.tz_minuteswest * 60;
|
||||
adjust.tv_nsec = 0;
|
||||
timekeeping_inject_offset(&adjust);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* In case for some reason the CMOS clock has not already been running
|
||||
* in UTC, but in some local time: The first time we set the timezone,
|
||||
|
@@ -224,7 +190,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz

if (firsttime) {
firsttime = 0;
if (!tv)
warp_clock();
timekeeping_warp_clock();
}
}
if (tv)
@@ -441,6 +407,7 @@ time64_t mktime64(const unsigned int year0, const unsigned int mon0,

}
EXPORT_SYMBOL(mktime64);

#if __BITS_PER_LONG == 32
/**
* set_normalized_timespec - set timespec sec and nsec parts and normalize
*
@@ -501,6 +468,7 @@ struct timespec ns_to_timespec(const s64 nsec)

return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
#endif

/**
* ns_to_timeval - Convert nanoseconds to timeval
@@ -520,7 +488,6 @@ struct timeval ns_to_timeval(const s64 nsec)

}
EXPORT_SYMBOL(ns_to_timeval);

#if BITS_PER_LONG == 32
/**
* set_normalized_timespec - set timespec sec and nsec parts and normalize
*
@@ -581,7 +548,7 @@ struct timespec64 ns_to_timespec64(const s64 nsec)

return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
#endif

/**
* msecs_to_jiffies: - convert milliseconds to jiffies
* @m: time in milliseconds
@@ -852,24 +819,6 @@ unsigned long nsecs_to_jiffies(u64 n)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
|
||||
|
||||
/*
|
||||
* Add two timespec values and do a safety check for overflow.
|
||||
* It's assumed that both values are valid (>= 0)
|
||||
*/
|
||||
struct timespec timespec_add_safe(const struct timespec lhs,
|
||||
const struct timespec rhs)
|
||||
{
|
||||
struct timespec res;
|
||||
|
||||
set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
|
||||
lhs.tv_nsec + rhs.tv_nsec);
|
||||
|
||||
if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
|
||||
res.tv_sec = TIME_T_MAX;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add two timespec64 values and do a safety check for overflow.
|
||||
* It's assumed that both values are valid (>= 0).
|
||||
|
|
|
@@ -1306,33 +1306,31 @@ EXPORT_SYMBOL(do_settimeofday64);

*
* Adds or subtracts an offset value from the current time.
*/
int timekeeping_inject_offset(struct timespec *ts)
static int timekeeping_inject_offset(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned long flags;
struct timespec64 ts64, tmp;
struct timespec64 tmp;
int ret = 0;

if (!timespec_inject_offset_valid(ts))
if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;

ts64 = timespec_to_timespec64(*ts);

raw_spin_lock_irqsave(&timekeeper_lock, flags);
write_seqcount_begin(&tk_core.seq);

timekeeping_forward_now(tk);

/* Make sure the proposed value is valid */
tmp = timespec64_add(tk_xtime(tk), ts64);
if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
tmp = timespec64_add(tk_xtime(tk), *ts);
if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
!timespec64_valid_strict(&tmp)) {
ret = -EINVAL;
goto error;
}

tk_xtime_add(tk, &ts64);
tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
tk_xtime_add(tk, ts);
tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1345,7 +1343,40 @@ int timekeeping_inject_offset(struct timespec *ts)

return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/*
* Indicates if there is an offset between the system clock and the hardware
* clock/persistent clock/rtc.
*/
int persistent_clock_is_local;

/*
* Adjust the time obtained from the CMOS to be UTC time instead of
* local time.
*
* This is ugly, but preferable to the alternatives. Otherwise we
* would either need to write a program to do it in /etc/rc (and risk
* confusion if the program gets run more than once; it would also be
* hard to make the program warp the clock precisely n hours) or
* compile in the timezone information into the kernel. Bad, bad....
*
* - TYT, 1992-01-01
*
* The best thing to do is to keep the CMOS clock in universal time (UTC)
* as real UNIX machines always do it. This avoids all headaches about
* daylight saving times and warping kernel clocks.
*/
void timekeeping_warp_clock(void)
{
if (sys_tz.tz_minuteswest != 0) {
struct timespec64 adjust;

persistent_clock_is_local = 1;
adjust.tv_sec = sys_tz.tz_minuteswest * 60;
adjust.tv_nsec = 0;
timekeeping_inject_offset(&adjust);
}
}

/**
* __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
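With timekeeping_inject_offset() now static, offset injection from outside the timekeeping core goes through adjtimex(). A short userspace sketch using the standard adjtimex(2) interface (not part of this diff) that injects a +2.5 s step, which the validation code below accepts:

#include <string.h>
#include <sys/timex.h>

/* Inject a +2.5 s step into the system clock; needs CAP_SYS_TIME. */
int inject_offset(void)
{
	struct timex tx;

	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_SETOFFSET | ADJ_NANO;
	tx.time.tv_sec = 2;
	tx.time.tv_usec = 500000000;	/* nanoseconds, because ADJ_NANO is set */

	return adjtimex(&tx);
}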
@@ -2289,6 +2320,72 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
|
|||
return base;
|
||||
}
|
||||
|
||||
/**
|
||||
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
|
||||
*/
|
||||
static int timekeeping_validate_timex(struct timex *txc)
|
||||
{
|
||||
if (txc->modes & ADJ_ADJTIME) {
|
||||
/* singleshot must not be used with any other mode bits */
|
||||
if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
|
||||
return -EINVAL;
|
||||
if (!(txc->modes & ADJ_OFFSET_READONLY) &&
|
||||
!capable(CAP_SYS_TIME))
|
||||
return -EPERM;
|
||||
} else {
|
||||
/* In order to modify anything, you gotta be super-user! */
|
||||
if (txc->modes && !capable(CAP_SYS_TIME))
|
||||
return -EPERM;
|
||||
/*
|
||||
* if the quartz is off by more than 10% then
|
||||
* something is VERY wrong!
|
||||
*/
|
||||
if (txc->modes & ADJ_TICK &&
|
||||
(txc->tick < 900000/USER_HZ ||
|
||||
txc->tick > 1100000/USER_HZ))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (txc->modes & ADJ_SETOFFSET) {
|
||||
/* In order to inject time, you gotta be super-user! */
|
||||
if (!capable(CAP_SYS_TIME))
|
||||
return -EPERM;
|
||||
|
||||
/*
|
||||
* Validate if a timespec/timeval used to inject a time
|
||||
* offset is valid. Offsets can be postive or negative, so
|
||||
* we don't check tv_sec. The value of the timeval/timespec
|
||||
* is the sum of its fields,but *NOTE*:
|
||||
* The field tv_usec/tv_nsec must always be non-negative and
|
||||
* we can't have more nanoseconds/microseconds than a second.
|
||||
*/
|
||||
if (txc->time.tv_usec < 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (txc->modes & ADJ_NANO) {
|
||||
if (txc->time.tv_usec >= NSEC_PER_SEC)
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if (txc->time.tv_usec >= USEC_PER_SEC)
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for potential multiplication overflows that can
|
||||
* only happen on 64-bit systems:
|
||||
*/
|
||||
if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
|
||||
if (LLONG_MIN / PPM_SCALE > txc->freq)
|
||||
return -EINVAL;
|
||||
if (LLONG_MAX / PPM_SCALE < txc->freq)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* do_adjtimex() - Accessor function to NTP __do_adjtimex function
|
||||
*/
|
||||
|
@@ -2301,12 +2398,12 @@ int do_adjtimex(struct timex *txc)

int ret;

/* Validate the data before disabling interrupts */
ret = ntp_validate_timex(txc);
ret = timekeeping_validate_timex(txc);
if (ret)
return ret;

if (txc->modes & ADJ_SETOFFSET) {
struct timespec delta;
struct timespec64 delta;
delta.tv_sec = txc->time.tv_sec;
delta.tv_nsec = txc->time.tv_usec;
if (!(txc->modes & ADJ_NANO))
@@ -10,7 +10,7 @@ extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,

extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
extern int timekeeping_inject_offset(struct timespec *ts);
extern void timekeeping_warp_clock(void);
extern int timekeeping_suspend(void);
extern void timekeeping_resume(void);