WSL2-Linux-Kernel/include/linux/time64.h

#ifndef _LINUX_TIME64_H
#define _LINUX_TIME64_H

#include <uapi/linux/time.h>
#include <linux/math64.h>

typedef __s64 time64_t;
typedef __u64 timeu64_t;
/*
* This wants to go into uapi/linux/time.h once we have agreed on the
* userspace interfaces.
*/
#if __BITS_PER_LONG == 64
# define timespec64 timespec
# define itimerspec64 itimerspec
#else
struct timespec64 {
	time64_t	tv_sec;			/* seconds */
	long		tv_nsec;		/* nanoseconds */
};

struct itimerspec64 {
	struct timespec64 it_interval;
	struct timespec64 it_value;
};
#endif
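
/*
* On 64-bit architectures the native timespec already carries a 64-bit
* tv_sec, so timespec64 and itimerspec64 can simply alias the classic
* structures; the explicit definitions above are only needed when long
* is 32 bits wide.
*/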
/* Parameters used to convert the timespec values: */
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
#define NSEC_PER_USEC 1000L
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000LL
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
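
/*
* Note on the construction above: (u64)1 << 63 is 0x8000000000000000;
* inverting it gives 0x7fffffffffffffff, the largest s64, without ever
* left-shifting a 1 into the sign bit of a signed type, which would be
* undefined behaviour.
*/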
#if __BITS_PER_LONG == 64
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
{
	return ts64;
}

static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
{
	return ts;
}

static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
	return *its64;
}

static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
	return *its;
}
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
# define timespec64_add timespec_add
# define timespec64_sub timespec_sub
# define timespec64_valid timespec_valid
# define timespec64_valid_strict timespec_valid_strict
# define timespec64_to_ns timespec_to_ns
# define ns_to_timespec64 ns_to_timespec
# define timespec64_add_ns timespec_add_ns
#else
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
{
	struct timespec ret;

	ret.tv_sec = (time_t)ts64.tv_sec;
	ret.tv_nsec = ts64.tv_nsec;
	return ret;
}

static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
{
	struct timespec64 ret;

	ret.tv_sec = ts.tv_sec;
	ret.tv_nsec = ts.tv_nsec;
	return ret;
}

static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
	struct itimerspec ret;

	ret.it_interval = timespec64_to_timespec(its64->it_interval);
	ret.it_value = timespec64_to_timespec(its64->it_value);
	return ret;
}

static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
	struct itimerspec64 ret;

	ret.it_interval = timespec_to_timespec64(its->it_interval);
	ret.it_value = timespec_to_timespec64(its->it_value);
	return ret;
}

static inline int timespec64_equal(const struct timespec64 *a,
				   const struct timespec64 *b)
{
	return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}
/*
* lhs < rhs: return <0
* lhs == rhs: return 0
* lhs > rhs: return >0
*/
static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
{
	if (lhs->tv_sec < rhs->tv_sec)
		return -1;
	if (lhs->tv_sec > rhs->tv_sec)
		return 1;
	return lhs->tv_nsec - rhs->tv_nsec;
}
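
/*
* A minimal usage sketch, not part of the original header: callers
* typically only test the sign of the result. The helper name below is
* hypothetical.
*/
#if 0 /* example only */
static inline bool deadline_expired(const struct timespec64 *now,
				    const struct timespec64 *deadline)
{
	/* >= 0 means *now is at or past *deadline */
	return timespec64_compare(now, deadline) >= 0;
}
#endif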
extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
static inline struct timespec64 timespec64_add(struct timespec64 lhs,
					       struct timespec64 rhs)
{
	struct timespec64 ts_delta;

	set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
				  lhs.tv_nsec + rhs.tv_nsec);
	return ts_delta;
}
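
/*
* Worked example: {tv_sec = 1, tv_nsec = 900000000} plus
* {tv_sec = 0, tv_nsec = 200000000} has a raw nanosecond sum of
* 1100000000; set_normalized_timespec64() folds the carry into tv_sec,
* yielding {tv_sec = 2, tv_nsec = 100000000}.
*/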
/*
* sub = lhs - rhs, in normalized form
*/
static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
					       struct timespec64 rhs)
{
	struct timespec64 ts_delta;

	set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
				  lhs.tv_nsec - rhs.tv_nsec);
	return ts_delta;
}
/*
* Returns true if the timespec64 is normalized, false if denormalized:
*/
static inline bool timespec64_valid(const struct timespec64 *ts)
{
	/* Dates before 1970 are bogus */
	if (ts->tv_sec < 0)
		return false;
	/* Can't have more nanoseconds than a second */
	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return false;
	return true;
}

static inline bool timespec64_valid_strict(const struct timespec64 *ts)
{
	if (!timespec64_valid(ts))
		return false;
	/* Disallow values that could overflow ktime_t */
	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
		return false;
	return true;
}
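
/*
* The strict variant additionally rejects tv_sec values at or beyond
* KTIME_SEC_MAX, for which a later conversion to a scalar ktime_t
* nanosecond count could overflow.
*/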
/**
* timespec64_to_ns - Convert timespec64 to nanoseconds
* @ts: pointer to the timespec64 variable to be converted
*
* Returns the scalar nanosecond representation of the timespec64
* parameter.
*/
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
	return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
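
/*
* A minimal sketch, not part of the original header: combining
* timespec64_sub() with timespec64_to_ns() to measure an interval as a
* scalar nanosecond count. The helper name is hypothetical.
*/
#if 0 /* example only */
static inline s64 elapsed_ns(const struct timespec64 *start,
			     const struct timespec64 *end)
{
	struct timespec64 delta = timespec64_sub(*end, *start);

	return timespec64_to_ns(&delta);
}
#endif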
/**
* ns_to_timespec64 - Convert nanoseconds to timespec64
* @nsec: the nanoseconds value to be converted
*
* Returns the timespec64 representation of the nsec parameter.
*/
extern struct timespec64 ns_to_timespec64(const s64 nsec);
/**
* timespec64_add_ns - Adds nanoseconds to a timespec64
* @a: pointer to timespec64 to be incremented
* @ns: unsigned nanoseconds value to be added
*
* This must always be inlined because it's used from the x86-64 vdso,
* which cannot call other kernel functions.
*/
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
{
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}
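
/*
* A minimal sketch, not part of the original header: advancing a
* timestamp by a fixed period, here one hypothetical 10 ms tick.
*/
#if 0 /* example only */
static inline void advance_one_tick(struct timespec64 *ts)
{
	/* 10 * NSEC_PER_MSEC = 10000000 ns; the nsec carry is folded into tv_sec */
	timespec64_add_ns(ts, 10 * NSEC_PER_MSEC);
}
#endif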
#endif
/*
* timespec64_add_safe assumes both values are positive and checks for
* overflow. It will return TIME64_MAX in case of overflow.
*/
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
					     const struct timespec64 rhs);
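
/*
* A minimal sketch, not part of the original header: deriving an
* absolute deadline from "now" plus a caller-supplied timeout. Per the
* comment above, a saturated result comes back with tv_sec set to
* TIME64_MAX. The helper name is hypothetical.
*/
#if 0 /* example only */
static inline struct timespec64 deadline_after(struct timespec64 now,
					       struct timespec64 timeout)
{
	struct timespec64 deadline = timespec64_add_safe(now, timeout);

	/* deadline.tv_sec == TIME64_MAX means the sum overflowed */
	return deadline;
}
#endif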
#endif /* _LINUX_TIME64_H */