Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Ingo Molnar:
 "The main changes in the timer code in this cycle were:

   - Clockevent updates:

      - timer-of framework cleanups. (Geert Uytterhoeven)

      - Use timer-of for the renesas-ostm and the device name to prevent
        name collision in case of multiple timers. (Geert Uytterhoeven)

      - Check if there is an error after calling of_clk_get in asm9260
        (Chuhong Yuan)

   - ABI fix: Zero out high order bits of nanoseconds on compat
     syscalls. This got broken a year ago, with apparently no side
     effects so far.

     Since the kernel would use random data otherwise I don't think we'd
     have other options but to fix the bug, even if there was a side
     effect to applications (Dmitry Safonov)

   - Optimize ns_to_timespec64() on 32-bit systems: move away from
     div_s64_rem() which can be slow, to div_u64_rem() which is faster
     (Arnd Bergmann)

   - Annotate KCSAN-reported false positive data races in
     hrtimer_is_queued() users by moving timer->state handling over to
     the READ_ONCE()/WRITE_ONCE() APIs. This documents these accesses
     (Eric Dumazet)

   - Misc cleanups and small fixes"

[ I undid the "ABI fix" and updated the comments instead. The reason
  there were apparently no side effects is that the fix was a no-op.

  The updated comment is to say _why_ it was a no-op.    - Linus ]

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  time: Zero the upper 32-bits in __kernel_timespec on 32-bit
  time: Rename tsk->real_start_time to ->start_boottime
  hrtimer: Remove the comment about not used HRTIMER_SOFTIRQ
  time: Fix spelling mistake in comment
  time: Optimize ns_to_timespec64()
  hrtimer: Annotate lockless access to timer->state
  clocksource/drivers/asm9260: Add a check for of_clk_get
  clocksource/drivers/renesas-ostm: Use unique device name instead of ostm
  clocksource/drivers/renesas-ostm: Convert to timer_of
  clocksource/drivers/timer-of: Use unique device name instead of timer
  clocksource/drivers/timer-of: Convert last full_name to %pOF
Commit 043cf46825
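Most of the driver delta below is the conversion of the renesas-ostm driver to the timer-of helper framework. As orientation, here is a minimal sketch of what a timer-of based clockevent driver looks like. This is not code from this merge: the foo_* names, the compatible string and the register offset are made up, while timer_of_init(), timer_of_base(), timer_of_rate(), to_timer_of() and TIMER_OF_DECLARE() are the real framework entry points.

/* Hypothetical driver skeleton built on the timer-of framework. */
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>

#include "timer-of.h"

#define FOO_CMP		0x04	/* made-up compare register offset */

static int foo_set_next_event(unsigned long delta,
			      struct clock_event_device *ced)
{
	struct timer_of *to = to_timer_of(ced);

	writel(delta, timer_of_base(to) + FOO_CMP);

	return 0;
}

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	/* timer_of requests the IRQ with &to->clkevt as dev_id */
	struct clock_event_device *ced = dev_id;

	if (ced->event_handler)
		ced->event_handler(ced);

	return IRQ_HANDLED;
}

static struct timer_of foo_to = {
	.flags = TIMER_OF_BASE | TIMER_OF_CLOCK | TIMER_OF_IRQ,
	.clkevt = {
		.features = CLOCK_EVT_FEAT_ONESHOT,
		.rating = 300,
		.set_next_event = foo_set_next_event,
	},
	.of_irq = {
		.handler = foo_timer_interrupt,
		.flags = IRQF_TIMER,
	},
};

static int __init foo_timer_init(struct device_node *np)
{
	int ret;

	/* Maps the registers, enables the clock and requests the IRQ */
	ret = timer_of_init(np, &foo_to);
	if (ret)
		return ret;

	foo_to.clkevt.cpumask = cpumask_of(0);
	clockevents_config_and_register(&foo_to.clkevt, timer_of_rate(&foo_to),
					0xf, 0xffffffff);

	return 0;
}
TIMER_OF_DECLARE(foo_timer, "vendor,foo-timer", foo_timer_init);

This is essentially what the renesas-ostm hunks below do: the hand-rolled of_iomap()/of_clk_get()/request_irq() sequence and its error unwinding collapse into a flags word plus timer_of_init()/timer_of_cleanup(). When the driver leaves clkevt.name unset, timer_of_init() fills it in from the device node, which is where the "unique device name" change from np->name to np->full_name comes in.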
@@ -528,6 +528,7 @@ config SH_TIMER_MTU2
 config RENESAS_OSTM
 	bool "Renesas OSTM timer driver" if COMPILE_TEST
 	select CLKSRC_MMIO
+	select TIMER_OF
 	help
 	  Enables the support for the Renesas OSTM.
 
@@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np)
 	}
 
 	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_err("Failed to get clk!\n");
+		return PTR_ERR(clk);
+	}
 
 	ret = clk_prepare_enable(clk);
 	if (ret) {
@@ -6,14 +6,14 @@
  * Copyright (C) 2017 Chris Brandt
  */
 
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/clk.h>
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/sched_clock.h>
 #include <linux/slab.h>
 
+#include "timer-of.h"
 
 /*
  * The OSTM contains independent channels.
  * The first OSTM channel probed will be set up as a free running
@@ -24,12 +24,6 @@
  * driven clock event.
  */
 
-struct ostm_device {
-	void __iomem *base;
-	unsigned long ticks_per_jiffy;
-	struct clock_event_device ced;
-};
-
 static void __iomem *system_clock;	/* For sched_clock() */
 
 /* OSTM REGISTERS */
@@ -47,41 +41,32 @@ static void __iomem *system_clock;	/* For sched_clock() */
 #define CTL_ONESHOT	0x02
 #define CTL_FREERUN	0x02
 
-static struct ostm_device *ced_to_ostm(struct clock_event_device *ced)
-{
-	return container_of(ced, struct ostm_device, ced);
-}
-
-static void ostm_timer_stop(struct ostm_device *ostm)
+static void ostm_timer_stop(struct timer_of *to)
 {
-	if (readb(ostm->base + OSTM_TE) & TE) {
-		writeb(TT, ostm->base + OSTM_TT);
+	if (readb(timer_of_base(to) + OSTM_TE) & TE) {
+		writeb(TT, timer_of_base(to) + OSTM_TT);
 
 		/*
 		 * Read back the register simply to confirm the write operation
 		 * has completed since I/O writes can sometimes get queued by
 		 * the bus architecture.
 		 */
-		while (readb(ostm->base + OSTM_TE) & TE)
+		while (readb(timer_of_base(to) + OSTM_TE) & TE)
 			;
 	}
 }
 
-static int __init ostm_init_clksrc(struct ostm_device *ostm, unsigned long rate)
+static int __init ostm_init_clksrc(struct timer_of *to)
 {
-	/*
-	 * irq not used (clock sources don't use interrupts)
-	 */
-
-	ostm_timer_stop(ostm);
+	ostm_timer_stop(to);
 
-	writel(0, ostm->base + OSTM_CMP);
-	writeb(CTL_FREERUN, ostm->base + OSTM_CTL);
-	writeb(TS, ostm->base + OSTM_TS);
+	writel(0, timer_of_base(to) + OSTM_CMP);
+	writeb(CTL_FREERUN, timer_of_base(to) + OSTM_CTL);
+	writeb(TS, timer_of_base(to) + OSTM_TS);
 
-	return clocksource_mmio_init(ostm->base + OSTM_CNT,
-			"ostm", rate,
-			300, 32, clocksource_mmio_readl_up);
+	return clocksource_mmio_init(timer_of_base(to) + OSTM_CNT,
+				     to->np->full_name, timer_of_rate(to), 300,
+				     32, clocksource_mmio_readl_up);
 }
 
 static u64 notrace ostm_read_sched_clock(void)
@@ -89,87 +74,75 @@ static u64 notrace ostm_read_sched_clock(void)
 	return readl(system_clock);
 }
 
-static void __init ostm_init_sched_clock(struct ostm_device *ostm,
-			unsigned long rate)
+static void __init ostm_init_sched_clock(struct timer_of *to)
 {
-	system_clock = ostm->base + OSTM_CNT;
-	sched_clock_register(ostm_read_sched_clock, 32, rate);
+	system_clock = timer_of_base(to) + OSTM_CNT;
+	sched_clock_register(ostm_read_sched_clock, 32, timer_of_rate(to));
 }
 
 static int ostm_clock_event_next(unsigned long delta,
-			struct clock_event_device *ced)
+				 struct clock_event_device *ced)
 {
-	struct ostm_device *ostm = ced_to_ostm(ced);
+	struct timer_of *to = to_timer_of(ced);
 
-	ostm_timer_stop(ostm);
+	ostm_timer_stop(to);
 
-	writel(delta, ostm->base + OSTM_CMP);
-	writeb(CTL_ONESHOT, ostm->base + OSTM_CTL);
-	writeb(TS, ostm->base + OSTM_TS);
+	writel(delta, timer_of_base(to) + OSTM_CMP);
+	writeb(CTL_ONESHOT, timer_of_base(to) + OSTM_CTL);
+	writeb(TS, timer_of_base(to) + OSTM_TS);
 
 	return 0;
 }
 
 static int ostm_shutdown(struct clock_event_device *ced)
 {
-	struct ostm_device *ostm = ced_to_ostm(ced);
+	struct timer_of *to = to_timer_of(ced);
 
-	ostm_timer_stop(ostm);
+	ostm_timer_stop(to);
 
 	return 0;
 }
 static int ostm_set_periodic(struct clock_event_device *ced)
 {
-	struct ostm_device *ostm = ced_to_ostm(ced);
+	struct timer_of *to = to_timer_of(ced);
 
 	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
-		ostm_timer_stop(ostm);
+		ostm_timer_stop(to);
 
-	writel(ostm->ticks_per_jiffy - 1, ostm->base + OSTM_CMP);
-	writeb(CTL_PERIODIC, ostm->base + OSTM_CTL);
-	writeb(TS, ostm->base + OSTM_TS);
+	writel(timer_of_period(to) - 1, timer_of_base(to) + OSTM_CMP);
+	writeb(CTL_PERIODIC, timer_of_base(to) + OSTM_CTL);
+	writeb(TS, timer_of_base(to) + OSTM_TS);
 
 	return 0;
 }
 
 static int ostm_set_oneshot(struct clock_event_device *ced)
 {
-	struct ostm_device *ostm = ced_to_ostm(ced);
+	struct timer_of *to = to_timer_of(ced);
 
-	ostm_timer_stop(ostm);
+	ostm_timer_stop(to);
 
 	return 0;
 }
 
 static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id)
 {
-	struct ostm_device *ostm = dev_id;
+	struct clock_event_device *ced = dev_id;
 
-	if (clockevent_state_oneshot(&ostm->ced))
-		ostm_timer_stop(ostm);
+	if (clockevent_state_oneshot(ced))
+		ostm_timer_stop(to_timer_of(ced));
 
 	/* notify clockevent layer */
-	if (ostm->ced.event_handler)
-		ostm->ced.event_handler(&ostm->ced);
+	if (ced->event_handler)
+		ced->event_handler(ced);
 
 	return IRQ_HANDLED;
 }
 
-static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
-			unsigned long rate)
+static int __init ostm_init_clkevt(struct timer_of *to)
 {
-	struct clock_event_device *ced = &ostm->ced;
-	int ret = -ENXIO;
-
-	ret = request_irq(irq, ostm_timer_interrupt,
-			  IRQF_TIMER | IRQF_IRQPOLL,
-			  "ostm", ostm);
-	if (ret) {
-		pr_err("ostm: failed to request irq\n");
-		return ret;
-	}
+	struct clock_event_device *ced = &to->clkevt;
 
-	ced->name = "ostm";
 	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
 	ced->set_state_shutdown = ostm_shutdown;
 	ced->set_state_periodic = ostm_set_periodic;
@@ -178,79 +151,61 @@ static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
 	ced->shift = 32;
 	ced->rating = 300;
 	ced->cpumask = cpumask_of(0);
-	clockevents_config_and_register(ced, rate, 0xf, 0xffffffff);
+	clockevents_config_and_register(ced, timer_of_rate(to), 0xf,
+					0xffffffff);
 
 	return 0;
 }
 
 static int __init ostm_init(struct device_node *np)
 {
-	struct ostm_device *ostm;
-	int ret = -EFAULT;
-	struct clk *ostm_clk = NULL;
-	int irq;
-	unsigned long rate;
+	struct timer_of *to;
+	int ret;
 
-	ostm = kzalloc(sizeof(*ostm), GFP_KERNEL);
-	if (!ostm)
+	to = kzalloc(sizeof(*to), GFP_KERNEL);
+	if (!to)
 		return -ENOMEM;
 
-	ostm->base = of_iomap(np, 0);
-	if (!ostm->base) {
-		pr_err("ostm: failed to remap I/O memory\n");
-		goto err;
+	to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
+	if (system_clock) {
+		/*
+		 * clock sources don't use interrupts, clock events do
+		 */
+		to->flags |= TIMER_OF_IRQ;
+		to->of_irq.flags = IRQF_TIMER | IRQF_IRQPOLL;
+		to->of_irq.handler = ostm_timer_interrupt;
 	}
 
-	irq = irq_of_parse_and_map(np, 0);
-	if (irq < 0) {
-		pr_err("ostm: Failed to get irq\n");
-		goto err;
-	}
-
-	ostm_clk = of_clk_get(np, 0);
-	if (IS_ERR(ostm_clk)) {
-		pr_err("ostm: Failed to get clock\n");
-		ostm_clk = NULL;
-		goto err;
-	}
-
-	ret = clk_prepare_enable(ostm_clk);
-	if (ret) {
-		pr_err("ostm: Failed to enable clock\n");
-		goto err;
-	}
-
-	rate = clk_get_rate(ostm_clk);
-	ostm->ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+	ret = timer_of_init(np, to);
+	if (ret)
+		goto err_free;
 
 	/*
 	 * First probed device will be used as system clocksource. Any
 	 * additional devices will be used as clock events.
 	 */
 	if (!system_clock) {
-		ret = ostm_init_clksrc(ostm, rate);
-
-		if (!ret) {
-			ostm_init_sched_clock(ostm, rate);
-			pr_info("ostm: used for clocksource\n");
-		}
+		ret = ostm_init_clksrc(to);
+		if (ret)
+			goto err_cleanup;
+
+		ostm_init_sched_clock(to);
+		pr_info("%pOF: used for clocksource\n", np);
 	} else {
-		ret = ostm_init_clkevt(ostm, irq, rate);
+		ret = ostm_init_clkevt(to);
+		if (ret)
+			goto err_cleanup;
 
-		if (!ret)
-			pr_info("ostm: used for clock events\n");
-	}
-
-err:
-	if (ret) {
-		clk_disable_unprepare(ostm_clk);
-		iounmap(ostm->base);
-		kfree(ostm);
-		return ret;
+		pr_info("%pOF: used for clock events\n", np);
 	}
 
 	return 0;
+
+err_cleanup:
+	timer_of_cleanup(to);
+err_free:
+	kfree(to);
+	return ret;
 }
 
 TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
@@ -57,8 +57,8 @@ static __init int timer_of_irq_init(struct device_node *np,
 	if (of_irq->name) {
 		of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
 		if (ret < 0) {
-			pr_err("Failed to get interrupt %s for %s\n",
-			       of_irq->name, np->full_name);
+			pr_err("Failed to get interrupt %s for %pOF\n",
+			       of_irq->name, np);
 			return ret;
 		}
 	} else {
@@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
 	}
 
 	if (!to->clkevt.name)
-		to->clkevt.name = np->name;
+		to->clkevt.name = np->full_name;
 
 	to->np = np;
 
@@ -1131,7 +1131,7 @@ static int de_thread(struct task_struct *tsk)
 		 * also take its birthdate (always earlier than our own).
 		 */
 		tsk->start_time = leader->start_time;
-		tsk->real_start_time = leader->real_start_time;
+		tsk->start_boottime = leader->start_boottime;
 
 		BUG_ON(!same_thread_group(leader, tsk));
 		BUG_ON(has_group_leader_pid(tsk));
@@ -533,7 +533,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	nice = task_nice(task);
 
 	/* convert nsec -> ticks */
-	start_time = nsec_to_clock_t(task->real_start_time);
+	start_time = nsec_to_clock_t(task->start_boottime);
 
 	seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns));
 	seq_puts(m, " (");
@@ -456,12 +456,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
 
 extern bool hrtimer_active(const struct hrtimer *timer);
 
-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued = check, whether the timer is on one of the queues
+ * @timer:	Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
  */
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
 {
-	return timer->state & HRTIMER_STATE_ENQUEUED;
+	/* The READ_ONCE pairs with the update functions of timer->state */
+	return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
 }
 
 /*
@@ -533,8 +533,7 @@ enum
 	IRQ_POLL_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
-	HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
-			    numbering. Sigh! */
+	HRTIMER_SOFTIRQ,
 	RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
 
 	NR_SOFTIRQS
@@ -862,7 +862,7 @@ struct task_struct {
 	u64				start_time;
 
 	/* Boot based time in nsecs: */
-	u64				real_start_time;
+	u64				start_boottime;
 
 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
 	unsigned long			min_flt;
@@ -2185,7 +2185,7 @@ static __latent_entropy struct task_struct *copy_process(
 	 */
 
 	p->start_time = ktime_get_ns();
-	p->real_start_time = ktime_get_boottime_ns();
+	p->start_boottime = ktime_get_boottime_ns();
 
 	/*
 	 * Make it visible to the rest of the system, but dont wake it up yet.
@@ -966,7 +966,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,
 
 	base->cpu_base->active_bases |= 1 << base->index;
 
-	timer->state = HRTIMER_STATE_ENQUEUED;
+	/* Pairs with the lockless read in hrtimer_is_queued() */
+	WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
 
 	return timerqueue_add(&base->active, &timer->node);
 }
@@ -988,7 +989,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
 	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
 	u8 state = timer->state;
 
-	timer->state = newstate;
+	/* Pairs with the lockless read in hrtimer_is_queued() */
+	WRITE_ONCE(timer->state, newstate);
 	if (!(state & HRTIMER_STATE_ENQUEUED))
 		return;
 
@@ -1013,8 +1015,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
 static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
 {
-	if (hrtimer_is_queued(timer)) {
-		u8 state = timer->state;
+	u8 state = timer->state;
+
+	if (state & HRTIMER_STATE_ENQUEUED) {
 		int reprogram;
 
 		/*
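The hrtimer hunks above all follow one idea: timer->state is updated with the timer base lock held but read locklessly in hrtimer_is_queued(), so the updates are wrapped in WRITE_ONCE() and the lockless reader in READ_ONCE(). A generic sketch of that idiom, with made-up foo names (not code from this merge):

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	spinlock_t lock;
	u8 state;	/* written under foo::lock, read locklessly */
};

/* Caller holds f->lock */
static void foo_set_state(struct foo *f, u8 new_state)
{
	/* Pairs with the lockless READ_ONCE() in foo_is_active() */
	WRITE_ONCE(f->state, new_state);
}

/* Lockless: only a snapshot, which may already be stale */
static bool foo_is_active(struct foo *f)
{
	return READ_ONCE(f->state) != 0;
}

The annotation adds no cross-CPU ordering; it prevents the compiler from tearing or refetching the access and documents that the lockless read is intentional, which is what silences the KCSAN reports mentioned in the pull message.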
@@ -179,7 +179,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
 		return error;
 
 	if (tz) {
-		/* Verify we're witin the +-15 hrs range */
+		/* Verify we're within the +-15 hrs range */
 		if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
 			return -EINVAL;
 
@@ -548,18 +548,21 @@ EXPORT_SYMBOL(set_normalized_timespec64);
  */
 struct timespec64 ns_to_timespec64(const s64 nsec)
 {
-	struct timespec64 ts;
+	struct timespec64 ts = { 0, 0 };
 	s32 rem;
 
-	if (!nsec)
-		return (struct timespec64) {0, 0};
-
-	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
-	if (unlikely(rem < 0)) {
-		ts.tv_sec--;
-		rem += NSEC_PER_SEC;
+	if (likely(nsec > 0)) {
+		ts.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+		ts.tv_nsec = rem;
+	} else if (nsec < 0) {
+		/*
+		 * With negative times, tv_sec points to the earlier
+		 * second, and tv_nsec counts the nanoseconds since
+		 * then, so tv_nsec is always a positive number.
+		 */
+		ts.tv_sec = -div_u64_rem(-nsec - 1, NSEC_PER_SEC, &rem) - 1;
+		ts.tv_nsec = NSEC_PER_SEC - rem - 1;
 	}
-	ts.tv_nsec = rem;
 
 	return ts;
 }
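For reference, a small standalone program (not kernel code; plain C division stands in for div_u64_rem()) that mirrors the new ns_to_timespec64() logic and shows the sign convention the comment above describes: for negative inputs, tv_sec names the earlier second and tv_nsec stays in [0, NSEC_PER_SEC).

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts64 {
	int64_t tv_sec;
	long	tv_nsec;
};

static struct ts64 ns_to_ts64(int64_t nsec)
{
	struct ts64 ts = { 0, 0 };

	if (nsec > 0) {
		ts.tv_sec = (uint64_t)nsec / NSEC_PER_SEC;
		ts.tv_nsec = (uint64_t)nsec % NSEC_PER_SEC;
	} else if (nsec < 0) {
		/* tv_sec points at the earlier second, tv_nsec stays positive */
		uint64_t rem = (uint64_t)(-nsec - 1) % NSEC_PER_SEC;

		ts.tv_sec = -(int64_t)((uint64_t)(-nsec - 1) / NSEC_PER_SEC) - 1;
		ts.tv_nsec = NSEC_PER_SEC - rem - 1;
	}
	return ts;
}

int main(void)
{
	struct ts64 a = ns_to_ts64(-1);		/* expect { -1, 999999999 } */
	struct ts64 b = ns_to_ts64(1500000000);	/* expect {  1, 500000000 } */

	printf("%lld %ld\n", (long long)a.tv_sec, a.tv_nsec);
	printf("%lld %ld\n", (long long)b.tv_sec, b.tv_nsec);
	return 0;
}

The point of the rewrite, per the pull message, is that both branches only need an unsigned division, and div_u64_rem() is cheaper than the signed div_s64_rem() path on 32-bit systems.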
@@ -878,10 +881,11 @@ int get_timespec64(struct timespec64 *ts,
 
 	ts->tv_sec = kts.tv_sec;
 
-	/* Zero out the padding for 32 bit systems or in compat mode */
+	/* Zero out the padding in compat mode */
 	if (in_compat_syscall())
 		kts.tv_nsec &= 0xFFFFFFFFUL;
 
+	/* In 32-bit mode, this drops the padding */
 	ts->tv_nsec = kts.tv_nsec;
 
 	return 0;