WSL2-Linux-Kernel/drivers/clocksource/timer-riscv.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * All RISC-V systems have a timer attached to every hart. These timers can
 * either be read from the "time" and "timeh" CSRs, using the SBI to set up
 * events, or be accessed directly through MMIO registers.
 */
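
/*
 * For illustration only (the real helper lives in the arch code): on RV32,
 * get_cycles64() has to combine the "time"/"timeh" CSR pair, typically with
 * a re-read loop so a low-half wraparound between the two reads is caught:
 *
 *	do {
 *		hi = csr_read(CSR_TIMEH);
 *		lo = csr_read(CSR_TIME);
 *	} while (csr_read(CSR_TIMEH) != hi);
 *	cycles = ((u64)hi << 32) | lo;
 */
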
#define pr_fmt(fmt) "riscv-timer: " fmt
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/timex.h>
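
/*
 * riscv_sstc_available is enabled at boot when the Sstc extension is
 * detected, letting riscv_clock_next_event() program the stimecmp CSR
 * directly instead of trapping into SBI firmware.
 * riscv_timer_cannot_wake_cpu mirrors the "riscv,timer-cannot-wake-cpu" DT
 * property, set on platforms whose timer stops delivering events while a
 * CPU is suspended.
 */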
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
static bool riscv_timer_cannot_wake_cpu;
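
/*
 * Program the next clockevent expiry: with Sstc the absolute deadline is
 * written into stimecmp (split across two CSRs on 32-bit), otherwise it is
 * handed to the SBI set_timer call.
 */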
static int riscv_clock_next_event(unsigned long delta,
		struct clock_event_device *ce)
{
	u64 next_tval = get_cycles64() + delta;

	csr_set(CSR_IE, IE_TIE);
	if (static_branch_likely(&riscv_sstc_available)) {
#if defined(CONFIG_32BIT)
		csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
		csr_write(CSR_STIMECMPH, next_tval >> 32);
#else
		csr_write(CSR_STIMECMP, next_tval);
#endif
	} else
		sbi_set_timer(next_tval);

	return 0;
}
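
/*
 * riscv_clock_event_irq is the Linux IRQ mapped for the hart-local timer
 * interrupt; each hart also gets its own clock_event_device below, whose
 * cpumask and irq are filled in from riscv_timer_starting_cpu().
 */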
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name = "riscv_timer_clockevent",
Revert "clocksource/drivers/riscv: Events are stopped during CPU suspend" This reverts commit 232ccac1bd9b5bfe73895f527c08623e7fa0752d. On the subject of suspend, the RISC-V SBI spec states: This does not cover whether any given events actually reach the hart or not, just what the hart will do if it receives an event. On PolarFire SoC, and potentially other SiFive based implementations, events from the RISC-V timer do reach a hart during suspend. This is not the case for the implementation on the Allwinner D1 - there timer events are not received during suspend. To fix this, the CLOCK_EVT_FEAT_C3STOP (mis)feature was enabled for the timer driver - but this has broken both RCU stall detection and timers generally on PolarFire SoC and potentially other SiFive based implementations. If an AXI read to the PCIe controller on PolarFire SoC times out, the system will stall, however, with CLOCK_EVT_FEAT_C3STOP active, the system just locks up without RCU stalling: io scheduler mq-deadline registered io scheduler kyber registered microchip-pcie 2000000000.pcie: host bridge /soc/pcie@2000000000 ranges: microchip-pcie 2000000000.pcie: MEM 0x2008000000..0x2087ffffff -> 0x0008000000 microchip-pcie 2000000000.pcie: sec error in pcie2axi buffer microchip-pcie 2000000000.pcie: ded error in pcie2axi buffer microchip-pcie 2000000000.pcie: axi read request error microchip-pcie 2000000000.pcie: axi read timeout microchip-pcie 2000000000.pcie: sec error in pcie2axi buffer microchip-pcie 2000000000.pcie: ded error in pcie2axi buffer microchip-pcie 2000000000.pcie: sec error in pcie2axi buffer microchip-pcie 2000000000.pcie: ded error in pcie2axi buffer microchip-pcie 2000000000.pcie: sec error in pcie2axi buffer microchip-pcie 2000000000.pcie: ded error in pcie2axi buffer Freeing initrd memory: 7332K Similarly issues were reported with clock_nanosleep() - with a test app that sleeps each cpu for 6, 5, 4, 3 ms respectively, HZ=250 & the blamed commit in place, the sleep times are rounded up to the next jiffy: == CPU: 1 == == CPU: 2 == == CPU: 3 == == CPU: 4 == Mean: 7.974992 Mean: 7.976534 Mean: 7.962591 Mean: 3.952179 Std Dev: 0.154374 Std Dev: 0.156082 Std Dev: 0.171018 Std Dev: 0.076193 Hi: 9.472000 Hi: 10.495000 Hi: 8.864000 Hi: 4.736000 Lo: 6.087000 Lo: 6.380000 Lo: 4.872000 Lo: 3.403000 Samples: 521 Samples: 521 Samples: 521 Samples: 521 Fortunately, the D1 has a second timer, which is "currently used in preference to the RISC-V/SBI timer driver" so a revert here does not hurt operation of D1 in its current form. Ultimately, a DeviceTree property (or node) will be added to encode the behaviour of the timers, but until then revert the addition of CLOCK_EVT_FEAT_C3STOP. Fixes: 232ccac1bd9b ("clocksource/drivers/riscv: Events are stopped during CPU suspend") Signed-off-by: Conor Dooley <conor.dooley@microchip.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Palmer Dabbelt <palmer@rivosinc.com> Acked-by: Palmer Dabbelt <palmer@rivosinc.com> Acked-by: Samuel Holland <samuel@sholland.org> Link: https://lore.kernel.org/linux-riscv/YzYTNQRxLr7Q9JR0@spud/ Link: https://github.com/riscv-non-isa/riscv-sbi-doc/issues/98/ Link: https://lore.kernel.org/linux-riscv/bf6d3b1f-f703-4a25-833e-972a44a04114@sholland.org/ Link: https://lore.kernel.org/r/20221122121620.3522431-1-conor.dooley@microchip.com
2022-11-22 15:16:21 +03:00
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.rating = 100,
	.set_next_event = riscv_clock_next_event,
};

/*
 * It is guaranteed that all the timers across all the harts are synchronized
 * within one tick of each other, so while this could technically go
 * backwards when hopping between CPUs, practically it won't happen.
 */
static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
{
	return get_cycles64();
}
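
/*
 * Backs sched_clock(). Must stay notrace: the function graph tracer reads
 * the tracing clock while pushing return entries, so tracing this function
 * would recurse endlessly.
 */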
static u64 notrace riscv_sched_clock(void)
{
	return get_cycles64();
}
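
/*
 * The time CSR is a continuously running counter, so it can also serve the
 * vDSO fast path when GENERIC_GETTIMEOFDAY is enabled.
 */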
static struct clocksource riscv_clocksource = {
	.name = "riscv_clocksource",
	.rating = 400,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.read = riscv_clocksource_rdtime,
#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)
	.vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
#else
	.vdso_clock_mode = VDSO_CLOCKMODE_NONE,
#endif
};
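
/*
 * CPU hotplug "starting" callback: register this hart's clockevent device
 * and unmask its per-CPU timer interrupt. C3STOP is only advertised when
 * the platform says its timer cannot wake a suspended CPU.
 */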
static int riscv_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);

	ce->cpumask = cpumask_of(cpu);
	ce->irq = riscv_clock_event_irq;
	if (riscv_timer_cannot_wake_cpu)
		ce->features |= CLOCK_EVT_FEAT_C3STOP;
	clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);

	enable_percpu_irq(riscv_clock_event_irq,
			  irq_get_trigger_type(riscv_clock_event_irq));
	return 0;
}
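
/* CPU hotplug teardown counterpart: just mask the per-CPU timer interrupt. */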
static int riscv_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(riscv_clock_event_irq);
	return 0;
}
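
/*
 * Exported so other timer users (the RISC-V KVM timer, for example) can
 * reuse the mult/shift factors computed for the registered clocksource.
 */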
void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
{
	*mult = riscv_clocksource.mult;
	*shift = riscv_clocksource.shift;
}
EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);

/* called directly from the low-level interrupt handler */
static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

	csr_clear(CSR_IE, IE_TIE);
	evdev->event_handler(evdev);

	return IRQ_HANDLED;
}
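
/*
 * Called once per CPU node via TIMER_OF_DECLARE(); only the invocation on
 * the boot CPU does the global work: mapping the timer IRQ, registering the
 * clocksource and sched_clock, requesting the per-CPU IRQ, detecting Sstc
 * and installing the CPU hotplug callbacks.
 */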
static int __init riscv_timer_init_dt(struct device_node *n)
{
	int cpuid, error;
	unsigned long hartid;
	struct device_node *child;
	struct irq_domain *domain;

	error = riscv_of_processor_hartid(n, &hartid);
	if (error < 0) {
		pr_warn("Not valid hartid for node [%pOF] error = [%lu]\n",
			n, hartid);
		return error;
	}

	cpuid = riscv_hartid_to_cpuid(hartid);
	if (cpuid < 0) {
		pr_warn("Invalid cpuid for hartid [%lu]\n", hartid);
		return cpuid;
	}

	if (cpuid != smp_processor_id())
		return 0;

	child = of_find_compatible_node(NULL, NULL, "riscv,timer");
	if (child) {
		riscv_timer_cannot_wake_cpu = of_property_read_bool(child,
					"riscv,timer-cannot-wake-cpu");
		of_node_put(child);
	}

	domain = NULL;
	child = of_get_compatible_child(n, "riscv,cpu-intc");
	if (!child) {
		pr_err("Failed to find INTC node [%pOF]\n", n);
		return -ENODEV;
	}
	domain = irq_find_host(child);
	of_node_put(child);
	if (!domain) {
		pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
		return -ENODEV;
	}

	riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
	if (!riscv_clock_event_irq) {
		pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
		return -ENODEV;
	}

	pr_info("%s: Registering clocksource cpuid [%d] hartid [%lu]\n",
		__func__, cpuid, hartid);
	error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
	if (error) {
		pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
		       error, cpuid);
		return error;
	}

	sched_clock_register(riscv_sched_clock, 64, riscv_timebase);

	error = request_percpu_irq(riscv_clock_event_irq,
				   riscv_timer_interrupt,
				   "riscv-timer", &riscv_clock_event);
	if (error) {
		pr_err("registering percpu irq failed [%d]\n", error);
		return error;
	}

	if (riscv_isa_extension_available(NULL, SSTC)) {
		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
		static_branch_enable(&riscv_sstc_available);
	}

	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
				  "clockevents/riscv/timer:starting",
				  riscv_timer_starting_cpu, riscv_timer_dying_cpu);
	if (error)
		pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
		       error);

	return error;
}

TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt);