RISC-V: Access CSRs using CSR numbers
We should prefer accessing CSRs using their CSR numbers because:

1. It compiles fine with older toolchains.
2. We can use the latest CSR names in the #define macro names for the CSR numbers, as per the RISC-V spec.
3. We can access newly added CSRs even if the toolchain does not yet recognize them by name.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
Parent: 6dcaf00487
Commit: a3182c91ef
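For readers skimming the diff below: the key trick is that the argument reaching __ASM_STR() has already been macro-expanded, while the old "#csr" stringified the argument literally. Here is a minimal standalone sketch of the effect, not taken from the patch itself (assumptions: a RISC-V compiler; __ASM_STR() and CSR_SSTATUS copied here to mirror asm/asm.h and the new defines in csr.h; read_sstatus() is a made-up caller):

/*
 * Because the csr argument is expanded before __ASM_STR() stringifies it,
 * csr_read(CSR_SSTATUS) emits "csrr %0, 0x100", which any binutils accepts.
 * The old "#csr" form stringified the argument unexpanded, so it only worked
 * with symbolic names (e.g. "sstatus") that the assembler already knew.
 */
#define __ASM_STR(x)    #x
#define CSR_SSTATUS     0x100

#define csr_read(csr)                                           \
({                                                              \
        register unsigned long __v;                             \
        __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr)        \
                              : "=r" (__v) :                    \
                              : "memory");                      \
        __v;                                                    \
})

unsigned long read_sstatus(void)
{
        return csr_read(CSR_SSTATUS);   /* works even without priv-1.10 CSR names */
}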
arch/riscv/include/asm/csr.h:

@@ -14,6 +14,7 @@
 #ifndef _ASM_RISCV_CSR_H
 #define _ASM_RISCV_CSR_H
 
+#include <asm/asm.h>
 #include <linux/const.h>
 
 /* Status register flags */
@@ -79,12 +80,29 @@
 #define SIE_STIE        (_AC(0x1, UL) << IRQ_S_TIMER)
 #define SIE_SEIE        (_AC(0x1, UL) << IRQ_S_EXT)
 
+#define CSR_CYCLE       0xc00
+#define CSR_TIME        0xc01
+#define CSR_INSTRET     0xc02
+#define CSR_SSTATUS     0x100
+#define CSR_SIE         0x104
+#define CSR_STVEC       0x105
+#define CSR_SCOUNTEREN  0x106
+#define CSR_SSCRATCH    0x140
+#define CSR_SEPC        0x141
+#define CSR_SCAUSE      0x142
+#define CSR_STVAL       0x143
+#define CSR_SIP         0x144
+#define CSR_SATP        0x180
+#define CSR_CYCLEH      0xc80
+#define CSR_TIMEH       0xc81
+#define CSR_INSTRETH    0xc82
+
 #ifndef __ASSEMBLY__
 
 #define csr_swap(csr, val)                                      \
 ({                                                              \
         unsigned long __v = (unsigned long)(val);               \
-        __asm__ __volatile__ ("csrrw %0, " #csr ", %1"          \
+        __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
                               : "=r" (__v) : "rK" (__v)         \
                               : "memory");                      \
         __v;                                                    \
@@ -93,7 +111,7 @@
 #define csr_read(csr)                                           \
 ({                                                              \
         register unsigned long __v;                             \
-        __asm__ __volatile__ ("csrr %0, " #csr                  \
+        __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr)        \
                               : "=r" (__v) :                    \
                               : "memory");                      \
         __v;                                                    \
@@ -102,7 +120,7 @@
 #define csr_write(csr, val)                                     \
 ({                                                              \
         unsigned long __v = (unsigned long)(val);               \
-        __asm__ __volatile__ ("csrw " #csr ", %0"               \
+        __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0"     \
                               : : "rK" (__v)                    \
                               : "memory");                      \
 })
@@ -110,7 +128,7 @@
 #define csr_read_set(csr, val)                                  \
 ({                                                              \
         unsigned long __v = (unsigned long)(val);               \
-        __asm__ __volatile__ ("csrrs %0, " #csr ", %1"          \
+        __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
                               : "=r" (__v) : "rK" (__v)         \
                               : "memory");                      \
         __v;                                                    \
@@ -119,7 +137,7 @@
 #define csr_set(csr, val)                                       \
 ({                                                              \
         unsigned long __v = (unsigned long)(val);               \
-        __asm__ __volatile__ ("csrs " #csr ", %0"               \
+        __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0"     \
                               : : "rK" (__v)                    \
                               : "memory");                      \
 })
@@ -127,7 +145,7 @@
 #define csr_read_clear(csr, val)                                \
 ({                                                              \
         unsigned long __v = (unsigned long)(val);               \
-        __asm__ __volatile__ ("csrrc %0, " #csr ", %1"          \
+        __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
                               : "=r" (__v) : "rK" (__v)         \
                               : "memory");                      \
         __v;                                                    \
@@ -136,7 +154,7 @@
 #define csr_clear(csr, val)                                     \
 ({                                                              \
         unsigned long __v = (unsigned long)(val);               \
-        __asm__ __volatile__ ("csrc " #csr ", %0"               \
+        __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0"     \
                               : : "rK" (__v)                    \
                               : "memory");                      \
 })

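A note on placement, visible in the hunk above: the CSR_* numbers are defined before the #ifndef __ASSEMBLY__ guard, so the assembly sources further down (head.S, entry.S) can use the same names. Since .S files are fed through the C preprocessor, the assembler only ever sees a plain number, and csrw/csrr with a numeric CSR operand is accepted by any binutils. A hedged two-line illustration, not part of the patch (the 0x104 value is the CSR_SIE define added above):

        csrw CSR_SIE, zero      /* as written in head.S */
        csrw 0x104, zero        /* what reaches the assembler after preprocessing */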
arch/riscv/include/asm/irqflags.h:

@@ -21,25 +21,25 @@
 /* read interrupt enabled status */
 static inline unsigned long arch_local_save_flags(void)
 {
-        return csr_read(sstatus);
+        return csr_read(CSR_SSTATUS);
 }
 
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-        csr_set(sstatus, SR_SIE);
+        csr_set(CSR_SSTATUS, SR_SIE);
 }
 
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-        csr_clear(sstatus, SR_SIE);
+        csr_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-        return csr_read_clear(sstatus, SR_SIE);
+        return csr_read_clear(CSR_SSTATUS, SR_SIE);
 }
 
 /* test flags */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-        csr_set(sstatus, flags & SR_SIE);
+        csr_set(CSR_SSTATUS, flags & SR_SIE);
 }
 
 #endif /* _ASM_RISCV_IRQFLAGS_H */

arch/riscv/include/asm/mmu_context.h:

@@ -83,12 +83,7 @@ static inline void switch_mm(struct mm_struct *prev,
         cpumask_clear_cpu(cpu, mm_cpumask(prev));
         cpumask_set_cpu(cpu, mm_cpumask(next));
 
-        /*
-         * Use the old spbtr name instead of using the current satp
-         * name to support binutils 2.29 which doesn't know about the
-         * privileged ISA 1.10 yet.
-         */
-        csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+        csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
         local_flush_tlb_all();
 
         flush_icache_deferred(next);

arch/riscv/kernel/entry.S:

@@ -37,11 +37,11 @@
          * the kernel thread pointer. If we came from the kernel, sscratch
          * will contain 0, and we should continue on the current TP.
          */
-        csrrw tp, sscratch, tp
+        csrrw tp, CSR_SSCRATCH, tp
         bnez tp, _save_context
 
 _restore_kernel_tpsp:
-        csrr tp, sscratch
+        csrr tp, CSR_SSCRATCH
         REG_S sp, TASK_TI_KERNEL_SP(tp)
 _save_context:
         REG_S sp, TASK_TI_USER_SP(tp)
@@ -87,11 +87,11 @@ _save_context:
         li t0, SR_SUM | SR_FS
 
         REG_L s0, TASK_TI_USER_SP(tp)
-        csrrc s1, sstatus, t0
-        csrr s2, sepc
-        csrr s3, sbadaddr
-        csrr s4, scause
-        csrr s5, sscratch
+        csrrc s1, CSR_SSTATUS, t0
+        csrr s2, CSR_SEPC
+        csrr s3, CSR_STVAL
+        csrr s4, CSR_SCAUSE
+        csrr s5, CSR_SSCRATCH
         REG_S s0, PT_SP(sp)
         REG_S s1, PT_SSTATUS(sp)
         REG_S s2, PT_SEPC(sp)
@@ -107,8 +107,8 @@ _save_context:
 .macro RESTORE_ALL
         REG_L a0, PT_SSTATUS(sp)
         REG_L a2, PT_SEPC(sp)
-        csrw sstatus, a0
-        csrw sepc, a2
+        csrw CSR_SSTATUS, a0
+        csrw CSR_SEPC, a2
 
         REG_L x1, PT_RA(sp)
         REG_L x3, PT_GP(sp)
@@ -155,7 +155,7 @@ ENTRY(handle_exception)
          * Set sscratch register to 0, so that if a recursive exception
          * occurs, the exception vector knows it came from the kernel
          */
-        csrw sscratch, x0
+        csrw CSR_SSCRATCH, x0
 
         /* Load the global pointer */
         .option push
@@ -248,7 +248,7 @@ resume_userspace:
          * Save TP into sscratch, so we can find the kernel data structures
          * again.
          */
-        csrw sscratch, tp
+        csrw CSR_SSCRATCH, tp
 
 restore_all:
         RESTORE_ALL

arch/riscv/kernel/head.S:

@@ -22,9 +22,9 @@
 
 __INIT
 ENTRY(_start)
-        /* Mask and clear all interrupts */
-        csrw sie, zero
-        csrw sip, zero
+        /* Mask all interrupts */
+        csrw CSR_SIE, zero
+        csrw CSR_SIP, zero
 
         /* Load the global pointer */
         .option push
@@ -86,7 +86,7 @@ relocate:
         /* Point stvec to virtual address of intruction after satp write */
         la a0, 1f
         add a0, a0, a1
-        csrw stvec, a0
+        csrw CSR_STVEC, a0
 
         /* Compute satp for kernel page tables, but don't load it yet */
         la a2, swapper_pg_dir
@@ -102,12 +102,12 @@ relocate:
         srl a0, a0, PAGE_SHIFT
         or a0, a0, a1
         sfence.vma
-        csrw sptbr, a0
+        csrw CSR_SATP, a0
 .align 2
 1:
         /* Set trap vector to spin forever to help debug */
         la a0, .Lsecondary_park
-        csrw stvec, a0
+        csrw CSR_STVEC, a0
 
         /* Reload the global pointer */
         .option push
@@ -116,7 +116,7 @@ relocate:
         .option pop
 
         /* Switch to kernel page tables */
-        csrw sptbr, a2
+        csrw CSR_SATP, a2
 
         ret
 
@@ -127,7 +127,7 @@ relocate:
 
         /* Set trap vector to spin forever to help debug */
         la a3, .Lsecondary_park
-        csrw stvec, a3
+        csrw CSR_STVEC, a3
 
         slli a3, a0, LGREG
         la a1, __cpu_up_stack_pointer

arch/riscv/kernel/perf_event.c:

@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx)
 
         switch (idx) {
         case RISCV_PMU_CYCLE:
-                val = csr_read(cycle);
+                val = csr_read(CSR_CYCLE);
                 break;
         case RISCV_PMU_INSTRET:
-                val = csr_read(instret);
+                val = csr_read(CSR_INSTRET);
                 break;
         default:
                 WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);

arch/riscv/kernel/smp.c:

@@ -95,7 +95,7 @@ void riscv_software_interrupt(void)
         unsigned long *stats = ipi_data[smp_processor_id()].stats;
 
         /* Clear pending IPI */
-        csr_clear(sip, SIE_SSIE);
+        csr_clear(CSR_SIP, SIE_SSIE);
 
         while (true) {
                 unsigned long ops;

arch/riscv/kernel/traps.c:

@@ -159,9 +159,9 @@ void __init trap_init(void)
          * Set sup0 scratch register to 0, indicating to exception vector
          * that we are presently executing in the kernel
          */
-        csr_write(sscratch, 0);
+        csr_write(CSR_SSCRATCH, 0);
         /* Set the exception vector address */
-        csr_write(stvec, &handle_exception);
+        csr_write(CSR_STVEC, &handle_exception);
         /* Enable all interrupts */
-        csr_write(sie, -1);
+        csr_write(CSR_SIE, -1);
 }

arch/riscv/mm/fault.c:

@@ -239,13 +239,9 @@ vmalloc_fault:
                  * Do _not_ use "tsk->active_mm->pgd" here.
                  * We might be inside an interrupt in the middle
                  * of a task switch.
-                 *
-                 * Note: Use the old spbtr name instead of using the current
-                 * satp name to support binutils 2.29 which doesn't know about
-                 * the privileged ISA 1.10 yet.
                  */
                 index = pgd_index(addr);
-                pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+                pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
                 pgd_k = init_mm.pgd + index;
 
                 if (!pgd_present(*pgd_k))