LoongArch fixes for v5.19-rc2
-----BEGIN PGP SIGNATURE-----

iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmKkZHAWHGNoZW5odWFj
YWlAa2VybmVsLm9yZwAKCRAChivD8uImepEUD/wOSA3czNvkC2y2TioPelbWovo+
2I7A0mXsdN3dT/cdIn5W4JaeYVNr1zqBnrovxQ0JLtOAssFKm3By5aGDFAaHJZPi
23nXwilgP7VCqDsQpTwuJWSjM4kv4+ZZUHDBjEN7M1lF0u5gK+703BEb+IhWlLF/
nESg1IUZdiXt8SPbCFWzAyqtc8IX4s9tmiHKvvFY8Fuj7wHhPnFVVLxoTyd3puuD
RzelwX31pSLFUD2BGO5GEIFqLNYKyMT7QlQWu5cRwzhRnB+P6/Z05MANeDJsq4GY
D08rvTJqu0xzBpHyjneWG11FipaZ1vsj9Z5RlWjStFwJHGw/MAHTKtUmqr1VFe5W
65mSyobR2SQD7WuOWXPXCENY7ueOP5H3gHz95KS7Xt7JpqvNHOvnwt2KirKL0S2O
a3fXFPmB89siH4VQYm5idWc8hV58hdKjM9N3e2xF09pVT23Ru9Y27IffauJ5Perj
BMzNWn+lkhVQznQaeWR6x5Ux8Cn3+LCZDlXJ/oYqIVM+mXWDA8mdEST1/XJXpbgv
BrIHH6P4A9iQtDkC7qZ3cnuVniHSCBfKIJyJnRFxFdqpucIPd+oDTQQFsVg04xM+
WA5bKnkn+uzm7Oxr9kMNihTkW/l1MAOpXpSJZYVis3F1Zwth+P6h6IyCGSz/3fn7
tvfNxRc4kCCli5fbBA==
=chWr
-----END PGP SIGNATURE-----

Merge tag 'loongarch-fixes-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Fix build errors and a stale comment"

* tag 'loongarch-fixes-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: Remove MIPS comment about cycle counter
  LoongArch: Fix copy_thread() build errors
  LoongArch: Fix the !CONFIG_SMP build
This commit is contained in:
0678afa605
arch/loongarch/Kconfig
@@ -343,6 +343,7 @@ config NR_CPUS
 
 config NUMA
 	bool "NUMA Support"
+	select SMP
 	select ACPI_NUMA if ACPI
 	help
 	  Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
arch/loongarch/include/asm/hardirq.h
@@ -19,7 +19,7 @@ typedef struct {
 	unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
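The hardirq.h change above pairs with the irq.c and smp.c hunks further down: the DECLARE in the header now matches a DEFINE that lives in irq.c (built in every configuration) instead of smp.c (SMP-only). For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of the per-CPU declare/define/read pattern; the demo_stat type and demo_pending() helper are made-up names for illustration, not part of this patch.

#include <linux/export.h>
#include <linux/percpu.h>

struct demo_stat {
	unsigned int softirq_pending;
};

/* header side: every translation unit sees the declaration */
DECLARE_PER_CPU_SHARED_ALIGNED(struct demo_stat, demo_stat);

/* exactly one .c file provides the storage and exports it to modules */
DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_stat, demo_stat);
EXPORT_PER_CPU_SYMBOL(demo_stat);

/* readers access the copy belonging to the CPU they are running on */
static unsigned int demo_pending(void)
{
	return this_cpu_read(demo_stat.softirq_pending);
}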
arch/loongarch/include/asm/percpu.h
@@ -6,6 +6,7 @@
 #define __ASM_PERCPU_H
 
 #include <asm/cmpxchg.h>
+#include <asm/loongarch.h>
 
 /* Use r21 for fast access */
 register unsigned long __my_cpu_offset __asm__("$r21");
arch/loongarch/include/asm/smp.h
@@ -9,10 +9,16 @@
 #include <linux/atomic.h>
 #include <linux/bitops.h>
 #include <linux/linkage.h>
-#include <linux/smp.h>
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 
+extern int smp_num_siblings;
+extern int num_processors;
+extern int disabled_cpus;
+extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map[];
+
 void loongson3_smp_setup(void);
 void loongson3_prepare_cpus(unsigned int max_cpus);
 void loongson3_boot_secondary(int cpu, struct task_struct *idle);
@@ -25,26 +31,11 @@ int loongson3_cpu_disable(void);
 void loongson3_cpu_die(unsigned int cpu);
 #endif
 
-#ifdef CONFIG_SMP
-
 static inline void plat_smp_setup(void)
 {
 	loongson3_smp_setup();
 }
 
-#else /* !CONFIG_SMP */
-
-static inline void plat_smp_setup(void) { }
-
-#endif /* !CONFIG_SMP */
-
-extern int smp_num_siblings;
-extern int num_processors;
-extern int disabled_cpus;
-extern cpumask_t cpu_sibling_map[];
-extern cpumask_t cpu_core_map[];
-extern cpumask_t cpu_foreign_map[];
-
 static inline int raw_smp_processor_id(void)
 {
 #if defined(__VDSO__)
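The deleted #else branch was the inline-stub idiom for plat_smp_setup(); after this change the extern declarations are consolidated near the top of the header (first smp.h hunk above) and the UP case is handled by guarding the call site in setup.c instead (see that hunk below). For reference, a sketch of the two common idioms in a hypothetical header, with made-up foo_* names that are not part of this patch:

#include <linux/cpumask.h>

/* (a) declarations that both SMP and UP code reference stay unguarded */
extern int foo_num_cores;
extern cpumask_t foo_core_map[];

#ifdef CONFIG_SMP
void foo_smp_setup(void);
#else
/* (b) inline-stub idiom: UP callers compile without their own #ifdef */
static inline void foo_smp_setup(void) { }
#endif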
arch/loongarch/include/asm/timex.h
@@ -12,13 +12,6 @@
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
 
-/*
- * Standard way to access the cycle counter.
- * Currently only used on SMP for scheduling.
- *
- * We know that all SMP capable CPUs have cycle counters.
- */
-
 typedef unsigned long cycles_t;
 
 #define get_cycles get_cycles
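What remains after removing the stale MIPS comment is just the cycles_t typedef and the get_cycles override marker. As a rough sketch (an assumption, not part of the hunk above), the override is typically backed by the LoongArch stable counter via the drdtime() helper from asm/loongarch.h, which wraps the rdtime.d instruction:

#include <asm/loongarch.h>	/* drdtime(): assumed helper, verify against the tree */

typedef unsigned long cycles_t;

#define get_cycles get_cycles	/* tell <linux/timex.h> not to use its fallback */
static inline cycles_t get_cycles(void)
{
	/* constant-frequency stable counter, readable from any CPU */
	return (cycles_t)drdtime();
}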
arch/loongarch/kernel/acpi.c
@@ -138,6 +138,7 @@ void __init acpi_boot_table_init(void)
 	}
 }
 
+#ifdef CONFIG_SMP
 static int set_processor_mask(u32 id, u32 flags)
 {
 
@@ -166,15 +167,18 @@ static int set_processor_mask(u32 id, u32 flags)
 
 	return cpu;
 }
+#endif
 
 static void __init acpi_process_madt(void)
 {
+#ifdef CONFIG_SMP
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		__cpu_number_map[i] = -1;
 		__cpu_logical_map[i] = -1;
 	}
+#endif
 
 	loongson_sysconf.nr_cpus = num_processors;
 }
arch/loongarch/kernel/cacheinfo.c
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
+#include <asm/cpu-info.h>
 #include <linux/cacheinfo.h>
 
 /* Populates leaf and increments to next leaf */
arch/loongarch/kernel/irq.c
@@ -22,6 +22,8 @@
 #include <asm/setup.h>
 
 DEFINE_PER_CPU(unsigned long, irq_stack);
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 struct irq_domain *cpu_domain;
 struct irq_domain *liointc_domain;
@@ -56,8 +58,11 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 
 void __init init_IRQ(void)
 {
-	int i, r, ipi_irq;
+	int i;
+#ifdef CONFIG_SMP
+	int r, ipi_irq;
 	static int ipi_dummy_dev;
+#endif
 	unsigned int order = get_order(IRQ_STACK_SIZE);
 	struct page *page;
 
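Splitting the declarations keeps r and ipi_irq out of !CONFIG_SMP builds, presumably because the IPI setup that consumes them is itself compiled only for SMP, so the variables would otherwise be flagged as unused. A related idiom, sketched below with entirely hypothetical names (example_init_irq(), a pr_info() standing in for the IPI setup), is IS_ENABLED(), which keeps the block visible to the compiler in both configurations; it only works when everything referenced inside is declared for UP as well, which is often exactly what is missing.

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kconfig.h>
#include <linux/printk.h>

void __init example_init_irq(void)
{
	int i;

	if (IS_ENABLED(CONFIG_SMP)) {
		/* compiled and type-checked for UP too, then optimized away;
		 * every symbol used here must still exist in UP builds
		 */
		pr_info("SMP: setting up IPIs\n");
	}

	for (i = 0; i < NR_IRQS; i++)
		irq_set_noprobe(i);
}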
arch/loongarch/kernel/process.c
@@ -120,10 +120,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread(unsigned long clone_flags, unsigned long usp,
-	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 {
 	unsigned long childksp;
+	unsigned long tls = args->tls;
+	unsigned long usp = args->stack;
+	unsigned long clone_flags = args->flags;
 	struct pt_regs *childregs, *regs = current_pt_regs();
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
@@ -136,12 +138,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
 	p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
 	p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
-	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+	if (unlikely(args->fn)) {
 		/* kernel thread */
-		p->thread.reg23 = usp; /* fn */
-		p->thread.reg24 = kthread_arg;
 		p->thread.reg03 = childksp;
-		p->thread.reg01 = (unsigned long) ret_from_kernel_thread;
+		p->thread.reg23 = (unsigned long)args->fn;
+		p->thread.reg24 = (unsigned long)args->fn_arg;
+		p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->csr_euen = p->thread.csr_euen;
 		childregs->csr_crmd = p->thread.csr_crmd;
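The two process.c hunks adapt LoongArch to the 5.19 copy_thread() rework: the function now takes a single kernel_clone_args pointer, args->fn is non-NULL exactly for kernel threads (replacing the PF_KTHREAD check), and fn/fn_arg take over from the old usp/kthread_arg parameters. The sketch below is only a schematic of that contract, not the LoongArch code (which is shown above); the thread/regs field names marked as placeholders vary per architecture.

#include <linux/sched.h>
#include <linux/sched/task.h>	/* struct kernel_clone_args */
#include <linux/string.h>

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	struct pt_regs *childregs = task_pt_regs(p);

	if (args->fn) {
		/* kernel thread: no user register context to inherit; record
		 * where ret_from_kernel_thread() should jump and its argument
		 */
		memset(childregs, 0, sizeof(*childregs));
		p->thread.kthread_fn  = (unsigned long)args->fn;	/* placeholder field */
		p->thread.kthread_arg = (unsigned long)args->fn_arg;	/* placeholder field */
		return 0;
	}

	/* user thread: start from a copy of the parent's registers */
	*childregs = *current_pt_regs();
	if (args->stack)
		childregs->user_sp = args->stack;	/* placeholder field */
	if (args->flags & CLONE_SETTLS)
		childregs->user_tp = args->tls;		/* placeholder field */

	return 0;
}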
arch/loongarch/kernel/setup.c
@@ -39,7 +39,6 @@
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 
 #define SMBIOS_BIOSSIZE_OFFSET		0x09
@@ -349,8 +348,6 @@ static void __init prefill_possible_map(void)
 
 	nr_cpu_ids = possible;
 }
-#else
-static inline void prefill_possible_map(void) {}
 #endif
 
 void __init setup_arch(char **cmdline_p)
@@ -367,8 +364,10 @@ void __init setup_arch(char **cmdline_p)
 	arch_mem_init(cmdline_p);
 
 	resource_init();
+#ifdef CONFIG_SMP
 	plat_smp_setup();
 	prefill_possible_map();
+#endif
 
 	paging_init();
 }
arch/loongarch/kernel/smp.c
@@ -66,8 +66,6 @@ static cpumask_t cpu_core_setup_map;
 
 struct secondary_data cpuboot_data;
 static DEFINE_PER_CPU(int, cpu_state);
-DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
-EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 enum ipi_msg_type {
 	IPI_RESCHEDULE,