2019-06-04 11:11:33 +03:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2010-02-12 00:56:07 +03:00
|
|
|
/*
|
|
|
|
* linux/arch/arm/mach-vexpress/platsmp.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 2002 ARM Ltd.
|
|
|
|
* All Rights Reserved
|
|
|
|
*/
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/io.h>
|
2013-09-17 21:30:58 +04:00
|
|
|
#include <linux/of_address.h>
|
2012-10-09 15:56:36 +04:00
|
|
|
#include <linux/vexpress.h>
|
2012-02-24 13:18:14 +04:00
|
|
|
|
2013-01-30 13:12:55 +04:00
|
|
|
#include <asm/mcpm.h>
|
2012-02-24 13:18:14 +04:00
|
|
|
#include <asm/smp_scu.h>
|
|
|
|
#include <asm/mach/map.h>
|
2010-02-12 00:56:07 +03:00
|
|
|
|
2011-09-08 16:15:22 +04:00
|
|
|
#include <plat/platsmp.h>
|
2010-02-12 00:56:07 +03:00
|
|
|
|
2011-09-08 16:15:22 +04:00
|
|
|
#include "core.h"
|
ARM: Fix subtle race in CPU pen_release hotplug code
There is a subtle race in the CPU hotplug code, where a CPU which has
been offlined can online itself before being requested, which results
in things going astray on the next online/offline cycle.
What happens in the normal online/offline/online cycle is:
CPU0 CPU3
requests boot of CPU3
pen_release = 3
flush cache line
checks pen_release, reads 3
starts boot
pen_release = -1
... requests CPU3 offline ...
... dies ...
checks pen_release, reads -1
requests boot of CPU3
pen_release = 3
flush cache line
checks pen_release, reads 3
starts boot
pen_release = -1
However, as the write of -1 of pen_release is not fully flushed back to
memory, and the checking of pen_release is done with caches disabled,
this allows CPU3 the opportunity to read the old value of pen_release:
CPU0 CPU3
requests boot of CPU3
pen_release = 3
flush cache line
checks pen_release, reads 3
starts boot
pen_release = -1
... requests CPU3 offline ...
... dies ...
checks pen_release, reads 3
starts boot
pen_release = -1
requests boot of CPU3
pen_release = 3
flush cache line
Fix this by grouping the write of pen_release along with its cache line
flushing code to ensure that any update to pen_release is always pushed
out to physical memory.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-12-18 13:53:12 +03:00
|
|
|
|
2013-01-30 13:12:55 +04:00
|
|
|
/*
 * Select the SMP operations for this platform.
 *
 * Returns true and installs the MCPM-based operations when the kernel
 * can take control of the CCI port of every possible CPU — the
 * signature of a multi-cluster configuration.  Returns false
 * otherwise (or when MCPM is not configured), leaving the default
 * vexpress_smp_ops in place.
 */
bool __init vexpress_smp_init_ops(void)
{
#ifdef CONFIG_MCPM
	int i;
	struct device_node *cn, *cci;

	/*
	 * The best way to detect a multi-cluster configuration is to
	 * detect if the kernel can take over CCI ports control.  Loop
	 * over possible CPUs and check if CCI port control is
	 * available.  Override the default vexpress_smp_ops if so.
	 */
	for_each_possible_cpu(i) {
		bool has_cci;

		cn = of_get_cpu_node(i, NULL);
		if (WARN(!cn, "Missing cpu device node!"))
			return false;

		cci = of_parse_phandle(cn, "cci-control-port", 0);
		has_cci = cci && of_device_is_available(cci);
		of_node_put(cci);
		of_node_put(cn);

		/* Any CPU without an available CCI port disqualifies MCPM. */
		if (!has_cci)
			return false;
	}

	mcpm_smp_set_ops();
	return true;
#else
	return false;
#endif
}
|
2013-09-17 21:30:58 +04:00
|
|
|
|
|
|
|
/*
 * SCU compatibles found on Versatile Express CPU tiles; used below to
 * locate the Snoop Control Unit node in the device tree.
 */
static const struct of_device_id vexpress_smp_dt_scu_match[] __initconst = {
	{ .compatible = "arm,cortex-a5-scu", },
	{ .compatible = "arm,cortex-a9-scu", },
	{}	/* sentinel */
};
|
|
|
|
|
|
|
|
/*
 * Prepare secondary CPUs for boot: enable the SCU (when present) and
 * publish the secondary startup address via the system flags register.
 *
 * Fixes vs. original: drop the device_node reference taken by
 * of_find_matching_node() (it was leaked), and do not hand a NULL
 * mapping to scu_enable() if of_iomap() fails.
 */
static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *scu = of_find_matching_node(NULL,
			vexpress_smp_dt_scu_match);

	if (scu) {
		void __iomem *base = of_iomap(scu, 0);

		/* scu_enable() dereferences the mapping; guard failure */
		if (!WARN(!base, "SCU ioremap failed\n"))
			scu_enable(base);
		/* of_find_matching_node() returned a counted reference */
		of_node_put(scu);
	}

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 */
	vexpress_flags_set(__pa_symbol(versatile_secondary_startup));
}
|
|
|
|
|
2018-12-13 15:54:26 +03:00
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
static void vexpress_cpu_die(unsigned int cpu)
|
|
|
|
{
|
|
|
|
versatile_immitation_cpu_die(cpu, 0x40);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2015-11-15 04:39:53 +03:00
|
|
|
/*
 * Default (non-MCPM) SMP operations for Versatile Express, built on
 * the shared versatile secondary-CPU bring-up helpers.  Installed
 * unless vexpress_smp_init_ops() detects a multi-cluster system.
 */
const struct smp_operations vexpress_smp_dt_ops __initconst = {
	.smp_prepare_cpus	= vexpress_smp_dt_prepare_cpus,
	.smp_secondary_init	= versatile_secondary_init,
	.smp_boot_secondary	= versatile_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= vexpress_cpu_die,
#endif
};
|