Merge branch 'for-next/mitigations' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into for-next/core
This commit is contained in: 50abbe1962
@@ -88,6 +88,7 @@ parameter is applicable::
	APIC	APIC support is enabled.
	APM	Advanced Power Management support is enabled.
	ARM	ARM architecture is enabled.
	ARM64	ARM64 architecture is enabled.
	AX25	Appropriate AX.25 support is enabled.
	CLK	Common clock infrastructure is enabled.
	CMA	Contiguous Memory Area support is enabled.
@@ -2544,6 +2544,40 @@
			in the "bleeding edge" mini2440 support kernel at
			http://repo.or.cz/w/linux-2.6/mini2440.git

	mitigations=
			[X86,PPC,S390,ARM64] Control optional mitigations for
			CPU vulnerabilities.  This is a set of curated,
			arch-independent options, each of which is an
			aggregation of existing arch-specific options.

			off
				Disable all optional CPU mitigations.  This
				improves system performance, but it may also
				expose users to several CPU vulnerabilities.
				Equivalent to: nopti [X86,PPC]
					       kpti=0 [ARM64]
					       nospectre_v1 [PPC]
					       nobp=0 [S390]
					       nospectre_v2 [X86,PPC,S390,ARM64]
					       spectre_v2_user=off [X86]
					       spec_store_bypass_disable=off [X86,PPC]
					       ssbd=force-off [ARM64]
					       l1tf=off [X86]

			auto (default)
				Mitigate all CPU vulnerabilities, but leave SMT
				enabled, even if it's vulnerable.  This is for
				users who don't want to be surprised by SMT
				getting disabled across kernel upgrades, or who
				have other ways of avoiding SMT-based attacks.
				Equivalent to: (default behavior)

			auto,nosmt
				Mitigate all CPU vulnerabilities, disabling SMT
				if needed.  This is for users who always want to
				be fully mitigated, even if it means losing SMT.
				Equivalent to: l1tf=flush,nosmt [X86]

	mminit_loglevel=
			[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
			parameter allows control of the logging verbosity for
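For illustration only (hypothetical boot entries, not part of the patch), the three modes above are selected like this on the kernel command line:

	mitigations=off
	mitigations=auto	(same as omitting the option)
	mitigations=auto,nosmt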
@@ -2873,10 +2907,10 @@
			check bypass). With this option data leaks are possible
			in the system.

	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
			(indirect branch prediction) vulnerability. System may
			allow data leaks with this option, which is equivalent
			to spectre_v2=off.
	nospectre_v2	[X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
			the Spectre variant 2 (indirect branch prediction)
			vulnerability. System may allow data leaks with this
			option.

	nospec_store_bypass_disable
			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
@@ -90,6 +90,7 @@ config ARM64
	select GENERIC_CLOCKEVENTS
	select GENERIC_CLOCKEVENTS_BROADCAST
	select GENERIC_CPU_AUTOPROBE
	select GENERIC_CPU_VULNERABILITIES
	select GENERIC_EARLY_IOREMAP
	select GENERIC_IDLE_POLL_SETUP
	select GENERIC_IRQ_MULTI_HANDLER
@@ -633,11 +633,7 @@ static inline int arm64_get_ssbd_state(void)
#endif
}

#ifdef CONFIG_ARM64_SSBD
void arm64_set_ssbd_mitigation(bool state);
#else
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif

extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
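The stub removed above is replaced by a run-time check; a minimal sketch of the resulting pattern (the real body appears verbatim in the cpu_errata.c hunk further down):

	void arm64_set_ssbd_mitigation(bool state)
	{
		/* Compiled-out case handled at run time instead of via #ifdef */
		if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
			pr_info_once("SSBD disabled by kernel configuration\n");
			return;
		}
		/* ... apply or revert the mitigation (elided) ... */
	}

Because IS_ENABLED() folds to a compile-time constant, the dead branch is still discarded when CONFIG_ARM64_SSBD=n, while callers no longer depend on an #ifdef'd header stub.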
@@ -19,6 +19,7 @@
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
@@ -109,7 +110,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -131,9 +131,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
@@ -169,7 +169,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
@@ -177,23 +177,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
@@ -220,60 +203,83 @@ static void qcom_link_stack_sanitization(void)
		     : "=&r" (tmp));
}

static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return;
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return;
	return 1;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char *str;
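The -1/0/1 return convention documented above detect_harden_bp_fw() is consumed by check_branch_predictor() in a later hunk; condensed here for readability (lines lifted from that hunk, with the unrelated CSV2 and safe-list checks elided):

	int need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;		/* 0: firmware says no workaround required */
	__spectrev2_safe = false;
	if (need_wa < 0) {
		/* -1: vulnerable, but no firmware workaround to install */
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}
	return (need_wa > 0);		/* 1: workaround installed on this CPU */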
@@ -343,6 +349,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,

void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
@@ -372,16 +383,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}
@@ -398,6 +421,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}
@@ -406,14 +431,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;
@@ -423,6 +452,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}
@@ -462,7 +493,14 @@ out_printmsg:

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
@@ -507,26 +545,67 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
	{},
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ }
};

#endif
/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

#ifdef CONFIG_HARDEN_EL2_VECTORS
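Note that check_branch_predictor() is evaluated on every booted CPU (the SCOPE_LOCAL_CPU assertion above), so on heterogeneous systems __spectrev2_safe and __hardenbp_enab behave as one-way latches: the first core found to need (or lack) a workaround clears them for the whole machine, which is exactly the "all booted cores are known safe" condition the sysfs hunk at the end of this file reports on.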
@@ -701,13 +780,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
@@ -715,14 +792,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
@@ -742,3 +818,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
	{
	}
};

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	if (__spectrev2_safe)
		return sprintf(buf, "Not affected\n");

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
				"Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}
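These cpu_show_*() definitions override the weak defaults in drivers/base/cpu.c, which is why the Kconfig hunk above selects GENERIC_CPU_VULNERABILITIES; the strings then surface under /sys/devices/system/cpu/vulnerabilities/. Illustrative read (output taken from the sprintf strings above, not from a real boot):

	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
	Mitigation: Branch predictor hardening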
@@ -25,6 +25,7 @@
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -956,7 +957,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
	return has_cpuid_feature(entry, scope);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -975,7 +976,17 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		{ /* sentinel */ }
	};
	char const *str = "command line option";
	char const *str = "kpti command line option";
	bool meltdown_safe;

	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);

	/* Defer to CPU feature registers */
	if (has_cpuid_feature(entry, scope))
		meltdown_safe = true;

	if (!meltdown_safe)
		__meltdown_safe = false;

	/*
	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -987,6 +998,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
		__kpti_forced = -1;
	}

	/* Useful for KASLR robustness */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
		if (!__kpti_forced) {
			str = "KASLR";
			__kpti_forced = 1;
		}
	}

	if (cpu_mitigations_off() && !__kpti_forced) {
		str = "mitigations=off";
		__kpti_forced = -1;
	}

	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
		return false;
	}

	/* Forced? */
	if (__kpti_forced) {
		pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -994,18 +1023,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
		return __kpti_forced > 0;
	}

	/* Useful for KASLR robustness */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return kaslr_offset() > 0;

	/* Don't force KPTI for CPUs that are not vulnerable */
	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
		return false;

	/* Defer to CPU feature registers */
	return !has_cpuid_feature(entry, scope);
	return !meltdown_safe;
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
@@ -1035,6 +1056,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)

	return;
}
#else
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
}
#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */

static int __init parse_kpti(char *str)
{
@@ -1048,7 +1075,6 @@ static int __init parse_kpti(char *str)
	return 0;
}
early_param("kpti", parse_kpti);
#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */

#ifdef CONFIG_ARM64_HW_AFDBM
static inline void __cpu_enable_hw_dbm(void)
@@ -1315,7 +1341,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	{
		.desc = "Kernel page table isolation (KPTI)",
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1331,7 +1356,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.matches = unmap_kernel_at_el0,
		.cpu_enable = kpti_install_ng_mappings,
	},
#endif
	{
		/* FP/SIMD is not implemented */
		.capability = ARM64_HAS_NO_FPSIMD,
@@ -2156,3 +2180,15 @@ static int __init enable_mrs_emulation(void)
}

core_initcall(enable_mrs_emulation);

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	if (__meltdown_safe)
		return sprintf(buf, "Not affected\n");

	if (arm64_kernel_unmapped_at_el0())
		return sprintf(buf, "Mitigation: PTI\n");

	return sprintf(buf, "Vulnerable\n");
}
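Same mechanism as the spectre hooks in cpu_errata.c: this overrides the weak cpu_show_meltdown() in drivers/base/cpu.c and backs /sys/devices/system/cpu/vulnerabilities/meltdown, distinguishing "Not affected" (every booted CPU was judged meltdown-safe above) from "Mitigation: PTI" and plain "Vulnerable" (affected, but running with KPTI disabled).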
@@ -57,7 +57,7 @@ void setup_barrier_nospec(void)
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec)
	if (!no_nospec && !cpu_mitigations_off())
		enable_barrier_nospec(enable);
}
@@ -116,7 +116,7 @@ static int __init handle_nospectre_v2(char *p)
early_param("nospectre_v2", handle_nospectre_v2);
void setup_spectre_v2(void)
{
	if (no_spectrev2)
	if (no_spectrev2 || cpu_mitigations_off())
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
@@ -300,7 +300,7 @@ void setup_stf_barrier(void)

	stf_enabled_flush_types = type;

	if (!no_stf_barrier)
	if (!no_stf_barrier && !cpu_mitigations_off())
		stf_barrier_enable(enable);
}
@@ -932,7 +932,7 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)

	enabled_flush_types = types;

	if (!no_rfi_flush)
	if (!no_rfi_flush && !cpu_mitigations_off())
		rfi_flush_enable(enable);
}
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/nospec-branch.h>

static int __init nobp_setup_early(char *str)
@@ -58,7 +59,7 @@ early_param("nospectre_v2", nospectre_v2_setup_early);

void __init nospec_auto_detect(void)
{
	if (test_facility(156)) {
	if (test_facility(156) || cpu_mitigations_off()) {
		/*
		 * The machine supports etokens.
		 * Disable expolines and disable nobp.
@@ -440,7 +440,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
@@ -672,7 +673,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@ -1008,6 +1010,11 @@ static void __init l1tf_select_mitigation(void)
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
@@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void)
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
@@ -187,4 +187,28 @@ static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
#endif

/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

extern enum cpu_mitigations cpu_mitigations;

/* mitigations=off */
static inline bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}

/* mitigations=auto,nosmt */
static inline bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}

#endif /* _LINUX_CPU_H_ */
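A minimal sketch of how arch code is meant to consume these helpers, mirroring the x86 l1tf hunk earlier in this series (the enum values here are hypothetical placeholders for an arch's own mitigation modes, not part of this patch):

	/* Hypothetical arch boot code: consult the global knob when
	 * choosing this architecture's mitigation mode.
	 */
	if (cpu_mitigations_off())
		arch_mitigation = ARCH_MITIGATION_OFF;		/* hypothetical */
	else if (cpu_mitigations_auto_nosmt())
		arch_mitigation = ARCH_MITIGATION_FLUSH_NOSMT;	/* hypothetical */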
kernel/cpu.c (+15 lines)
@@ -2304,3 +2304,18 @@ void __init boot_cpu_hotplug_init(void)
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}

enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
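One behavior worth noting, implied by the code above rather than stated anywhere in the patch: an unrecognized value (say, mitigations=offf) falls through all three strcmp() checks, leaves cpu_mitigations at its CPU_MITIGATIONS_AUTO default, and, because the handler returns 0, does not trigger the "Malformed early option" warning that early_param handlers produce on a non-zero return.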