x86/bugs: Add "unknown" reporting for MMIO Stale Data
commit 7df548840c
upstream. Older Intel CPUs that are not in the affected processor list for MMIO Stale Data vulnerabilities currently report "Not affected" in sysfs, which may not be correct. Vulnerability status for these older CPUs is unknown. Add known-not-affected CPUs to the whitelist. Report "unknown" mitigation status for CPUs that are not in blacklist, whitelist and also don't enumerate MSR ARCH_CAPABILITIES bits that reflect hardware immunity to MMIO Stale Data vulnerabilities. Mitigation is not deployed when the status is unknown. [ bp: Massage, fixup. ] Fixes: 8d50cdf8b8
("x86/speculation/mmio: Add sysfs reporting for Processor MMIO Stale Data") Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com> Suggested-by: Tony Luck <tony.luck@intel.com> Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/a932c154772f2121794a5f2eded1a11013114711.1657846269.git.pawan.kumar.gupta@linux.intel.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Parent
a7484eb9f3
Commit
75fa6c733b
|
@ -230,6 +230,20 @@ The possible values in this file are:
|
||||||
* - 'Mitigation: Clear CPU buffers'
|
* - 'Mitigation: Clear CPU buffers'
|
||||||
- The processor is vulnerable and the CPU buffer clearing mitigation is
|
- The processor is vulnerable and the CPU buffer clearing mitigation is
|
||||||
enabled.
|
enabled.
|
||||||
|
* - 'Unknown: No mitigations'
|
||||||
|
- The processor vulnerability status is unknown because it is
|
||||||
|
out of Servicing period. Mitigation is not attempted.
|
||||||
|
|
||||||
|
Definitions:
|
||||||
|
------------
|
||||||
|
|
||||||
|
Servicing period: The process of providing functional and security updates to
|
||||||
|
Intel processors or platforms, utilizing the Intel Platform Update (IPU)
|
||||||
|
process or other similar mechanisms.
|
||||||
|
|
||||||
|
End of Servicing Updates (ESU): ESU is the date at which Intel will no
|
||||||
|
longer provide Servicing, such as through IPU or other similar update
|
||||||
|
processes. ESU dates will typically be aligned to end of quarter.
|
||||||
|
|
||||||
If the processor is vulnerable then the following information is appended to
|
If the processor is vulnerable then the following information is appended to
|
||||||
the above information:
|
the above information:
|
||||||
|
|
|
@ -446,7 +446,8 @@
|
||||||
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
|
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
|
||||||
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
|
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
|
||||||
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
|
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
|
||||||
#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
|
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
|
||||||
#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
|
#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
|
||||||
|
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
|
||||||
|
|
||||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||||
|
|
|
@ -433,7 +433,8 @@ static void __init mmio_select_mitigation(void)
|
||||||
u64 ia32_cap;
|
u64 ia32_cap;
|
||||||
|
|
||||||
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
|
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
|
||||||
cpu_mitigations_off()) {
|
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
|
||||||
|
cpu_mitigations_off()) {
|
||||||
mmio_mitigation = MMIO_MITIGATION_OFF;
|
mmio_mitigation = MMIO_MITIGATION_OFF;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -538,6 +539,8 @@ out:
|
||||||
pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
|
pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
|
||||||
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
|
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
|
||||||
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
|
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
|
||||||
|
else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
|
||||||
|
pr_info("MMIO Stale Data: Unknown: No mitigations\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __init md_clear_select_mitigation(void)
|
static void __init md_clear_select_mitigation(void)
|
||||||
|
@ -2268,6 +2271,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
|
||||||
|
|
||||||
static ssize_t mmio_stale_data_show_state(char *buf)
|
static ssize_t mmio_stale_data_show_state(char *buf)
|
||||||
{
|
{
|
||||||
|
if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
|
||||||
|
return sysfs_emit(buf, "Unknown: No mitigations\n");
|
||||||
|
|
||||||
if (mmio_mitigation == MMIO_MITIGATION_OFF)
|
if (mmio_mitigation == MMIO_MITIGATION_OFF)
|
||||||
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
|
return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
|
||||||
|
|
||||||
|
@ -2414,6 +2420,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
|
||||||
return srbds_show_state(buf);
|
return srbds_show_state(buf);
|
||||||
|
|
||||||
case X86_BUG_MMIO_STALE_DATA:
|
case X86_BUG_MMIO_STALE_DATA:
|
||||||
|
case X86_BUG_MMIO_UNKNOWN:
|
||||||
return mmio_stale_data_show_state(buf);
|
return mmio_stale_data_show_state(buf);
|
||||||
|
|
||||||
case X86_BUG_RETBLEED:
|
case X86_BUG_RETBLEED:
|
||||||
|
@ -2473,7 +2480,10 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
|
||||||
|
|
||||||
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
|
ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
|
||||||
{
|
{
|
||||||
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
|
if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
|
||||||
|
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
|
||||||
|
else
|
||||||
|
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
|
||||||
}
|
}
|
||||||
|
|
||||||
ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
|
ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
|
||||||
|
|
|
@ -1027,7 +1027,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
|
||||||
#define NO_SWAPGS BIT(6)
|
#define NO_SWAPGS BIT(6)
|
||||||
#define NO_ITLB_MULTIHIT BIT(7)
|
#define NO_ITLB_MULTIHIT BIT(7)
|
||||||
#define NO_SPECTRE_V2 BIT(8)
|
#define NO_SPECTRE_V2 BIT(8)
|
||||||
#define NO_EIBRS_PBRSB BIT(9)
|
#define NO_MMIO BIT(9)
|
||||||
|
#define NO_EIBRS_PBRSB BIT(10)
|
||||||
|
|
||||||
#define VULNWL(vendor, family, model, whitelist) \
|
#define VULNWL(vendor, family, model, whitelist) \
|
||||||
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
|
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
|
||||||
|
@ -1048,6 +1049,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
|
||||||
VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
|
VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
|
||||||
|
|
||||||
/* Intel Family 6 */
|
/* Intel Family 6 */
|
||||||
|
VULNWL_INTEL(TIGERLAKE, NO_MMIO),
|
||||||
|
VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
|
||||||
|
VULNWL_INTEL(ALDERLAKE, NO_MMIO),
|
||||||
|
VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
|
||||||
|
|
||||||
VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
|
VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
|
||||||
VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
|
VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
|
||||||
VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
|
VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
|
||||||
|
@ -1066,9 +1072,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
|
||||||
VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
||||||
VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
||||||
|
|
||||||
VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
|
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Technically, swapgs isn't serializing on AMD (despite it previously
|
* Technically, swapgs isn't serializing on AMD (despite it previously
|
||||||
|
@ -1083,18 +1089,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
|
||||||
VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
|
VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
|
||||||
|
|
||||||
/* AMD Family 0xf - 0x12 */
|
/* AMD Family 0xf - 0x12 */
|
||||||
VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
|
|
||||||
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
|
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
|
||||||
VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
|
VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
|
||||||
|
|
||||||
/* Zhaoxin Family 7 */
|
/* Zhaoxin Family 7 */
|
||||||
VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
|
VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
|
||||||
VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS),
|
VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
|
||||||
{}
|
{}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1248,10 +1254,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||||
* Affected CPU list is generally enough to enumerate the vulnerability,
|
* Affected CPU list is generally enough to enumerate the vulnerability,
|
||||||
* but for virtualization case check for ARCH_CAP MSR bits also, VMM may
|
* but for virtualization case check for ARCH_CAP MSR bits also, VMM may
|
||||||
* not want the guest to enumerate the bug.
|
* not want the guest to enumerate the bug.
|
||||||
|
*
|
||||||
|
* Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
|
||||||
|
* nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
|
||||||
*/
|
*/
|
||||||
if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
|
if (!arch_cap_mmio_immune(ia32_cap)) {
|
||||||
!arch_cap_mmio_immune(ia32_cap))
|
if (cpu_matches(cpu_vuln_blacklist, MMIO))
|
||||||
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
|
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
|
||||||
|
else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
|
||||||
|
setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
|
||||||
|
}
|
||||||
|
|
||||||
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
|
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
|
||||||
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
|
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
|
||||||
|
|
Loading…
Link in new issue