Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge net again, after pulling in x86/bugs fixes for clang
linking errors.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2023-08-10 18:59:43 -07:00
Parents: 4a8d287909 9ebbb29db9
Commit: 35f563d61b
7 changed files with 83 additions and 113 deletions

@@ -13,11 +13,11 @@ are configurable at compile, boot or run time.
    l1tf
    mds
    tsx_async_abort
-   multihit.rst
-   special-register-buffer-data-sampling.rst
-   core-scheduling.rst
-   l1d_flush.rst
-   processor_mmio_stale_data.rst
-   cross-thread-rsb.rst
+   multihit
+   special-register-buffer-data-sampling
+   core-scheduling
+   l1d_flush
+   processor_mmio_stale_data
+   cross-thread-rsb
    srso
-   gather_data_sampling.rst
+   gather_data_sampling

@@ -42,42 +42,59 @@ The sysfs file showing SRSO mitigation status is:
 The possible values in this file are:
 
- - 'Not affected'               The processor is not vulnerable
+ * 'Not affected':
 
- - 'Vulnerable: no microcode'   The processor is vulnerable, no
-                                microcode extending IBPB functionality
-                                to address the vulnerability has been
-                                applied.
+   The processor is not vulnerable
 
- - 'Mitigation: microcode'      Extended IBPB functionality microcode
-                                patch has been applied. It does not
-                                address User->Kernel and Guest->Host
-                                transitions protection but it does
-                                address User->User and VM->VM attack
-                                vectors.
+ * 'Vulnerable: no microcode':
 
-                                (spec_rstack_overflow=microcode)
+   The processor is vulnerable, no microcode extending IBPB
+   functionality to address the vulnerability has been applied.
 
- - 'Mitigation: safe RET'       Software-only mitigation. It complements
-                                the extended IBPB microcode patch
-                                functionality by addressing User->Kernel
-                                and Guest->Host transitions protection.
+ * 'Mitigation: microcode':
 
-                                Selected by default or by
-                                spec_rstack_overflow=safe-ret
+   Extended IBPB functionality microcode patch has been applied. It does
+   not address User->Kernel and Guest->Host transitions protection but it
+   does address User->User and VM->VM attack vectors.
 
- - 'Mitigation: IBPB'           Similar protection as "safe RET" above
-                                but employs an IBPB barrier on privilege
-                                domain crossings (User->Kernel,
-                                Guest->Host).
+   Note that User->User mitigation is controlled by how the IBPB aspect in
+   the Spectre v2 mitigation is selected:
 
-                                (spec_rstack_overflow=ibpb)
+    * conditional IBPB:
+
+      where each process can select whether it needs an IBPB issued
+      around it PR_SPEC_DISABLE/_ENABLE etc, see :doc:`spectre`
+
+    * strict:
+
+      i.e., always on - by supplying spectre_v2_user=on on the kernel
+      command line
+
+  (spec_rstack_overflow=microcode)
+
+ * 'Mitigation: safe RET':
+
+   Software-only mitigation. It complements the extended IBPB microcode
+   patch functionality by addressing User->Kernel and Guest->Host
+   transitions protection.
+
+   Selected by default or by spec_rstack_overflow=safe-ret
+
+ * 'Mitigation: IBPB':
+
+   Similar protection as "safe RET" above but employs an IBPB barrier on
+   privilege domain crossings (User->Kernel, Guest->Host).
+
+  (spec_rstack_overflow=ibpb)
+
+ * 'Mitigation: IBPB on VMEXIT':
+
+   Mitigation addressing the cloud provider scenario - the Guest->Host
+   transitions only.
+
+  (spec_rstack_overflow=ibpb-vmexit)
 
- - 'Mitigation: IBPB on VMEXIT' Mitigation addressing the cloud provider
-                                scenario - the Guest->Host transitions
-                                only.
-
-                                (spec_rstack_overflow=ibpb-vmexit)
 
 In order to exploit vulnerability, an attacker needs to:
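
To see which of the values above a given machine reports, the status string can be
read straight from sysfs. A minimal user-space sketch, assuming the usual sysfs
mount point and the spec_rstack_overflow attribute exposed under
/sys/devices/system/cpu/vulnerabilities/ (the file the hunk's context line refers to):

#include <stdio.h>
#include <stdlib.h>

/* Assumed path: sysfs mounted at /sys, attribute name as in the cpu.c hunk below. */
#define SRSO_SYSFS \
        "/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow"

int main(void)
{
        char line[256];
        FILE *f = fopen(SRSO_SYSFS, "r");

        if (!f) {
                /* Older kernels do not expose this file at all. */
                perror(SRSO_SYSFS);
                return EXIT_FAILURE;
        }

        /* The file holds a single line, e.g. "Mitigation: safe RET". */
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return EXIT_SUCCESS;
}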

@@ -731,4 +731,6 @@ bool arch_is_platform_page(u64 paddr);
 #define arch_is_platform_page arch_is_platform_page
 #endif
 
+extern bool gds_ucode_mitigated(void);
+
 #endif /* _ASM_X86_PROCESSOR_H */

@@ -529,11 +529,17 @@ INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_CPU_SRSO
 /*
- * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
+ * GNU ld cannot do XOR until 2.41.
+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
+ *
+ * LLVM lld cannot do XOR until lld-17.
+ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
+ *
+ * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
-        (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+        (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
         "SRSO function pair won't alias");
 #endif
 
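
The (A | B) - (A & B) expression kept by this hunk is XOR written with operations
every linker can evaluate: OR collects the bits set in either address, AND keeps
only the shared bits, and subtracting the latter from the former leaves exactly
the differing bits. A self-contained sketch (with made-up addresses, not the real
srso_* symbol values) that checks both the identity and the bit-2/8/14/20
difference mask the ASSERT demands:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Only bits 2, 8, 14 and 20 may differ between the two function
 * addresses - the same mask the linker-script ASSERT checks. */
#define SRSO_ALIAS_MASK ((1u << 2) | (1u << 8) | (1u << 14) | (1u << 20))

int main(void)
{
        /* Hypothetical addresses that differ only in the mask bits. */
        uint64_t a = 0xffffffff82000000ull;
        uint64_t b = a ^ SRSO_ALIAS_MASK;

        /* (A | B) - (A & B): OR has every bit set in either value, AND only
         * the shared ones, so their difference is precisely A ^ B. */
        uint64_t xor_via_sub = (a | b) - (a & b);

        assert(xor_via_sub == (a ^ b));
        assert(xor_via_sub == SRSO_ALIAS_MASK);

        printf("difference mask: %#llx\n", (unsigned long long)xor_via_sub);
        return 0;
}

The patch leaves the arithmetic alone; it only wraps one operand in ABSOLUTE()
so the expression is evaluated on absolute values, which both GNU ld and LLVM
lld accept here.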

@@ -314,8 +314,6 @@ u64 __read_mostly host_xcr0;
 
 static struct kmem_cache *x86_emulator_cache;
 
-extern bool gds_ucode_mitigated(void);
-
 /*
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.

@@ -509,85 +509,30 @@ static void __init cpu_dev_register_generic(void)
 }
 
 #ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
 
-ssize_t __weak cpu_show_meltdown(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spectre_v1(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spectre_v2(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
-                                          struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_l1tf(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_mds(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
-                                        struct device_attribute *attr,
-                                        char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
-                                      struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_srbds(struct device *dev,
+static ssize_t cpu_show_not_affected(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
         return sysfs_emit(buf, "Not affected\n");
 }
 
-ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
-                                        struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
+#define CPU_SHOW_VULN_FALLBACK(func)                                    \
+        ssize_t cpu_show_##func(struct device *,                        \
+                                struct device_attribute *, char *)      \
+                 __attribute__((weak, alias("cpu_show_not_affected")))
 
-ssize_t __weak cpu_show_retbleed(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
-                                             struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_gds(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-        return sysfs_emit(buf, "Not affected\n");
-}
+CPU_SHOW_VULN_FALLBACK(meltdown);
+CPU_SHOW_VULN_FALLBACK(spectre_v1);
+CPU_SHOW_VULN_FALLBACK(spectre_v2);
+CPU_SHOW_VULN_FALLBACK(spec_store_bypass);
+CPU_SHOW_VULN_FALLBACK(l1tf);
+CPU_SHOW_VULN_FALLBACK(mds);
+CPU_SHOW_VULN_FALLBACK(tsx_async_abort);
+CPU_SHOW_VULN_FALLBACK(itlb_multihit);
+CPU_SHOW_VULN_FALLBACK(srbds);
+CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
+CPU_SHOW_VULN_FALLBACK(retbleed);
+CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
+CPU_SHOW_VULN_FALLBACK(gather_data_sampling);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -601,7 +546,7 @@ static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
 static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
 static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
 static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
-static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gather_data_sampling, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
         &dev_attr_meltdown.attr,
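
The replacement code above keeps one real cpu_show_not_affected() and turns every
per-vulnerability stub into a weak alias of it; an architecture that actually
implements cpu_show_<vuln>() overrides the alias with its strong definition at
link time. A small stand-alone sketch of the same weak/alias mechanics
(hypothetical show_* names, GCC/Clang attribute syntax on an ELF target):

#include <stdio.h>

/* The single real implementation all fallbacks point at. */
static int show_not_affected(void)
{
        return printf("Not affected\n");
}

/* Stand-in for CPU_SHOW_VULN_FALLBACK(): declare show_<name>() as a weak
 * alias of show_not_affected(). A strong show_<name>() defined in another
 * object file would win at link time, just as an architecture's
 * cpu_show_<vuln>() overrides the driver-core stub. */
#define SHOW_FALLBACK(name) \
        int show_##name(void) __attribute__((weak, alias("show_not_affected")))

SHOW_FALLBACK(meltdown);
SHOW_FALLBACK(retbleed);

int main(void)
{
        show_meltdown();        /* no override linked in -> "Not affected" */
        show_retbleed();        /* same */
        return 0;
}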

@@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct device *dev,
                                  struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
                                              struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_gds(struct device *dev,
+                            struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,