xen: introduce xen_vcpu_id mapping
It may happen that Xen's and Linux's ideas of vCPU id diverge. In particular, when we crash on a secondary vCPU we may want to do kdump and unlike plain kexec where we do migrate_to_reboot_cpu() we try booting on the vCPU which crashed. This doesn't work very well for PVHVM guests as we have a number of hypercalls where we pass vCPU id as a parameter. These hypercalls either fail or do something unexpected. To solve the issue introduce percpu xen_vcpu_id mapping. ARM and PV guests get direct mapping for now. Boot CPU for PVHVM guest gets its id from CPUID. With secondary CPUs it is a bit trickier. Currently, we initialize IPI vectors before these CPUs boot so we can't use CPUID. Use ACPI ids from MADT instead. Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> Signed-off-by: David Vrabel <david.vrabel@citrix.com>
This commit is contained in:
Родитель
3e9e57fad3
Коммит
88e957d6e4
|
@ -49,6 +49,10 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
|
||||||
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
|
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
|
||||||
static struct vcpu_info __percpu *xen_vcpu_info;
|
static struct vcpu_info __percpu *xen_vcpu_info;
|
||||||
|
|
||||||
|
/* Linux <-> Xen vCPU id mapping */
|
||||||
|
DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
|
||||||
|
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
|
||||||
|
|
||||||
/* These are unused until we support booting "pre-ballooned" */
|
/* These are unused until we support booting "pre-ballooned" */
|
||||||
unsigned long xen_released_pages;
|
unsigned long xen_released_pages;
|
||||||
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
|
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
|
||||||
|
@ -167,6 +171,9 @@ static void xen_percpu_init(void)
|
||||||
pr_info("Xen: initializing cpu%d\n", cpu);
|
pr_info("Xen: initializing cpu%d\n", cpu);
|
||||||
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
|
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
|
||||||
|
|
||||||
|
/* Direct vCPU id mapping for ARM guests. */
|
||||||
|
per_cpu(xen_vcpu_id, cpu) = cpu;
|
||||||
|
|
||||||
info.mfn = virt_to_gfn(vcpup);
|
info.mfn = virt_to_gfn(vcpup);
|
||||||
info.offset = xen_offset_in_page(vcpup);
|
info.offset = xen_offset_in_page(vcpup);
|
||||||
|
|
||||||
|
@ -388,6 +395,9 @@ static int __init xen_guest_init(void)
|
||||||
if (xen_vcpu_info == NULL)
|
if (xen_vcpu_info == NULL)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
/* Direct vCPU id mapping for ARM guests. */
|
||||||
|
per_cpu(xen_vcpu_id, 0) = 0;
|
||||||
|
|
||||||
xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
|
xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
|
||||||
if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
|
if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
|
||||||
&xen_auto_xlat_grant_frames.vaddr,
|
&xen_auto_xlat_grant_frames.vaddr,
|
||||||
|
|
|
@ -59,6 +59,7 @@
|
||||||
#include <asm/xen/pci.h>
|
#include <asm/xen/pci.h>
|
||||||
#include <asm/xen/hypercall.h>
|
#include <asm/xen/hypercall.h>
|
||||||
#include <asm/xen/hypervisor.h>
|
#include <asm/xen/hypervisor.h>
|
||||||
|
#include <asm/xen/cpuid.h>
|
||||||
#include <asm/fixmap.h>
|
#include <asm/fixmap.h>
|
||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
#include <asm/proto.h>
|
#include <asm/proto.h>
|
||||||
|
@ -118,6 +119,10 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
|
||||||
*/
|
*/
|
||||||
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
|
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
|
||||||
|
|
||||||
|
/* Linux <-> Xen vCPU id mapping */
|
||||||
|
DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
|
||||||
|
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
|
||||||
|
|
||||||
enum xen_domain_type xen_domain_type = XEN_NATIVE;
|
enum xen_domain_type xen_domain_type = XEN_NATIVE;
|
||||||
EXPORT_SYMBOL_GPL(xen_domain_type);
|
EXPORT_SYMBOL_GPL(xen_domain_type);
|
||||||
|
|
||||||
|
@ -1137,8 +1142,11 @@ void xen_setup_vcpu_info_placement(void)
|
||||||
{
|
{
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
for_each_possible_cpu(cpu)
|
for_each_possible_cpu(cpu) {
|
||||||
|
/* Set up direct vCPU id mapping for PV guests. */
|
||||||
|
per_cpu(xen_vcpu_id, cpu) = cpu;
|
||||||
xen_vcpu_setup(cpu);
|
xen_vcpu_setup(cpu);
|
||||||
|
}
|
||||||
|
|
||||||
/* xen_vcpu_setup managed to place the vcpu_info within the
|
/* xen_vcpu_setup managed to place the vcpu_info within the
|
||||||
* percpu area for all cpus, so make use of it. Note that for
|
* percpu area for all cpus, so make use of it. Note that for
|
||||||
|
@ -1729,6 +1737,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||||
#endif
|
#endif
|
||||||
xen_raw_console_write("about to get started...\n");
|
xen_raw_console_write("about to get started...\n");
|
||||||
|
|
||||||
|
/* Let's presume PV guests always boot on vCPU with id 0. */
|
||||||
|
per_cpu(xen_vcpu_id, 0) = 0;
|
||||||
|
|
||||||
xen_setup_runstate_info(0);
|
xen_setup_runstate_info(0);
|
||||||
|
|
||||||
xen_efi_init();
|
xen_efi_init();
|
||||||
|
@ -1797,6 +1808,12 @@ static void __init init_hvm_pv_info(void)
|
||||||
|
|
||||||
xen_setup_features();
|
xen_setup_features();
|
||||||
|
|
||||||
|
cpuid(base + 4, &eax, &ebx, &ecx, &edx);
|
||||||
|
if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
|
||||||
|
this_cpu_write(xen_vcpu_id, ebx);
|
||||||
|
else
|
||||||
|
this_cpu_write(xen_vcpu_id, smp_processor_id());
|
||||||
|
|
||||||
pv_info.name = "Xen HVM";
|
pv_info.name = "Xen HVM";
|
||||||
|
|
||||||
xen_domain_type = XEN_HVM_DOMAIN;
|
xen_domain_type = XEN_HVM_DOMAIN;
|
||||||
|
@ -1808,6 +1825,10 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
|
||||||
int cpu = (long)hcpu;
|
int cpu = (long)hcpu;
|
||||||
switch (action) {
|
switch (action) {
|
||||||
case CPU_UP_PREPARE:
|
case CPU_UP_PREPARE:
|
||||||
|
if (cpu_acpi_id(cpu) != U32_MAX)
|
||||||
|
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
|
||||||
|
else
|
||||||
|
per_cpu(xen_vcpu_id, cpu) = cpu;
|
||||||
xen_vcpu_setup(cpu);
|
xen_vcpu_setup(cpu);
|
||||||
if (xen_have_vector_callback) {
|
if (xen_have_vector_callback) {
|
||||||
if (xen_feature(XENFEAT_hvm_safe_pvclock))
|
if (xen_feature(XENFEAT_hvm_safe_pvclock))
|
||||||
|
|
|
@ -9,6 +9,12 @@
|
||||||
|
|
||||||
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
|
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
|
||||||
|
|
||||||
|
DECLARE_PER_CPU(int, xen_vcpu_id);
|
||||||
|
static inline int xen_vcpu_nr(int cpu)
|
||||||
|
{
|
||||||
|
return per_cpu(xen_vcpu_id, cpu);
|
||||||
|
}
|
||||||
|
|
||||||
void xen_arch_pre_suspend(void);
|
void xen_arch_pre_suspend(void);
|
||||||
void xen_arch_post_suspend(int suspend_cancelled);
|
void xen_arch_post_suspend(int suspend_cancelled);
|
||||||
|
|
||||||
|
|
Загрузка…
Ссылка в новой задаче