Merge branch 'x86/apic' into x86/platform

Merge in x86/apic to solve a vector_allocation_domain() API change
semantic merge conflict.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit: 8461689c67
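A note for orientation, not part of the commit itself: the semantic conflict
being resolved is the changed shape of two APIC driver callbacks in the
x86/apic branch. A minimal before/after sketch of the signatures, taken
directly from the apic.h hunks below:

    /* Before: the allocation-domain callback returned nothing, and the
     * APIC ID lookup signalled failure by returning BAD_APICID. */
    void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
    unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                           const struct cpumask *andmask);

    /* After: the callback reports whether further allocation domains are
     * worth trying, and the lookup returns 0 or -EINVAL with the APIC ID
     * stored through the new out-parameter. */
    bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
    int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                  const struct cpumask *andmask,
                                  unsigned int *apicid);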
@@ -306,7 +306,7 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
-	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 
 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -331,9 +331,9 @@ struct apic {
 	unsigned long (*set_apic_id)(unsigned int id);
 	unsigned long apic_id_mask;
 
-	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-					       const struct cpumask *andmask);
+	int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+				      const struct cpumask *andmask,
+				      unsigned int *apicid);
 
 	/* ipi */
 	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -537,6 +537,11 @@ static inline const struct cpumask *default_target_cpus(void)
 #endif
 }
 
+static inline const struct cpumask *online_target_cpus(void)
+{
+	return cpu_online_mask;
+}
+
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
@@ -586,21 +591,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 
 #endif
 
-static inline unsigned int
-default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+			    const struct cpumask *andmask,
+			    unsigned int *apicid)
 {
-	return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+	unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+				 cpumask_bits(andmask)[0] &
+				 cpumask_bits(cpu_online_mask)[0] &
+				 APIC_ALL_CPUS;
+
+	if (likely(cpu_mask)) {
+		*apicid = (unsigned int)cpu_mask;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
 }
 
-static inline unsigned int
+extern int
 default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			       const struct cpumask *andmask)
-{
-	unsigned long mask1 = cpumask_bits(cpumask)[0];
-	unsigned long mask2 = cpumask_bits(andmask)[0];
-	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+			       const struct cpumask *andmask,
+			       unsigned int *apicid);
 
-	return (unsigned int)(mask1 & mask2 & mask3);
+static inline bool
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	/* Careful. Some cpus do not strictly honor the set of cpus
+	 * specified in the interrupt destination when using lowest
+	 * priority interrupt delivery mode.
+	 *
+	 * In particular there was a hyperthreading cpu observed to
+	 * deliver interrupts to the wrong hyperthread when only one
+	 * hyperthread was specified in the interrupt destination.
+	 */
+	cpumask_clear(retmask);
+	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+	return false;
+}
+
+static inline bool
+default_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	cpumask_copy(retmask, cpumask_of(cpu));
+	return true;
+}
 
 static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)

@@ -9,15 +9,6 @@
 #include <asm/ipi.h>
 #include <linux/cpumask.h>
 
-/*
- * Need to use more than cpu 0, because we need more vectors
- * when MSI-X are used.
- */
-static const struct cpumask *x2apic_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
 static int x2apic_apic_id_valid(int apicid)
 {
 	return 1;
@@ -28,15 +19,6 @@ static int x2apic_apic_id_registered(void)
 	return 1;
 }
 
-/*
- * For now each logical cpu is in its own vector allocation domain.
- */
-static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static void
 __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
 {

@@ -156,7 +156,6 @@ struct x86_cpuinit_ops {
 /**
  * struct x86_platform_ops - platform specific runtime functions
  * @calibrate_tsc:		calibrate TSC
- * @wallclock_init:		init the wallclock device
  * @get_wallclock:		get time from HW clock like RTC etc.
  * @set_wallclock:		set time back to HW clock
  * @is_untracked_pat_range	exclude from PAT logic
@@ -168,7 +167,6 @@ struct x86_cpuinit_ops {
  */
 struct x86_platform_ops {
 	unsigned long (*calibrate_tsc)(void);
-	void (*wallclock_init)(void);
 	unsigned long (*get_wallclock)(void);
 	int (*set_wallclock)(unsigned long nowtime);
 	void (*iommu_shutdown)(void);

@@ -2123,6 +2123,25 @@ void default_init_apic_ldr(void)
 	apic_write(APIC_LDR, val);
 }
 
+int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+				   const struct cpumask *andmask,
+				   unsigned int *apicid)
+{
+	unsigned int cpu;
+
+	for_each_cpu_and(cpu, cpumask, andmask) {
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
+	}
+
+	if (likely(cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 /*
  * Power management
  */

@@ -36,25 +36,6 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 1;
 }
 
-static const struct cpumask *flat_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 /*
  * Set up the logical destination ID.
  *
@@ -92,7 +73,7 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 }
 
 static void
 flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 {
 	unsigned long mask = cpumask_bits(cpumask)[0];
 	int cpu = smp_processor_id();
@@ -186,7 +167,7 @@ static struct apic apic_flat = {
 	.irq_delivery_mode		= dest_LowestPrio,
 	.irq_dest_mode			= 1, /* logical */
 
-	.target_cpus			= flat_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
@@ -210,8 +191,7 @@ static struct apic apic_flat = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xFFu << 24,
 
-	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= flat_send_IPI_mask,
 	.send_IPI_mask_allbutself	= flat_send_IPI_mask_allbutself,
@@ -262,17 +242,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static const struct cpumask *physflat_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
 	default_send_IPI_mask_sequence_phys(cpumask, vector);
@@ -294,38 +263,6 @@ static void physflat_send_IPI_all(int vector)
 	physflat_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	cpu = cpumask_first(cpumask);
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
-physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static int physflat_probe(void)
 {
 	if (apic == &apic_physflat || num_possible_cpus() > 8)
@@ -345,13 +282,13 @@ static struct apic apic_physflat = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */
 
-	.target_cpus			= physflat_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= 0,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= physflat_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	/* not needed, but shouldn't hurt: */
 	.init_apic_ldr			= flat_init_apic_ldr,
@@ -370,8 +307,7 @@ static struct apic apic_physflat = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xFFu << 24,
 
-	.cpu_mask_to_apicid		= physflat_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= physflat_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= physflat_send_IPI_mask,
 	.send_IPI_mask_allbutself	= physflat_send_IPI_mask_allbutself,

@@ -100,12 +100,12 @@ static unsigned long noop_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }
 
-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static bool noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	if (cpu != 0)
 		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
+	cpumask_copy(retmask, cpumask_of(cpu));
+	return true;
 }
 
 static u32 noop_apic_read(u32 reg)
@@ -159,8 +159,7 @@ struct apic apic_noop = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0x0F << 24,
 
-	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= noop_send_IPI_mask,
 	.send_IPI_mask_allbutself	= noop_send_IPI_mask_allbutself,

@@ -72,17 +72,6 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
 	return initial_apic_id >> index_msb;
 }
 
-static const struct cpumask *numachip_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	union numachip_csr_g3_ext_irq_gen int_gen;
@@ -157,38 +146,6 @@ static void numachip_send_IPI_self(int vector)
 	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
 }
 
-static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	cpu = cpumask_first(cpumask);
-	if (likely((unsigned)cpu < nr_cpu_ids))
-		return per_cpu(x86_cpu_to_apicid, cpu);
-
-	return BAD_APICID;
-}
-
-static unsigned int
-numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-				const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static int __init numachip_probe(void)
 {
 	return apic == &apic_numachip;
@@ -253,13 +210,13 @@ static struct apic apic_numachip __refconst = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */
 
-	.target_cpus			= numachip_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= 0,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= numachip_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= flat_init_apic_ldr,
 
 	.ioapic_phys_id_map		= NULL,
@@ -277,8 +234,7 @@ static struct apic apic_numachip __refconst = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xffU << 24,
 
-	.cpu_mask_to_apicid		= numachip_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= numachip_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= numachip_send_IPI_mask,
 	.send_IPI_mask_allbutself	= numachip_send_IPI_mask_allbutself,

@@ -26,15 +26,6 @@ static int bigsmp_apic_id_registered(void)
 	return 1;
 }
 
-static const struct cpumask *bigsmp_target_cpus(void)
-{
-#ifdef CONFIG_SMP
-	return cpu_online_mask;
-#else
-	return cpumask_of(0);
-#endif
-}
-
 static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
 {
 	return 0;
@@ -105,32 +96,6 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
 	return 1;
 }
 
-/* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	int cpu = cpumask_first(cpumask);
-
-	if (cpu < nr_cpu_ids)
-		return cpu_physical_id(cpu);
-	return BAD_APICID;
-}
-
-static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			return cpu_physical_id(cpu);
-	}
-	return BAD_APICID;
-}
-
 static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
@@ -177,12 +142,6 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
 	{ } /* NULL entry stops DMI scanning */
 };
 
-static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int probe_bigsmp(void)
 {
 	if (def_to_bigsmp)
@@ -205,13 +164,13 @@ static struct apic apic_bigsmp = {
 	/* phys delivery to target CPU: */
 	.irq_dest_mode			= 0,
 
-	.target_cpus			= bigsmp_target_cpus,
+	.target_cpus			= default_target_cpus,
 	.disable_esr			= 1,
 	.dest_logical			= 0,
 	.check_apicid_used		= bigsmp_check_apicid_used,
 	.check_apicid_present		= bigsmp_check_apicid_present,
 
-	.vector_allocation_domain	= bigsmp_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= bigsmp_init_apic_ldr,
 
 	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
@@ -229,8 +188,7 @@ static struct apic apic_bigsmp = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,
 
-	.cpu_mask_to_apicid		= bigsmp_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= bigsmp_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= bigsmp_send_IPI_mask,
 	.send_IPI_mask_allbutself	= NULL,

@@ -394,21 +394,6 @@ static void es7000_enable_apic_mode(void)
 		WARN(1, "Command failed, status = %x\n", mip_status);
 }
 
-static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
-
 static void es7000_wait_for_init_deassert(atomic_t *deassert)
 {
 	while (!atomic_read(deassert))
@@ -540,45 +525,49 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
 	return 1;
 }
 
-static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
-	int cpu, uninitialized_var(apicid);
+	unsigned int cpu, uninitialized_var(apicid);
 
 	/*
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
-	for_each_cpu(cpu, cpumask) {
+	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
 		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			WARN(1, "Not a valid mask!");
 
-			return BAD_APICID;
+			return -EINVAL;
 		}
-		apicid = new_apicid;
+		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	if (!round)
+		return -EINVAL;
+	*dest_id = apicid;
+	return 0;
 }
 
-static unsigned int
+static int
 es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;
 
 	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = es7000_cpu_mask_to_apicid(cpumask);
+	es7000_cpu_mask_to_apicid(cpumask, apicid);
 
 	free_cpumask_var(cpumask);
 
-	return apicid;
+	return 0;
 }
 
 static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -638,7 +627,7 @@ static struct apic __refdata apic_es7000_cluster = {
 	.check_apicid_used		= es7000_check_apicid_used,
 	.check_apicid_present		= es7000_check_apicid_present,
 
-	.vector_allocation_domain	= es7000_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= es7000_init_apic_ldr_cluster,
 
 	.ioapic_phys_id_map		= es7000_ioapic_phys_id_map,
@@ -656,7 +645,6 @@ static struct apic __refdata apic_es7000_cluster = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,
 
-	.cpu_mask_to_apicid		= es7000_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= es7000_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= es7000_send_IPI_mask,
@@ -705,7 +693,7 @@ static struct apic __refdata apic_es7000 = {
 	.check_apicid_used		= es7000_check_apicid_used,
 	.check_apicid_present		= es7000_check_apicid_present,
 
-	.vector_allocation_domain	= es7000_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= es7000_init_apic_ldr,
 
 	.ioapic_phys_id_map		= es7000_ioapic_phys_id_map,
@@ -723,7 +711,6 @@ static struct apic __refdata apic_es7000 = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,
 
-	.cpu_mask_to_apicid		= es7000_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= es7000_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= es7000_send_IPI_mask,

@@ -1112,7 +1112,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
-	static int current_offset = VECTOR_OFFSET_START % 8;
+	static int current_offset = VECTOR_OFFSET_START % 16;
 	unsigned int old_vector;
 	int cpu, err;
 	cpumask_var_t tmp_mask;
@@ -1126,8 +1126,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	old_vector = cfg->vector;
 	if (old_vector) {
 		cpumask_and(tmp_mask, mask, cpu_online_mask);
-		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
-		if (!cpumask_empty(tmp_mask)) {
+		if (cpumask_subset(tmp_mask, cfg->domain)) {
 			free_cpumask_var(tmp_mask);
 			return 0;
 		}
@@ -1138,20 +1137,30 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		int new_cpu;
 		int vector, offset;
+		bool more_domains;
 
-		apic->vector_allocation_domain(cpu, tmp_mask);
+		more_domains = apic->vector_allocation_domain(cpu, tmp_mask);
+
+		if (cpumask_subset(tmp_mask, cfg->domain)) {
+			free_cpumask_var(tmp_mask);
+			return 0;
+		}
 
 		vector = current_vector;
 		offset = current_offset;
 next:
-		vector += 8;
+		vector += 16;
 		if (vector >= first_system_vector) {
 			/* If out of vectors on large boxen, must share them. */
-			offset = (offset + 1) % 8;
+			offset = (offset + 1) % 16;
 			vector = FIRST_EXTERNAL_VECTOR + offset;
 		}
-		if (unlikely(current_vector == vector))
-			continue;
+
+		if (unlikely(current_vector == vector)) {
+			if (more_domains)
+				continue;
+			else
+				break;
+		}
 
 		if (test_bit(vector, used_vectors))
 			goto next;
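For context on the stride change above (8 to 16): the x86 APIC ranks
interrupt priority by the upper nibble of the vector number, 16 vectors per
priority class, so stepping by 16 places consecutive allocations in distinct
priority classes while the wrap-around offset eventually visits every slot
once. A standalone sketch of that search walk; the bounds below are
stand-ins, not the kernel's actual values:

    #include <stdio.h>

    #define FIRST_VECTOR   0x20  /* stand-in for FIRST_EXTERNAL_VECTOR */
    #define SYSTEM_VECTOR  0xef  /* stand-in for first_system_vector */

    int main(void)
    {
            int vector = FIRST_VECTOR, offset = 0, start = vector;

            do {
                    /* step by 16 so each candidate lands in a new
                     * priority class; on overflow, bump the offset and
                     * wrap back to the bottom of the window */
                    vector += 16;
                    if (vector >= SYSTEM_VECTOR) {
                            offset = (offset + 1) % 16;
                            vector = FIRST_VECTOR + offset;
                    }
                    printf("try vector 0x%02x\n", vector);
            } while (vector != start);  /* all candidates exhausted */
            return 0;
    }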
@@ -1346,18 +1355,18 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
 
 	if (!IO_APIC_IRQ(irq))
 		return;
-	/*
-	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
-	 * controllers like 8259. Now that IO-APIC can handle this irq, update
-	 * the cfg->domain.
-	 */
-	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
-		apic->vector_allocation_domain(0, cfg->domain);
 
 	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
 		return;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+	if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
+					 &dest)) {
+		pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
+			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
+		__clear_irq_vector(irq, cfg);
+
+		return;
+	}
 
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1366,7 +1375,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
 		    cfg->vector, irq, attr->trigger, attr->polarity, dest);
 
 	if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
 		pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 			mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
 		__clear_irq_vector(irq, cfg);
@@ -1469,9 +1478,10 @@ void setup_IO_APIC_irq_extra(u32 gsi)
  * Set up the timer pin, possibly with the 8259A-master behind.
  */
 static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
					unsigned int pin, int vector)
 {
 	struct IO_APIC_route_entry entry;
+	unsigned int dest;
 
 	if (irq_remapping_enabled)
 		return;
@@ -1482,9 +1492,13 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
 	 * We use logical delivery to get the timer IRQ
 	 * to the first CPU.
 	 */
+	if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
+						  apic->target_cpus(), &dest)))
+		dest = BAD_APICID;
+
 	entry.dest_mode = apic->irq_dest_mode;
 	entry.mask = 0;			/* don't mask IRQ for edge */
-	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
+	entry.dest = dest;
 	entry.delivery_mode = apic->irq_delivery_mode;
 	entry.polarity = 0;
 	entry.trigger = 0;
@@ -2210,71 +2224,6 @@ void send_cleanup_vector(struct irq_cfg *cfg)
 	cfg->move_in_progress = 0;
 }
 
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
-{
-	int apic, pin;
-	struct irq_pin_list *entry;
-	u8 vector = cfg->vector;
-
-	for_each_irq_pin(entry, cfg->irq_2_pin) {
-		unsigned int reg;
-
-		apic = entry->apic;
-		pin = entry->pin;
-		/*
-		 * With interrupt-remapping, destination information comes
-		 * from interrupt-remapping table entry.
-		 */
-		if (!irq_remapped(cfg))
-			io_apic_write(apic, 0x11 + pin*2, dest);
-		reg = io_apic_read(apic, 0x10 + pin*2);
-		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
-		reg |= vector;
-		io_apic_modify(apic, 0x10 + pin*2, reg);
-	}
-}
-
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-			  unsigned int *dest_id)
-{
-	struct irq_cfg *cfg = data->chip_data;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -1;
-
-	if (assign_irq_vector(data->irq, data->chip_data, mask))
-		return -1;
-
-	cpumask_copy(data->affinity, mask);
-
-	*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
-	return 0;
-}
-
-static int
-ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		    bool force)
-{
-	unsigned int dest, irq = data->irq;
-	unsigned long flags;
-	int ret;
-
-	raw_spin_lock_irqsave(&ioapic_lock, flags);
-	ret = __ioapic_set_affinity(data, mask, &dest);
-	if (!ret) {
-		/* Only the high 8 bits are valid. */
-		dest = SET_APIC_LOGICAL_ID(dest);
-		__target_IO_APIC_irq(irq, dest, data->chip_data);
-	}
-	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-	return ret;
-}
-
 asmlinkage void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
@@ -2362,6 +2311,87 @@ void irq_force_complete_move(int irq)
 static inline void irq_complete_move(struct irq_cfg *cfg) { }
 #endif
 
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
+{
+	int apic, pin;
+	struct irq_pin_list *entry;
+	u8 vector = cfg->vector;
+
+	for_each_irq_pin(entry, cfg->irq_2_pin) {
+		unsigned int reg;
+
+		apic = entry->apic;
+		pin = entry->pin;
+		/*
+		 * With interrupt-remapping, destination information comes
+		 * from interrupt-remapping table entry.
+		 */
+		if (!irq_remapped(cfg))
+			io_apic_write(apic, 0x11 + pin*2, dest);
+		reg = io_apic_read(apic, 0x10 + pin*2);
+		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
+		reg |= vector;
+		io_apic_modify(apic, 0x10 + pin*2, reg);
+	}
+}
+
+/*
+ * Either sets data->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
+ * leaves data->affinity untouched.
+ */
+int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			  unsigned int *dest_id)
+{
+	struct irq_cfg *cfg = data->chip_data;
+	unsigned int irq = data->irq;
+	int err;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -1;
+
+	if (!cpumask_intersects(mask, cpu_online_mask))
+		return -EINVAL;
+
+	err = assign_irq_vector(irq, cfg, mask);
+	if (err)
+		return err;
+
+	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+	if (err) {
+		if (assign_irq_vector(irq, cfg, data->affinity))
+			pr_err("Failed to recover vector for irq %d\n", irq);
+		return err;
+	}
+
+	cpumask_copy(data->affinity, mask);
+
+	return 0;
+}
+
+static int
+ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+		    bool force)
+{
+	unsigned int dest, irq = data->irq;
+	unsigned long flags;
+	int ret;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -1;
+
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
+	ret = __ioapic_set_affinity(data, mask, &dest);
+	if (!ret) {
+		/* Only the high 8 bits are valid. */
+		dest = SET_APIC_LOGICAL_ID(dest);
+		__target_IO_APIC_irq(irq, dest, data->chip_data);
+		ret = IRQ_SET_MASK_OK_NOCOPY;
+	}
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+	return ret;
+}
+
 static void ack_apic_edge(struct irq_data *data)
 {
 	irq_complete_move(data->chip_data);
@@ -2541,9 +2571,7 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
 	chip->irq_ack = ir_ack_apic_edge;
 	chip->irq_eoi = ir_ack_apic_level;
 
-#ifdef CONFIG_SMP
 	chip->irq_set_affinity = set_remapped_irq_affinity;
-#endif
 }
 #endif /* CONFIG_IRQ_REMAP */
@@ -2554,9 +2582,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_unmask		= unmask_ioapic_irq,
 	.irq_ack		= ack_apic_edge,
 	.irq_eoi		= ack_apic_level,
-#ifdef CONFIG_SMP
 	.irq_set_affinity	= ioapic_set_affinity,
-#endif
 	.irq_retrigger		= ioapic_retrigger_irq,
 };
@@ -3038,7 +3064,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
 	if (err)
 		return err;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+	err = apic->cpu_mask_to_apicid_and(cfg->domain,
+					   apic->target_cpus(), &dest);
+	if (err)
+		return err;
 
 	if (irq_remapped(cfg)) {
 		compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
@@ -3072,7 +3101,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
 	return err;
 }
 
-#ifdef CONFIG_SMP
 static int
 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 {
@@ -3092,9 +3120,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 
 	__write_msi_msg(data->msi_desc, &msg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
-#endif /* CONFIG_SMP */
 
 /*
  * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
@@ -3105,9 +3132,7 @@ static struct irq_chip msi_chip = {
 	.irq_unmask		= unmask_msi_irq,
 	.irq_mask		= mask_msi_irq,
 	.irq_ack		= ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity	= msi_set_affinity,
-#endif
 	.irq_retrigger		= ioapic_retrigger_irq,
 };
@@ -3192,7 +3217,6 @@ void native_teardown_msi_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_DMAR_TABLE
-#ifdef CONFIG_SMP
 static int
 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
 		      bool force)
@@ -3214,19 +3238,15 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 	dmar_msi_write(irq, &msg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
-#endif /* CONFIG_SMP */
-
 static struct irq_chip dmar_msi_type = {
 	.name			= "DMAR_MSI",
 	.irq_unmask		= dmar_msi_unmask,
 	.irq_mask		= dmar_msi_mask,
 	.irq_ack		= ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity	= dmar_msi_set_affinity,
-#endif
 	.irq_retrigger		= ioapic_retrigger_irq,
 };
@@ -3247,7 +3267,6 @@ int arch_setup_dmar_msi(unsigned int irq)
 
 #ifdef CONFIG_HPET_TIMER
 
-#ifdef CONFIG_SMP
 static int hpet_msi_set_affinity(struct irq_data *data,
 				 const struct cpumask *mask, bool force)
 {
@@ -3267,19 +3286,15 @@ static int hpet_msi_set_affinity(struct irq_data *data,
 
 	hpet_msi_write(data->handler_data, &msg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
-#endif /* CONFIG_SMP */
-
 static struct irq_chip hpet_msi_type = {
 	.name			= "HPET_MSI",
 	.irq_unmask		= hpet_msi_unmask,
 	.irq_mask		= hpet_msi_mask,
 	.irq_ack		= ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity	= hpet_msi_set_affinity,
-#endif
 	.irq_retrigger		= ioapic_retrigger_irq,
 };
@@ -3314,8 +3329,6 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
  */
 #ifdef CONFIG_HT_IRQ
 
-#ifdef CONFIG_SMP
-
 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 {
 	struct ht_irq_msg msg;
@@ -3340,25 +3353,23 @@ ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 		return -1;
 
 	target_ht_irq(data->irq, dest, cfg->vector);
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
-#endif
-
 static struct irq_chip ht_irq_chip = {
 	.name			= "PCI-HT",
 	.irq_mask		= mask_ht_irq,
 	.irq_unmask		= unmask_ht_irq,
 	.irq_ack		= ack_apic_edge,
-#ifdef CONFIG_SMP
 	.irq_set_affinity	= ht_set_affinity,
-#endif
 	.irq_retrigger		= ioapic_retrigger_irq,
 };
 
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
 	struct irq_cfg *cfg;
+	struct ht_irq_msg msg;
+	unsigned dest;
 	int err;
 
 	if (disable_apic)
@@ -3366,36 +3377,37 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 
 	cfg = irq_cfg(irq);
 	err = assign_irq_vector(irq, cfg, apic->target_cpus());
-	if (!err) {
-		struct ht_irq_msg msg;
-		unsigned dest;
+	if (err)
+		return err;
+
+	err = apic->cpu_mask_to_apicid_and(cfg->domain,
+					   apic->target_cpus(), &dest);
+	if (err)
+		return err;
 
-		dest = apic->cpu_mask_to_apicid_and(cfg->domain,
-						    apic->target_cpus());
+	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
-		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+	msg.address_lo =
+		HT_IRQ_LOW_BASE |
+		HT_IRQ_LOW_DEST_ID(dest) |
+		HT_IRQ_LOW_VECTOR(cfg->vector) |
+		((apic->irq_dest_mode == 0) ?
+			HT_IRQ_LOW_DM_PHYSICAL :
+			HT_IRQ_LOW_DM_LOGICAL) |
+		HT_IRQ_LOW_RQEOI_EDGE |
+		((apic->irq_delivery_mode != dest_LowestPrio) ?
+			HT_IRQ_LOW_MT_FIXED :
+			HT_IRQ_LOW_MT_ARBITRATED) |
+		HT_IRQ_LOW_IRQ_MASKED;
 
-		msg.address_lo =
-			HT_IRQ_LOW_BASE |
-			HT_IRQ_LOW_DEST_ID(dest) |
-			HT_IRQ_LOW_VECTOR(cfg->vector) |
-			((apic->irq_dest_mode == 0) ?
-				HT_IRQ_LOW_DM_PHYSICAL :
-				HT_IRQ_LOW_DM_LOGICAL) |
-			HT_IRQ_LOW_RQEOI_EDGE |
-			((apic->irq_delivery_mode != dest_LowestPrio) ?
-				HT_IRQ_LOW_MT_FIXED :
-				HT_IRQ_LOW_MT_ARBITRATED) |
-			HT_IRQ_LOW_IRQ_MASKED;
+	write_ht_irq_msg(irq, &msg);
 
-		write_ht_irq_msg(irq, &msg);
+	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
+				      handle_edge_irq, "edge");
 
-		irq_set_chip_and_handler_name(irq, &ht_irq_chip,
-					      handle_edge_irq, "edge");
+	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
 
-		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-	}
-	return err;
+	return 0;
 }
 #endif /* CONFIG_HT_IRQ */

@@ -406,16 +406,13 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid)
 * We use physical apicids here, not logical, so just return the default
 * physical broadcast to stop people from breaking us
 */
-static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	return 0x0F;
-}
-
-static inline unsigned int
+static int
 numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			     const struct cpumask *andmask)
+			     const struct cpumask *andmask,
+			     unsigned int *apicid)
 {
-	return 0x0F;
+	*apicid = 0x0F;
+	return 0;
 }
 
 /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
@@ -441,20 +438,6 @@ static int probe_numaq(void)
 	return found_numaq;
 }
 
-static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 static void numaq_setup_portio_remap(void)
 {
 	int num_quads = num_online_nodes();
@@ -491,7 +474,7 @@ static struct apic __refdata apic_numaq = {
 	.check_apicid_used		= numaq_check_apicid_used,
 	.check_apicid_present		= numaq_check_apicid_present,
 
-	.vector_allocation_domain	= numaq_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= numaq_init_apic_ldr,
 
 	.ioapic_phys_id_map		= numaq_ioapic_phys_id_map,
@@ -509,7 +492,6 @@ static struct apic __refdata apic_numaq = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0x0F << 24,
 
-	.cpu_mask_to_apicid		= numaq_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= numaq_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= numaq_send_IPI_mask,

@@ -66,21 +66,6 @@ static void setup_apic_flat_routing(void)
 #endif
 }
 
-static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/*
-	 * Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 /* should be called last. */
 static int probe_default(void)
 {
@@ -105,7 +90,7 @@ static struct apic apic_default = {
 	.check_apicid_used		= default_check_apicid_used,
 	.check_apicid_present		= default_check_apicid_present,
 
-	.vector_allocation_domain	= default_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= default_init_apic_ldr,
 
 	.ioapic_phys_id_map		= default_ioapic_phys_id_map,
@@ -123,8 +108,7 @@ static struct apic apic_default = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0x0F << 24,
 
-	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= default_send_IPI_mask_logical,
 	.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_logical,

@@ -263,43 +263,48 @@ static int summit_check_phys_apicid_present(int physical_apicid)
 	return 1;
 }
 
-static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
 {
 	unsigned int round = 0;
-	int cpu, apicid = 0;
+	unsigned int cpu, apicid = 0;
 
 	/*
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
-	for_each_cpu(cpu, cpumask) {
+	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
 		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			printk("%s: Not a valid mask!\n", __func__);
-			return BAD_APICID;
+			return -EINVAL;
 		}
 		apicid |= new_apicid;
 		round++;
 	}
-	return apicid;
+	if (!round)
+		return -EINVAL;
+	*dest_id = apicid;
+	return 0;
 }
 
-static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
-			      const struct cpumask *andmask)
+static int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
+	*apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-		return apicid;
+		return 0;
 
 	cpumask_and(cpumask, inmask, andmask);
-	cpumask_and(cpumask, cpumask, cpu_online_mask);
-	apicid = summit_cpu_mask_to_apicid(cpumask);
+	summit_cpu_mask_to_apicid(cpumask, apicid);
 
 	free_cpumask_var(cpumask);
 
-	return apicid;
+	return 0;
 }
 
 /*
@@ -320,20 +325,6 @@ static int probe_summit(void)
 	return 0;
 }
 
-static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	/* Careful. Some cpus do not strictly honor the set of cpus
-	 * specified in the interrupt destination when using lowest
-	 * priority interrupt delivery mode.
-	 *
-	 * In particular there was a hyperthreading cpu observed to
-	 * deliver interrupts to the wrong hyperthread when only one
-	 * hyperthread was specified in the interrupt destination.
-	 */
-	cpumask_clear(retmask);
-	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
 #ifdef CONFIG_X86_SUMMIT_NUMA
 static struct rio_table_hdr *rio_table_hdr;
 static struct scal_detail   *scal_devs[MAX_NUMNODES];
@@ -509,7 +500,7 @@ static struct apic apic_summit = {
 	.check_apicid_used		= summit_check_apicid_used,
 	.check_apicid_present		= summit_check_apicid_present,
 
-	.vector_allocation_domain	= summit_vector_allocation_domain,
+	.vector_allocation_domain	= flat_vector_allocation_domain,
 	.init_apic_ldr			= summit_init_apic_ldr,
 
 	.ioapic_phys_id_map		= summit_ioapic_phys_id_map,
@@ -527,7 +518,6 @@ static struct apic apic_summit = {
 	.set_apic_id			= NULL,
 	.apic_id_mask			= 0xFF << 24,
 
-	.cpu_mask_to_apicid		= summit_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= summit_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= summit_send_IPI_mask,

@@ -81,7 +81,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 }
 
 static void
 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
 }
@@ -96,36 +96,37 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	/*
-	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_logical_apicid, cpu);
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
+static int
 x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
+			      const struct cpumask *andmask,
+			      unsigned int *apicid)
 {
-	int cpu;
+	u32 dest = 0;
+	u16 cluster;
+	int i;
 
-	/*
-	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
+	for_each_cpu_and(i, cpumask, andmask) {
+		if (!cpumask_test_cpu(i, cpu_online_mask))
+			continue;
+		dest = per_cpu(x86_cpu_to_logical_apicid, i);
+		cluster = x2apic_cluster(i);
+		break;
 	}
 
-	return per_cpu(x86_cpu_to_logical_apicid, cpu);
+	if (!dest)
+		return -EINVAL;
+
+	for_each_cpu_and(i, cpumask, andmask) {
+		if (!cpumask_test_cpu(i, cpu_online_mask))
+			continue;
+		if (cluster != x2apic_cluster(i))
+			continue;
+		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
+	}
+
+	*apicid = dest;
+
+	return 0;
 }
 
 static void init_x2apic_ldr(void)
@@ -208,6 +209,16 @@ static int x2apic_cluster_probe(void)
 	return 0;
 }
 
+/*
+ * Each x2apic cluster is an allocation domain.
+ */
+static bool cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	cpumask_clear(retmask);
+	cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
+	return true;
+}
+
 static struct apic apic_x2apic_cluster = {
 
 	.name				= "cluster x2apic",
@@ -219,13 +230,13 @@ static struct apic apic_x2apic_cluster = {
 	.irq_delivery_mode		= dest_LowestPrio,
 	.irq_dest_mode			= 1, /* logical */
 
-	.target_cpus			= x2apic_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= x2apic_vector_allocation_domain,
+	.vector_allocation_domain	= cluster_vector_allocation_domain,
 	.init_apic_ldr			= init_x2apic_ldr,
 
 	.ioapic_phys_id_map		= NULL,
@@ -243,7 +254,6 @@ static struct apic apic_x2apic_cluster = {
 	.set_apic_id			= x2apic_set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,
 
-	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= x2apic_send_IPI_mask,
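The rewritten cluster lookup above no longer picks a single CPU: it takes
the cluster of the first eligible CPU, then ORs in the logical IDs of every
other online CPU of that cluster found in the mask, yielding a multi-CPU
destination for lowest-priority delivery. A toy illustration with invented
logical IDs (x2APIC logical IDs carry the cluster in the high 16 bits and a
one-hot member bit below):

    #include <stdio.h>
    #include <stdint.h>

    #define CLUSTER(lid)  ((lid) >> 16)

    int main(void)
    {
            /* invented logical IDs: cluster 2, members 0, 1 and 3 */
            uint32_t lid[] = { 0x20001, 0x20002, 0x20008 };
            uint32_t dest = 0;

            /* merge every ID from the first CPU's cluster */
            for (int i = 0; i < 3; i++)
                    if (CLUSTER(lid[i]) == CLUSTER(lid[0]))
                            dest |= lid[i];

            printf("composed destination: 0x%x\n", dest); /* 0x2000b */
            return 0;
    }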
@@ -76,38 +76,6 @@ static void x2apic_send_IPI_all(int vector)
 	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			      const struct cpumask *andmask)
-{
-	int cpu;
-
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	for_each_cpu_and(cpu, cpumask, andmask) {
-		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
-	}
-
-	return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
 static void init_x2apic_ldr(void)
 {
 }
@@ -131,13 +99,13 @@ static struct apic apic_x2apic_phys = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */
 
-	.target_cpus			= x2apic_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= 0,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= x2apic_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= init_x2apic_ldr,
 
 	.ioapic_phys_id_map		= NULL,
@@ -155,8 +123,7 @@ static struct apic apic_x2apic_phys = {
 	.set_apic_id			= x2apic_set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,
 
-	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
-	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= x2apic_send_IPI_mask,
 	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,

@@ -185,17 +185,6 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-static const struct cpumask *uv_target_cpus(void)
-{
-	return cpu_online_mask;
-}
-
-static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-	cpumask_clear(retmask);
-	cpumask_set_cpu(cpu, retmask);
-}
-
 static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
@@ -280,25 +269,12 @@ static void uv_init_apic_ldr(void)
 {
 }
 
-static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-	/*
-	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
-	 * May as well be the first.
-	 */
-	int cpu = cpumask_first(cpumask);
-
-	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
-	else
-		return BAD_APICID;
-}
-
-static unsigned int
+static int
 uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-			  const struct cpumask *andmask)
+			  const struct cpumask *andmask,
+			  unsigned int *apicid)
 {
-	int cpu;
+	int unsigned cpu;
 
 	/*
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
@@ -308,7 +284,13 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+
+	if (likely(cpu < nr_cpu_ids)) {
+		*apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -362,13 +344,13 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 0, /* physical */
 
-	.target_cpus			= uv_target_cpus,
+	.target_cpus			= online_target_cpus,
 	.disable_esr			= 0,
 	.dest_logical			= APIC_DEST_LOGICAL,
 	.check_apicid_used		= NULL,
 	.check_apicid_present		= NULL,
 
-	.vector_allocation_domain	= uv_vector_allocation_domain,
+	.vector_allocation_domain	= default_vector_allocation_domain,
 	.init_apic_ldr			= uv_init_apic_ldr,
 
 	.ioapic_phys_id_map		= NULL,
@@ -386,7 +368,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.set_apic_id			= set_apic_id,
 	.apic_id_mask			= 0xFFFFFFFFu,
 
-	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
 	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,
 
 	.send_IPI_mask			= uv_send_IPI_mask,

@@ -119,7 +119,7 @@ static __init void early_serial_init(char *s)
 	unsigned char c;
 	unsigned divisor;
 	unsigned baud = DEFAULT_BAUD;
-	char *e;
+	ssize_t ret;
 
 	if (*s == ',')
 		++s;
@@ -127,14 +127,14 @@ static __init void early_serial_init(char *s)
 	if (*s) {
 		unsigned port;
 		if (!strncmp(s, "0x", 2)) {
-			early_serial_base = simple_strtoul(s, &e, 16);
+			ret = kstrtoint(s, 16, &early_serial_base);
 		} else {
 			static const int __initconst bases[] = { 0x3f8, 0x2f8 };
 
 			if (!strncmp(s, "ttyS", 4))
 				s += 4;
-			port = simple_strtoul(s, &e, 10);
-			if (port > 1 || s == e)
+			ret = kstrtouint(s, 10, &port);
+			if (ret || port > 1)
 				port = 0;
 			early_serial_base = bases[port];
 		}
@@ -149,8 +149,8 @@ static __init void early_serial_init(char *s)
 	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */
 
 	if (*s) {
-		baud = simple_strtoul(s, &e, 0);
-		if (baud == 0 || s == e)
+		ret = kstrtouint(s, 0, &baud);
+		if (ret || baud == 0)
 			baud = DEFAULT_BAUD;
 	}

@@ -1031,8 +1031,6 @@ void __init setup_arch(char **cmdline_p)
 
 	x86_init.timers.wallclock_init();
 
-	x86_platform.wallclock_init();
-
 	mcheck_init();
 
 	arch_init_ideal_nops();

@@ -29,7 +29,6 @@ void __init x86_init_uint_noop(unsigned int unused) { }
 void __init x86_init_pgd_noop(pgd_t *unused) { }
 int __init iommu_init_noop(void) { return 0; }
 void iommu_shutdown_noop(void) { }
-void wallclock_init_noop(void) { }
 
 /*
  * The platform setup functions are preset with the default functions
@@ -101,7 +100,6 @@ static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform = {
 	.calibrate_tsc			= native_calibrate_tsc,
-	.wallclock_init			= wallclock_init_noop,
 	.get_wallclock			= mach_get_cmos_time,
 	.set_wallclock			= mach_set_rtc_mmss,
 	.iommu_shutdown			= iommu_shutdown_noop,

@@ -135,6 +135,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
 	int mmr_pnode, err;
+	unsigned int dest;
 
 	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
 			sizeof(unsigned long));
@@ -143,6 +144,10 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	if (err != 0)
 		return err;
 
+	err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
+	if (err != 0)
+		return err;
+
 	if (limit == UV_AFFINITY_CPU)
 		irq_set_status_flags(irq, IRQ_NO_BALANCING);
 	else
@@ -159,7 +164,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	entry->polarity		= 0;
 	entry->trigger		= 0;
 	entry->mask		= 0;
-	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);
+	entry->dest		= dest;
 
 	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -222,7 +227,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 
 /*

@@ -902,7 +902,6 @@ static int intel_setup_ioapic_entry(int irq,
 	return 0;
 }
 
-#ifdef CONFIG_SMP
 /*
  * Migrate the IO-APIC irq in the presence of intr-remapping.
  *
@@ -924,6 +923,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_cfg *cfg = data->chip_data;
 	unsigned int dest, irq = data->irq;
 	struct irte irte;
+	int err;
+
+	if (!config_enabled(CONFIG_SMP))
+		return -EINVAL;
 
 	if (!cpumask_intersects(mask, cpu_online_mask))
 		return -EINVAL;
@@ -931,10 +934,16 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	if (get_irte(irq, &irte))
 		return -EBUSY;
 
-	if (assign_irq_vector(irq, cfg, mask))
-		return -EBUSY;
+	err = assign_irq_vector(irq, cfg, mask);
+	if (err)
+		return err;
 
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+	if (err) {
+		if (assign_irq_vector(irq, cfg, data->affinity))
+			pr_err("Failed to recover vector for irq %d\n", irq);
+		return err;
+	}
 
 	irte.vector = cfg->vector;
 	irte.dest_id = IRTE_DEST(dest);
@@ -956,7 +965,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	cpumask_copy(data->affinity, mask);
 	return 0;
 }
-#endif
 
 static void intel_compose_msi_msg(struct pci_dev *pdev,
 				  unsigned int irq, unsigned int dest,
@@ -1058,9 +1066,7 @@ struct irq_remap_ops intel_irq_remap_ops = {
 	.reenable		= reenable_irq_remapping,
 	.enable_faulting	= enable_drhd_fault_handling,
 	.setup_ioapic_entry	= intel_setup_ioapic_entry,
-#ifdef CONFIG_SMP
 	.set_affinity		= intel_ioapic_set_affinity,
-#endif
 	.free_irq		= free_irte,
 	.compose_msi_msg	= intel_compose_msi_msg,
 	.msi_alloc_irq		= intel_msi_alloc_irq,

@@ -111,16 +111,15 @@ int setup_ioapic_remapped_entry(int irq,
 					     vector, attr);
 }
 
-#ifdef CONFIG_SMP
 int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 			      bool force)
 {
-	if (!remap_ops || !remap_ops->set_affinity)
+	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
+	    !remap_ops->set_affinity)
 		return 0;
 
 	return remap_ops->set_affinity(data, mask, force);
 }
-#endif
 
 void free_remapped_irq(int irq)
 {

@@ -59,11 +59,9 @@ struct irq_remap_ops {
 			       unsigned int, int,
 			       struct io_apic_irq_attr *);
 
-#ifdef CONFIG_SMP
 	/* Set the CPU affinity of a remapped interrupt */
 	int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
 			    bool force);
-#endif
 
 	/* Free an IRQ */
 	int (*free_irq)(int);

@@ -150,9 +150,7 @@ struct irq_data {
 	void			*handler_data;
 	void			*chip_data;
 	struct msi_desc		*msi_desc;
-#ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
-#endif
 };
 
 /*