genirq: revert dynarray

Revert the dynarray changes. They need more thought and polishing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Parent: ee32c97322
Commit: d6c88a507e
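For orientation before the hunks: the reverted facility let boot code declare arrays whose length (typically nr_irqs) is only fixed at boot time, instead of sizing them statically with NR_IRQS. A minimal usage sketch, assembled purely from the API this commit deletes (struct dyn_array, DEFINE_DYN_ARRAY, pre_alloc_dyn_array; see the struct dyn_array hunk and the deleted init/dyn_array.c below). The my_* identifiers are illustrative only, not names from the tree:

	struct my_entry {
		unsigned int irq;
	};

	static unsigned int my_count = 256;	/* may still change before boot alloc */
	static struct my_entry *my_entries;	/* filled in by pre_alloc_dyn_array() */

	/* optional init hook, called once the storage exists */
	static void __init my_init_work(void *data)
	{
		struct dyn_array *da = data;
		struct my_entry *p = *da->name;	/* freshly allocated base */
		unsigned int i;

		for (i = 0; i < *da->nr; i++)
			p[i].irq = i;
	}

	/* Emits a struct dyn_array descriptor into the .dyn_array.init
	 * section; pre_alloc_dyn_array() later walks that section,
	 * bootmem-allocates my_count * sizeof(struct my_entry) bytes,
	 * stores the pointer in my_entries and runs my_init_work(). */
	DEFINE_DYN_ARRAY(my_entries, sizeof(struct my_entry), my_count,
			 PAGE_SIZE, my_init_work);

This mirrors how the io_apic code in the first hunks used it for irq_cfgx and irq_2_pin_head.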
@@ -102,7 +102,3 @@ config HAVE_CLK
 	help
 	  The <linux/clk.h> calls support software clock gating and
 	  thus are a key power management tool on many systems.
-
-config HAVE_DYN_ARRAY
-	def_bool n
-
@@ -33,7 +33,6 @@ config X86
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
-	select HAVE_DYN_ARRAY
 
 config ARCH_DEFCONFIG
 	string
@@ -107,7 +107,6 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-struct irq_cfg;
 struct irq_pin_list;
 struct irq_cfg {
 	unsigned int irq;
@@ -120,7 +119,7 @@ struct irq_cfg {
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
-static struct irq_cfg irq_cfg_legacy[] __initdata = {
+static struct irq_cfg irq_cfgx[NR_IRQS] = {
 	[0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
 	[1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
 	[2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
@@ -139,48 +138,26 @@ static struct irq_cfg irq_cfg_legacy[] __initdata = {
 	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
 };
 
-static struct irq_cfg irq_cfg_init = { .irq = -1U, };
-
-static void init_one_irq_cfg(struct irq_cfg *cfg)
-{
-	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
-}
-
-static struct irq_cfg *irq_cfgx;
-
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	struct irq_cfg *cfg;
-	int legacy_count;
-	int i;
-
-	cfg = *da->name;
-
-	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
-
-	legacy_count = ARRAY_SIZE(irq_cfg_legacy);
-	for (i = legacy_count; i < *da->nr; i++)
-		init_one_irq_cfg(&cfg[i]);
-}
-
 #define for_each_irq_cfg(irq, cfg)		\
-	for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
+	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
 
-DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);
-
-struct irq_cfg *irq_cfg(unsigned int irq)
+static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-	if (irq < nr_irqs)
-		return &irq_cfgx[irq];
-
-	return NULL;
+	return irq < nr_irqs ? irq_cfgx + irq : NULL;
 }
-struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+
+static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
 {
 	return irq_cfg(irq);
 }
 
 /*
  * Rough estimation of how many shared IRQs there are, can be changed
  * anytime.
  */
 #define MAX_PLUS_SHARED_IRQS NR_IRQS
 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
 
 /*
  * This is performance-critical, we want to do it O(1)
  *
@@ -193,59 +170,29 @@ struct irq_pin_list {
 	struct irq_pin_list *next;
 };
 
-static struct irq_pin_list *irq_2_pin_head;
-/* fill one page ? */
-static int nr_irq_2_pin = 0x100;
+static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
 static struct irq_pin_list *irq_2_pin_ptr;
-static void __init irq_2_pin_init_work(void *data)
+
+static void __init irq_2_pin_init(void)
 {
-	struct dyn_array *da = data;
-	struct irq_pin_list *pin;
+	struct irq_pin_list *pin = irq_2_pin_head;
 	int i;
 
-	pin = *da->name;
-
-	for (i = 1; i < *da->nr; i++)
+	for (i = 1; i < PIN_MAP_SIZE; i++)
 		pin[i-1].next = &pin[i];
 
 	irq_2_pin_ptr = &pin[0];
 }
-DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
 
 static struct irq_pin_list *get_one_free_irq_2_pin(void)
 {
-	struct irq_pin_list *pin;
-	int i;
-
-	pin = irq_2_pin_ptr;
-
-	if (pin) {
-		irq_2_pin_ptr = pin->next;
-		pin->next = NULL;
-		return pin;
-	}
-
-	/*
-	 * we run out of pre-allocate ones, allocate more
-	 */
-	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
-
-	if (after_bootmem)
-		pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
-				GFP_ATOMIC);
-	else
-		pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
-				nr_irq_2_pin, PAGE_SIZE, 0);
+	struct irq_pin_list *pin = irq_2_pin_ptr;
 
 	if (!pin)
 		panic("can not get more irq_2_pin\n");
-
-	for (i = 1; i < nr_irq_2_pin; i++)
-		pin[i-1].next = &pin[i];
 
 	irq_2_pin_ptr = pin->next;
 	pin->next = NULL;
 
 	return pin;
 }
 
@@ -284,8 +231,9 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
-	if (sis_apic_bug)
-		writel(reg, &io_apic->index);
+
+	if (sis_apic_bug)
+		writel(reg, &io_apic->index);
 	writel(value, &io_apic->data);
 }
 
@@ -1044,11 +992,11 @@ static int pin_2_irq(int idx, int apic, int pin)
 		while (i < apic)
 			irq += nr_ioapic_registers[i++];
 		irq += pin;
-		/*
+		/*
 		 * For MPS mode, so far only needed by ES7000 platform
 		 */
-		if (ioapic_renumber_irq)
-			irq = ioapic_renumber_irq(apic, irq);
+		if (ioapic_renumber_irq)
+			irq = ioapic_renumber_irq(apic, irq);
 	}
 
 #ifdef CONFIG_X86_32
@@ -1232,19 +1180,19 @@ static struct irq_chip ir_ioapic_chip;
 #ifdef CONFIG_X86_32
 static inline int IO_APIC_irq_trigger(int irq)
 {
-	int apic, idx, pin;
+	int apic, idx, pin;
 
-	for (apic = 0; apic < nr_ioapics; apic++) {
-		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-			idx = find_irq_entry(apic, pin, mp_INT);
-			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
-				return irq_trigger(idx);
-		}
-	}
-	/*
+	for (apic = 0; apic < nr_ioapics; apic++) {
+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+			idx = find_irq_entry(apic, pin, mp_INT);
+			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
+				return irq_trigger(idx);
+		}
+	}
+	/*
 	 * nonexistent IRQs are edge default
 	 */
-	return 0;
+	return 0;
 }
 #else
 static inline int IO_APIC_irq_trigger(int irq)
@@ -1509,8 +1457,8 @@ __apicdebuginit(void) print_IO_APIC(void)
 	reg_01.raw = io_apic_read(apic, 1);
 	if (reg_01.bits.version >= 0x10)
 		reg_02.raw = io_apic_read(apic, 2);
-	if (reg_01.bits.version >= 0x20)
-		reg_03.raw = io_apic_read(apic, 3);
+	if (reg_01.bits.version >= 0x20)
+		reg_03.raw = io_apic_read(apic, 3);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	printk("\n");
@@ -2089,9 +2037,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
 #else
 static int ioapic_retrigger_irq(unsigned int irq)
 {
-	send_IPI_self(irq_cfg(irq)->vector);
+	send_IPI_self(irq_cfg(irq)->vector);
 
-	return 1;
+	return 1;
 }
 #endif
 
@@ -2189,7 +2137,7 @@ static int migrate_irq_remapped_level(int irq)
 
 	if (io_apic_level_ack_pending(irq)) {
 		/*
-		 * Interrupt in progress. Migrating irq now will change the
+		 * Interrupt in progress. Migrating irq now will change the
 		 * vector information in the IO-APIC RTE and that will confuse
 		 * the EOI broadcast performed by cpu.
 		 * So, delay the irq migration to the next instance.
@@ -2426,28 +2374,28 @@ static void ack_apic_level(unsigned int irq)
 }
 
 static struct irq_chip ioapic_chip __read_mostly = {
-	.name = "IO-APIC",
-	.startup = startup_ioapic_irq,
-	.mask = mask_IO_APIC_irq,
-	.unmask = unmask_IO_APIC_irq,
-	.ack = ack_apic_edge,
-	.eoi = ack_apic_level,
+	.name = "IO-APIC",
+	.startup = startup_ioapic_irq,
+	.mask = mask_IO_APIC_irq,
+	.unmask = unmask_IO_APIC_irq,
+	.ack = ack_apic_edge,
+	.eoi = ack_apic_level,
 #ifdef CONFIG_SMP
-	.set_affinity = set_ioapic_affinity_irq,
+	.set_affinity = set_ioapic_affinity_irq,
 #endif
 	.retrigger = ioapic_retrigger_irq,
 };
 
 #ifdef CONFIG_INTR_REMAP
 static struct irq_chip ir_ioapic_chip __read_mostly = {
-	.name = "IR-IO-APIC",
-	.startup = startup_ioapic_irq,
-	.mask = mask_IO_APIC_irq,
-	.unmask = unmask_IO_APIC_irq,
-	.ack = ack_x2apic_edge,
-	.eoi = ack_x2apic_level,
+	.name = "IR-IO-APIC",
+	.startup = startup_ioapic_irq,
+	.mask = mask_IO_APIC_irq,
+	.unmask = unmask_IO_APIC_irq,
+	.ack = ack_x2apic_edge,
+	.eoi = ack_x2apic_level,
 #ifdef CONFIG_SMP
-	.set_affinity = set_ir_ioapic_affinity_irq,
+	.set_affinity = set_ir_ioapic_affinity_irq,
 #endif
 	.retrigger = ioapic_retrigger_irq,
 };
@@ -2636,8 +2584,8 @@ static inline void __init check_timer(void)
 
 	local_irq_save(flags);
 
-	ver = apic_read(APIC_LVR);
-	ver = GET_APIC_VERSION(ver);
+	ver = apic_read(APIC_LVR);
+	ver = GET_APIC_VERSION(ver);
 
 	/*
 	 * get/set the timer IRQ vector:
@@ -2822,12 +2770,12 @@ void __init setup_IO_APIC(void)
 	io_apic_irqs = ~PIC_IRQS;
 
 	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-	/*
+	/*
 	 * Set up IO-APIC IRQ routing.
 	 */
 #ifdef CONFIG_X86_32
-	if (!acpi_ioapic)
-		setup_ioapic_ids_from_mpc();
+	if (!acpi_ioapic)
+		setup_ioapic_ids_from_mpc();
 #endif
 	sync_Arb_IDs();
 	setup_IO_APIC_irqs();
@@ -2842,9 +2790,9 @@ void __init setup_IO_APIC(void)
 
 static int __init io_apic_bug_finalize(void)
 {
-	if (sis_apic_bug == -1)
-		sis_apic_bug = 0;
-	return 0;
+	if (sis_apic_bug == -1)
+		sis_apic_bug = 0;
+	return 0;
 }
 
 late_initcall(io_apic_bug_finalize);
@@ -3199,7 +3147,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
 	if (index < 0) {
 		printk(KERN_ERR
 		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
-		       pci_name(dev));
+		       pci_name(dev));
 		return -ENOSPC;
 	}
 	return index;
@@ -3885,23 +3833,24 @@ static struct resource * __init ioapic_setup_resources(void)
 void __init ioapic_init_mappings(void)
 {
 	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
-	int i;
 	struct resource *ioapic_res;
+	int i;
 
+	irq_2_pin_init();
 	ioapic_res = ioapic_setup_resources();
 	for (i = 0; i < nr_ioapics; i++) {
 		if (smp_found_config) {
 			ioapic_phys = mp_ioapics[i].mp_apicaddr;
 #ifdef CONFIG_X86_32
-			if (!ioapic_phys) {
-				printk(KERN_ERR
-				       "WARNING: bogus zero IO-APIC "
-				       "address found in MPTABLE, "
-				       "disabling IO/APIC support!\n");
-				smp_found_config = 0;
-				skip_ioapic_setup = 1;
-				goto fake_ioapic_page;
-			}
+			if (!ioapic_phys) {
+				printk(KERN_ERR
+				       "WARNING: bogus zero IO-APIC "
+				       "address found in MPTABLE, "
+				       "disabling IO/APIC support!\n");
+				smp_found_config = 0;
+				skip_ioapic_setup = 1;
+				goto fake_ioapic_page;
+			}
 #endif
 		} else {
 #ifdef CONFIG_X86_32
@@ -140,7 +140,7 @@ static void __init setup_cpu_pda_map(void)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size, old_size, da_size;
+	ssize_t size, old_size;
 	char *ptr;
 	int cpu;
 	unsigned long align = 1;
@@ -150,9 +150,8 @@ void __init setup_per_cpu_areas(void)
 
 	/* Copy section for each CPU (we discard the original) */
 	old_size = PERCPU_ENOUGH_ROOM;
-	da_size = per_cpu_dyn_array_size(&align);
 	align = max_t(unsigned long, PAGE_SIZE, align);
-	size = roundup(old_size + da_size, align);
+	size = roundup(old_size, align);
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 	       size);
 
@@ -182,9 +181,6 @@ void __init setup_per_cpu_areas(void)
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
-		per_cpu_alloc_dyn_array(cpu, ptr + old_size);
-
 	}
 
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
@@ -633,7 +633,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	/*
 	 * handle this 'virtual interrupt' as a Cobalt one now.
 	 */
-	kstat_irqs_this_cpu(desc)++;
+	kstat_incr_irqs_this_cpu(realirq, desc);
 
 	if (likely(desc->action != NULL))
 		handle_IRQ_event(realirq, desc->action);
@@ -145,7 +145,6 @@ SECTIONS
 		*(.x86_cpu_dev.init)
 		__x86_cpu_dev_end = .;
 	}
-	DYN_ARRAY_INIT(8)
 	SECURITY_INIT
 	. = ALIGN(4);
 	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
@@ -174,8 +174,6 @@ SECTIONS
 	}
 	__x86_cpu_dev_end = .;
 
-	DYN_ARRAY_INIT(8)
-
 	SECURITY_INIT
 
 	. = ALIGN(8);
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	kstat_irqs_this_cpu(irq_to_desc(irq))++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
 	raw_local_irq_restore(flags);
@@ -558,12 +558,7 @@ struct timer_rand_state {
 	unsigned dont_count_entropy:1;
 };
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct timer_rand_state **irq_timer_state;
-DEFINE_DYN_ARRAY(irq_timer_state, sizeof(struct timer_rand_state *), nr_irqs, PAGE_SIZE, NULL);
-#else
 static struct timer_rand_state *irq_timer_state[NR_IRQS];
-#endif
 
 static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
 {
@@ -19,20 +19,13 @@ struct irq_2_iommu {
 	u8 irte_mask;
 };
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_2_iommu *irq_2_iommuX;
-DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
-#else
 static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-#endif
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	if (irq < nr_irqs)
-		return &irq_2_iommuX[irq];
-
-	return NULL;
+	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
 }
 
 static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
 {
 	return irq_2_iommu(irq);
@@ -210,19 +210,6 @@
  * All archs are supposed to use RO_DATA() */
 #define RODATA RO_DATA(4096)
 
-#define DYN_ARRAY_INIT(align) \
-	. = ALIGN((align)); \
-	.dyn_array.init : AT(ADDR(.dyn_array.init) - LOAD_OFFSET) { \
-		VMLINUX_SYMBOL(__dyn_array_start) = .; \
-		*(.dyn_array.init) \
-		VMLINUX_SYMBOL(__dyn_array_end) = .; \
-	} \
-	. = ALIGN((align)); \
-	.per_cpu_dyn_array.init : AT(ADDR(.per_cpu_dyn_array.init) - LOAD_OFFSET) { \
-		VMLINUX_SYMBOL(__per_cpu_dyn_array_start) = .; \
-		*(.per_cpu_dyn_array.init) \
-		VMLINUX_SYMBOL(__per_cpu_dyn_array_end) = .; \
-	}
 #define SECURITY_INIT \
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__security_initcall_start) = .; \
@@ -247,49 +247,6 @@ struct obs_kernel_param {
 /* Relies on boot_command_line being set */
 void __init parse_early_param(void);
 
-struct dyn_array {
-	void **name;
-	unsigned long size;
-	unsigned int *nr;
-	unsigned long align;
-	void (*init_work)(void *);
-};
-extern struct dyn_array *__dyn_array_start[], *__dyn_array_end[];
-extern struct dyn_array *__per_cpu_dyn_array_start[], *__per_cpu_dyn_array_end[];
-
-#define DEFINE_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
-	static struct dyn_array __dyn_array_##nameX __initdata = \
-	{	.name = (void **)&(nameX),\
-		.size = sizeX,\
-		.nr = &(nrX),\
-		.align = alignX,\
-		.init_work = init_workX,\
-	}; \
-	static struct dyn_array *__dyn_array_ptr_##nameX __used \
-	__attribute__((__section__(".dyn_array.init"))) = \
-	&__dyn_array_##nameX
-
-#define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
-	DEFINE_DYN_ARRAY_ADDR(nameX, nameX, sizeX, nrX, alignX, init_workX)
-
-#define DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
-	static struct dyn_array __per_cpu_dyn_array_##nameX __initdata = \
-	{	.name = (void **)&(addrX),\
-		.size = sizeX,\
-		.nr = &(nrX),\
-		.align = alignX,\
-		.init_work = init_workX,\
-	}; \
-	static struct dyn_array *__per_cpu_dyn_array_ptr_##nameX __used \
-	__attribute__((__section__(".per_cpu_dyn_array.init"))) = \
-	&__per_cpu_dyn_array_##nameX
-
-#define DEFINE_PER_CPU_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
-	DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, nameX, nrX, alignX, init_workX)
-
-extern void pre_alloc_dyn_array(void);
-extern unsigned long per_cpu_dyn_array_size(unsigned long *align);
-extern void per_cpu_alloc_dyn_array(int cpu, char *ptr);
 #endif /* __ASSEMBLY__ */
 
 /**
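The macros just removed only emit descriptors; their consumer walks the .dyn_array.init section delimited by the __dyn_array_start/__dyn_array_end linker symbols (see the DYN_ARRAY_INIT hunk above). A condensed sketch of that two-pass walk, paraphrasing the init/dyn_array.c code deleted below rather than quoting it exactly:

	/* pass 1: compute the total footprint and the strictest alignment */
	unsigned long total_size = 0, max_align = PAGE_SIZE, size, phys;
	struct dyn_array **daa;
	char *ptr;

	for (daa = __dyn_array_start; daa < __dyn_array_end; daa++) {
		struct dyn_array *da = *daa;

		size = da->size * (*da->nr);
		total_size += roundup(size, da->align);	/* keep each array aligned */
		if (da->align > max_align)
			max_align = da->align;
	}

	/* pass 2: one bootmem block, then carve out each registered array */
	ptr = __alloc_bootmem(total_size, max_align, 0);
	phys = virt_to_phys(ptr);
	for (daa = __dyn_array_start; daa < __dyn_array_end; daa++) {
		struct dyn_array *da = *daa;

		phys = roundup(phys, da->align);
		*da->name = phys_to_virt(phys);	/* publish the array base */
		phys += da->size * (*da->nr);
		if (da->init_work)
			da->init_work(da);	/* optional per-array init hook */
	}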
@@ -139,8 +139,6 @@ struct irq_chip {
 	const char *typename;
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  *
@@ -167,9 +165,6 @@ struct irq_2_iommu;
  */
 struct irq_desc {
 	unsigned int irq;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned int *kstat_irqs;
-#endif
 	irq_flow_handler_t handle_irq;
 	struct irq_chip *chip;
 	struct msi_desc *msi_desc;
@@ -198,23 +193,13 @@ struct irq_desc {
 } ____cacheline_internodealigned_in_smp;
 
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
-/* could be removed if we get rid of all irq_desc reference */
 extern struct irq_desc irq_desc[NR_IRQS];
-#else
-extern struct irq_desc *irq_desc;
-#endif
 
 static inline struct irq_desc *irq_to_desc(unsigned int irq)
 {
 	return (irq < nr_irqs) ? irq_desc + irq : NULL;
 }
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-#define kstat_irqs_this_cpu(DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()])
-#endif
-
 /*
  * Migration helpers for obsolete names, they will go away:
  */
@@ -28,9 +28,7 @@ struct cpu_usage_stat {
 
 struct kernel_stat {
 	struct cpu_usage_stat cpustat;
-#ifndef CONFIG_HAVE_DYN_ARRAY
 	unsigned int irqs[NR_IRQS];
-#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -41,20 +39,18 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 
 extern unsigned long long nr_context_switches(void);
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
-#define kstat_irqs_this_cpu(irq) \
-	(kstat_this_cpu.irqs[irq])
-#endif
+struct irq_desc;
 
+static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
+					    struct irq_desc *desc)
+{
+	kstat_this_cpu.irqs[irq]++;
+}
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	return kstat_cpu(cpu).irqs[irq];
 }
-#else
-extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
-#endif
 
 /*
  * Number of interrupts per specific IRQ source, since bootup
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y := main.o dyn_array.o version.o mounts.o
+obj-y := main.o version.o mounts.o
 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y += noinitramfs.o
 else
--- a/init/dyn_array.c
+++ /dev/null
@@ -1,120 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/kallsyms.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-
-void __init pre_alloc_dyn_array(void)
-{
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long total_size = 0, size, phys;
-	unsigned long max_align = 1;
-	struct dyn_array **daa;
-	char *ptr;
-
-	/* get the total size at first */
-	for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		printk(KERN_DEBUG "dyn_array %pF size:%#lx nr:%d align:%#lx\n",
-			da->name, da->size, *da->nr, da->align);
-		size = da->size * (*da->nr);
-		total_size += roundup(size, da->align);
-		if (da->align > max_align)
-			max_align = da->align;
-	}
-	if (total_size)
-		printk(KERN_DEBUG "dyn_array total_size: %#lx\n",
-			total_size);
-	else
-		return;
-
-	/* allocate them all together */
-	max_align = max_t(unsigned long, max_align, PAGE_SIZE);
-	ptr = __alloc_bootmem(total_size, max_align, 0);
-	phys = virt_to_phys(ptr);
-
-	for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		size = da->size * (*da->nr);
-		phys = roundup(phys, da->align);
-		printk(KERN_DEBUG "dyn_array %pF ==> [%#lx - %#lx]\n",
-			da->name, phys, phys + size);
-		*da->name = phys_to_virt(phys);
-
-		phys += size;
-
-		if (da->init_work)
-			da->init_work(da);
-	}
-#else
-#ifdef CONFIG_GENERIC_HARDIRQS
-	unsigned int i;
-
-	for (i = 0; i < NR_IRQS; i++)
-		irq_desc[i].irq = i;
-#endif
-#endif
-}
-
-unsigned long __init per_cpu_dyn_array_size(unsigned long *align)
-{
-	unsigned long total_size = 0;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long size;
-	struct dyn_array **daa;
-	unsigned max_align = 1;
-
-	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		printk(KERN_DEBUG "per_cpu_dyn_array %pF size:%#lx nr:%d align:%#lx\n",
-			da->name, da->size, *da->nr, da->align);
-		size = da->size * (*da->nr);
-		total_size += roundup(size, da->align);
-		if (da->align > max_align)
-			max_align = da->align;
-	}
-	if (total_size) {
-		printk(KERN_DEBUG "per_cpu_dyn_array total_size: %#lx\n",
-			total_size);
-		*align = max_align;
-	}
-#endif
-	return total_size;
-}
-
-#ifdef CONFIG_SMP
-void __init per_cpu_alloc_dyn_array(int cpu, char *ptr)
-{
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long size, phys;
-	struct dyn_array **daa;
-	unsigned long addr;
-	void **array;
-
-	phys = virt_to_phys(ptr);
-	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		size = da->size * (*da->nr);
-		phys = roundup(phys, da->align);
-		printk(KERN_DEBUG "per_cpu_dyn_array %pF ==> [%#lx - %#lx]\n",
-			da->name, phys, phys + size);
-
-		addr = (unsigned long)da->name;
-		addr += per_cpu_offset(cpu);
-		array = (void **)addr;
-		*array = phys_to_virt(phys);
-		*da->name = *array; /* so init_work could use it directly */
-
-		phys += size;
-
-		if (da->init_work)
-			da->init_work(da);
-	}
-#endif
-}
-#endif
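The per-cpu half of the deleted file pairs with the setup_per_cpu_areas() hunks (arch/x86 copy above, generic init/main.c copy below): each CPU's chunk was grown from PERCPU_ENOUGH_ROOM to carry the registered per-cpu dyn arrays as a tail. A sketch of the sizing arithmetic being reverted, with illustrative numbers only:

	unsigned long align = 1;
	unsigned long old_size = PERCPU_ENOUGH_ROOM;
	/* e.g. one per-cpu array of 224 IRQ counters, 4 bytes each */
	unsigned long da_size = per_cpu_dyn_array_size(&align);	/* 896 here */

	align = max_t(unsigned long, PAGE_SIZE, align);
	/* static per-cpu image plus the dyn-array tail, suitably aligned */
	unsigned long size = roundup(old_size + da_size, align);

	/* After copying the static image into each chunk,
	 * per_cpu_alloc_dyn_array(cpu, ptr + old_size) re-pointed that
	 * CPU's descriptors into its own tail. */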
--- a/init/main.c
+++ b/init/main.c
@@ -391,23 +391,17 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 static void __init setup_per_cpu_areas(void)
 {
-	unsigned long size, i, old_size;
+	unsigned long size, i;
 	char *ptr;
 	unsigned long nr_possible_cpus = num_possible_cpus();
-	unsigned long align = 1;
-	unsigned da_size;
 
 	/* Copy section for each CPU (we discard the original) */
-	old_size = PERCPU_ENOUGH_ROOM;
-	da_size = per_cpu_dyn_array_size(&align);
-	align = max_t(unsigned long, PAGE_SIZE, align);
-	size = ALIGN(old_size + da_size, align);
+	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
 	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
 
 	for_each_possible_cpu(i) {
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		per_cpu_alloc_dyn_array(i, ptr + old_size);
 		ptr += size;
 	}
 }
@@ -573,7 +567,6 @@ asmlinkage void __init start_kernel(void)
 	printk(KERN_NOTICE);
 	printk(linux_banner);
 	setup_arch(&command_line);
-	pre_alloc_dyn_array();
 	mm_init_owner(&init_mm, &init_task);
 	setup_command_line(command_line);
 	unwind_setup();
@@ -326,11 +326,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -371,11 +367,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -422,11 +414,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 		goto out;
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -490,11 +478,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		mask_ack_irq(desc, irq);
 		goto out_unlock;
 	}
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
 	desc->chip->ack(irq);
@@ -549,11 +533,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
@@ -18,11 +18,6 @@
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq: the interrupt number
@@ -30,15 +25,10 @@ static struct lock_class_key irq_desc_lock_class;
 *
 * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
 */
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -59,80 +49,6 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_desc irq_desc_init = {
-	.irq = -1U,
-	.status = IRQ_DISABLED,
-	.chip = &no_irq_chip,
-	.handle_irq = handle_bad_irq,
-	.depth = 1,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
-};
-
-
-static void init_one_irq_desc(struct irq_desc *desc)
-{
-	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-}
-
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-				unsigned long align,
-				unsigned long goal);
-
-static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
-{
-	unsigned long bytes, total_bytes;
-	char *ptr;
-	int i;
-	unsigned long phys;
-
-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
-	total_bytes = bytes * nr_desc;
-	if (after_bootmem)
-		ptr = kzalloc(total_bytes, GFP_ATOMIC);
-	else
-		ptr = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
-
-	if (!ptr)
-		panic(" can not allocate kstat_irqs\n");
-
-	phys = __pa(ptr);
-	printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
-
-	for (i = 0; i < nr_desc; i++) {
-		desc[i].kstat_irqs = (unsigned int *)ptr;
-		ptr += bytes;
-	}
-}
-
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	int i;
-	struct irq_desc *desc;
-
-	desc = *da->name;
-
-	for (i = 0; i < *da->nr; i++) {
-		init_one_irq_desc(&desc[i]);
-		desc[i].irq = i;
-	}
-
-	/* init kstat_irqs, nr_cpu_ids is ready already */
-	init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
-}
-
-struct irq_desc *irq_desc;
-DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
-
-#else
-
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -146,8 +62,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
-#endif
-
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -258,11 +172,8 @@ unsigned int __do_IRQ(unsigned int irq)
 	struct irqaction *action;
 	unsigned int status;
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
@@ -351,23 +262,16 @@ out:
 
 
 #ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+
 void early_init_irq_lock_class(void)
 {
-#ifndef CONFIG_HAVE_DYN_ARRAY
 	int i;
 
 	for (i = 0; i < nr_irqs; i++)
 		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
-#endif
 }
 #endif
-
-#ifdef CONFIG_HAVE_DYN_ARRAY
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->kstat_irqs[cpu];
-}
-#endif
-EXPORT_SYMBOL(kstat_irqs_cpu);
-