arch/tile: bomb raw_local_irq_ to arch_local_irq_
This completes the tile migration to the new naming scheme for the architecture-specific irq management code.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Parent: 38a6f42669
Commit: 5d966115de
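As the hunks below show, the rename is mechanical: each tile-specific raw_local_irq_* helper becomes arch_local_irq_* with unchanged arguments and behavior. The mappings visible in this diff are listed here for reference (the "irq" parameter name is only a placeholder for an interrupt number such as INT_TILE_TIMER):

	raw_local_irq_disable_all()    ->  arch_local_irq_disable_all()
	raw_local_irq_enable()         ->  arch_local_irq_enable()
	raw_local_irq_mask(irq)        ->  arch_local_irq_mask(irq)
	raw_local_irq_mask_now(irq)    ->  arch_local_irq_mask_now(irq)
	raw_local_irq_unmask(irq)      ->  arch_local_irq_unmask(irq)
	raw_local_irq_unmask_now(irq)  ->  arch_local_irq_unmask_now(irq)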
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
 void early_panic(const char *fmt, ...)
 {
 	va_list ap;
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	va_start(ap, fmt);
 	early_printk("Kernel panic - not syncing: ");
 	early_vprintk(fmt, ap);
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
 
 static void enable_firewall_interrupts(void)
 {
-	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-	raw_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel. During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_K);
+	arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_K);
+	arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
 void machine_halt(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_halt();
 }
@@ -35,14 +35,14 @@ void machine_halt(void)
 void machine_power_off(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_power_off();
 }
 
 void machine_restart(char *cmd)
 {
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -868,14 +868,14 @@ void __cpuinit setup_cpu(int boot)
 
 	/* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-	raw_local_irq_unmask(INT_DMATLB_MISS);
-	raw_local_irq_unmask(INT_DMATLB_ACCESS);
+	arch_local_irq_unmask(INT_DMATLB_MISS);
+	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-	raw_local_irq_unmask(INT_SNITLB_MISS);
+	arch_local_irq_unmask(INT_SNITLB_MISS);
 #endif
 #ifdef __tilegx__
-	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
 	/*
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
 static void smp_stop_cpu_interrupt(void)
 {
 	set_cpu_online(smp_processor_id(), 0);
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	for (;;)
 		asm("nap");
 }
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
 {
 	BUG_ON(ticks > MAX_TICK);
 	__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
-	raw_local_irq_unmask_now(INT_TILE_TIMER);
+	arch_local_irq_unmask_now(INT_TILE_TIMER);
 	return 0;
 }
 
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
 static void tile_timer_set_mode(enum clock_event_mode mode,
 				struct clock_event_device *evt)
 {
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 }
 
 /*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
 	evt->cpumask = cpumask_of(smp_processor_id());
 
 	/* Start out with timer not firing. */
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 
 	/* Register tile timer. */
 	clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 	 * Mask the timer interrupt here, since we are a oneshot timer
 	 * and there are now by definition no events pending.
 	 */
-	raw_local_irq_mask(INT_TILE_TIMER);
+	arch_local_irq_mask(INT_TILE_TIMER);
 
 	/* Track time spent here in an interrupt context */
 	irq_enter();
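For background, the arch_local_irq_* prefix matches the naming the kernel's generic irqflags layer uses for architecture-provided interrupt-flag functions, so generic callers keep using the local_irq_* macros and never see this rename. A minimal sketch of that layering, assuming an ordinary kernel caller (example_critical_section is hypothetical and not part of this commit):

	#include <linux/irqflags.h>

	static void example_critical_section(void)
	{
		unsigned long flags;

		/*
		 * Generic macro: expands through raw_local_irq_save() in the
		 * core headers and ends up in the architecture's
		 * arch_local_irq_save().
		 */
		local_irq_save(flags);

		/* ... work that must run with interrupts disabled ... */

		local_irq_restore(flags);	/* ends in arch_local_irq_restore() */
	}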