Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (32 commits)
  sh: intc: switch irq_desc iteration to new active IRQ iterator.
  sh: fix up cpu hotplug IRQ migration for irq_data changes.
  sh: oprofile: Make sure the backtrace op is available for timer-fallback.
  sh64: oprofile: Fix up kernel stack pointer size mismatch.
  sh: oprofile: Fix up and extend op_name_from_perf_id().
  sh: lockless get_user_pages_fast()
  sh64: _PAGE_SPECIAL support.
  sound: sh: ctrl_in/outX to __raw_read/writeX conversion.
  sh: disable deprecated genirq support.
  sh: update show_interrupts() for irq_data chip lookup.
  sh: intc: irq_data conversion.
  sh64: irq_data conversion.
  sh64: update for IRQ flag handling naming changes.
  rtc: rtc-rs5c313: ctrl_in/outX to __raw_read/writeX conversion.
  sh: mach-se: irq_data conversion.
  input: hp680_ts_input: ctrl_in/outX to __raw_read/writeX conversion.
  input: jornada680_kbd: ctrl_in/outX to __raw_read/writeX conversion.
  sh: hd64461: irq_data conversion.
  sh: mach-x3proto: irq_data conversion.
  sh: mach-systemh: irq_data conversion.
  ...
Commit 3c37629578
@@ -25,8 +25,11 @@ config SUPERH
	select HAVE_KERNEL_LZO
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_GENERIC_HARDIRQS
	select HAVE_SPARSE_IRQ
	select RTC_LIB
	select GENERIC_ATOMIC64
	select GENERIC_HARDIRQS_NO_DEPRECATED
	help
	  The SuperH is a RISC processor targeted for use in embedded systems
	  and consumer electronics; it was also used in the Sega Dreamcast
@@ -49,6 +52,7 @@ config SUPERH32
	select HAVE_MIXED_BREAKPOINTS_REGS
	select PERF_EVENTS
	select ARCH_HIBERNATION_POSSIBLE if MMU
	select SPARSE_IRQ

config SUPERH64
	def_bool ARCH = "sh64"
@@ -78,19 +82,9 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
	def_bool y

config GENERIC_HARDIRQS
	def_bool y

config GENERIC_HARDIRQS_NO__DO_IRQ
	def_bool y

config IRQ_PER_CPU
	def_bool y

config SPARSE_IRQ
	def_bool y
	depends on SUPERH32

config GENERIC_GPIO
	def_bool n
@@ -55,8 +55,9 @@ static struct irqaction cayman_action_pci2 = {
	.flags = IRQF_DISABLED,
};

static void enable_cayman_irq(unsigned int irq)
static void enable_cayman_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned long flags;
	unsigned long mask;
	unsigned int reg;
@@ -72,8 +73,9 @@ static void enable_cayman_irq(unsigned int irq)
	local_irq_restore(flags);
}

void disable_cayman_irq(unsigned int irq)
static void disable_cayman_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned long flags;
	unsigned long mask;
	unsigned int reg;
@@ -89,16 +91,10 @@ void disable_cayman_irq(unsigned int irq)
	local_irq_restore(flags);
}

static void ack_cayman_irq(unsigned int irq)
{
	disable_cayman_irq(irq);
}

struct irq_chip cayman_irq_type = {
	.name = "Cayman-IRQ",
	.unmask = enable_cayman_irq,
	.mask = disable_cayman_irq,
	.mask_ack = ack_cayman_irq,
	.irq_unmask = enable_cayman_irq,
	.irq_mask = disable_cayman_irq,
};

int cayman_irq_demux(int evt)
@@ -60,8 +60,9 @@
 */

/* Disable the hardware event by masking its bit in its EMR */
static inline void disable_systemasic_irq(unsigned int irq)
static inline void disable_systemasic_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
	__u32 mask;

@@ -71,8 +72,9 @@ static inline void disable_systemasic_irq(unsigned int irq)
}

/* Enable the hardware event by setting its bit in its EMR */
static inline void enable_systemasic_irq(unsigned int irq)
static inline void enable_systemasic_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
	__u32 mask;

@@ -82,18 +84,19 @@ static inline void enable_systemasic_irq(unsigned int irq)
}

/* Acknowledge a hardware event by writing its bit back to its ESR */
static void mask_ack_systemasic_irq(unsigned int irq)
static void mask_ack_systemasic_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	__u32 esr = ESR_BASE + (LEVEL(irq) << 2);
	disable_systemasic_irq(irq);
	disable_systemasic_irq(data);
	outl((1 << EVENT_BIT(irq)), esr);
}

struct irq_chip systemasic_int = {
	.name = "System ASIC",
	.mask = disable_systemasic_irq,
	.mask_ack = mask_ack_systemasic_irq,
	.unmask = enable_systemasic_irq,
	.irq_mask = disable_systemasic_irq,
	.irq_mask_ack = mask_ack_systemasic_irq,
	.irq_unmask = enable_systemasic_irq,
};

/*
@@ -18,25 +18,24 @@
#include <linux/io.h>
#include <mach-landisk/mach/iodata_landisk.h>

static void disable_landisk_irq(unsigned int irq)
static void disable_landisk_irq(struct irq_data *data)
{
	unsigned char mask = 0xff ^ (0x01 << (irq - 5));
	unsigned char mask = 0xff ^ (0x01 << (data->irq - 5));

	__raw_writeb(__raw_readb(PA_IMASK) & mask, PA_IMASK);
}

static void enable_landisk_irq(unsigned int irq)
static void enable_landisk_irq(struct irq_data *data)
{
	unsigned char value = (0x01 << (irq - 5));
	unsigned char value = (0x01 << (data->irq - 5));

	__raw_writeb(__raw_readb(PA_IMASK) | value, PA_IMASK);
}

static struct irq_chip landisk_irq_chip __read_mostly = {
	.name = "LANDISK",
	.mask = disable_landisk_irq,
	.unmask = enable_landisk_irq,
	.mask_ack = disable_landisk_irq,
	.irq_mask = disable_landisk_irq,
	.irq_unmask = enable_landisk_irq,
};

/*
@@ -50,7 +49,7 @@ void __init init_landisk_IRQ(void)
		disable_irq_nosync(i);
		set_irq_chip_and_handler_name(i, &landisk_irq_chip,
					      handle_level_irq, "level");
		enable_landisk_irq(i);
		enable_landisk_irq(irq_get_irq_data(i));
	}
	__raw_writeb(0x00, PA_PWRINT_CLR);
}
@@ -65,19 +65,9 @@ static const struct {
# error Inconsistancy in defining the IRQ# for primary IDE!
#endif

static void enable_microdev_irq(unsigned int irq);
static void disable_microdev_irq(unsigned int irq);
static void mask_and_ack_microdev(unsigned int);

static struct irq_chip microdev_irq_type = {
	.name = "MicroDev-IRQ",
	.unmask = enable_microdev_irq,
	.mask = disable_microdev_irq,
	.ack = mask_and_ack_microdev,
};

static void disable_microdev_irq(unsigned int irq)
static void disable_microdev_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned int fpgaIrq;

	if (irq >= NUM_EXTERNAL_IRQS)
@@ -91,8 +81,9 @@ static void disable_microdev_irq(unsigned int irq)
	__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTDSB_REG);
}

static void enable_microdev_irq(unsigned int irq)
static void enable_microdev_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned long priorityReg, priorities, pri;
	unsigned int fpgaIrq;

@@ -116,17 +107,18 @@ static void enable_microdev_irq(unsigned int irq)
	__raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTENB_REG);
}

static struct irq_chip microdev_irq_type = {
	.name = "MicroDev-IRQ",
	.irq_unmask = enable_microdev_irq,
	.irq_mask = disable_microdev_irq,
};

/* This function sets the desired irq handler to be a MicroDev type */
static void __init make_microdev_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	set_irq_chip_and_handler(irq, &microdev_irq_type, handle_level_irq);
	disable_microdev_irq(irq);
}

static void mask_and_ack_microdev(unsigned int irq)
{
	disable_microdev_irq(irq);
	disable_microdev_irq(irq_get_irq_data(irq));
}

extern void __init init_microdev_irq(void)
@@ -25,8 +25,9 @@
#define INTC_IPR01 0xfffe0818
#define INTC_ICR1 0xfffe0802

static void disable_se7206_irq(unsigned int irq)
static void disable_se7206_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned short val;
	unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq)));
	unsigned short msk0,msk1;
@@ -55,8 +56,9 @@ static void disable_se7206_irq(unsigned int irq)
	__raw_writew(msk1, INTMSK1);
}

static void enable_se7206_irq(unsigned int irq)
static void enable_se7206_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned short val;
	unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq)));
	unsigned short msk0,msk1;
@@ -86,13 +88,14 @@ static void enable_se7206_irq(unsigned int irq)
	__raw_writew(msk1, INTMSK1);
}

static void eoi_se7206_irq(unsigned int irq)
static void eoi_se7206_irq(struct irq_data *data)
{
	unsigned short sts0,sts1;
	unsigned int irq = data->irq;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		enable_se7206_irq(irq);
		enable_se7206_irq(data);
	/* FPGA isr clear */
	sts0 = __raw_readw(INTSTS0);
	sts1 = __raw_readw(INTSTS1);
@@ -115,10 +118,9 @@ static void eoi_se7206_irq(unsigned int irq)

static struct irq_chip se7206_irq_chip __read_mostly = {
	.name = "SE7206-FPGA",
	.mask = disable_se7206_irq,
	.unmask = enable_se7206_irq,
	.mask_ack = disable_se7206_irq,
	.eoi = eoi_se7206_irq,
	.irq_mask = disable_se7206_irq,
	.irq_unmask = enable_se7206_irq,
	.irq_eoi = eoi_se7206_irq,
};

static void make_se7206_irq(unsigned int irq)
@@ -126,7 +128,7 @@ static void make_se7206_irq(unsigned int irq)
	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
				      handle_level_irq, "level");
	disable_se7206_irq(irq);
	disable_se7206_irq(irq_get_irq_data(irq));
}

/*
@@ -18,23 +18,22 @@

unsigned int se7343_fpga_irq[SE7343_FPGA_IRQ_NR] = { 0, };

static void disable_se7343_irq(unsigned int irq)
static void disable_se7343_irq(struct irq_data *data)
{
	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
	unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
	__raw_writew(__raw_readw(PA_CPLD_IMSK) | 1 << bit, PA_CPLD_IMSK);
}

static void enable_se7343_irq(unsigned int irq)
static void enable_se7343_irq(struct irq_data *data)
{
	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
	unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
	__raw_writew(__raw_readw(PA_CPLD_IMSK) & ~(1 << bit), PA_CPLD_IMSK);
}

static struct irq_chip se7343_irq_chip __read_mostly = {
	.name = "SE7343-FPGA",
	.mask = disable_se7343_irq,
	.unmask = enable_se7343_irq,
	.mask_ack = disable_se7343_irq,
	.irq_mask = disable_se7343_irq,
	.irq_unmask = enable_se7343_irq,
};

static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
@@ -18,23 +18,22 @@

unsigned int se7722_fpga_irq[SE7722_FPGA_IRQ_NR] = { 0, };

static void disable_se7722_irq(unsigned int irq)
static void disable_se7722_irq(struct irq_data *data)
{
	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
	unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
	__raw_writew(__raw_readw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
}

static void enable_se7722_irq(unsigned int irq)
static void enable_se7722_irq(struct irq_data *data)
{
	unsigned int bit = (unsigned int)get_irq_chip_data(irq);
	unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
	__raw_writew(__raw_readw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
}

static struct irq_chip se7722_irq_chip __read_mostly = {
	.name = "SE7722-FPGA",
	.mask = disable_se7722_irq,
	.unmask = enable_se7722_irq,
	.mask_ack = disable_se7722_irq,
	.irq_mask = disable_se7722_irq,
	.irq_unmask = enable_se7722_irq,
};

static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
@@ -68,15 +68,17 @@ static struct fpga_irq get_fpga_irq(unsigned int irq)
	return set;
}

static void disable_se7724_irq(unsigned int irq)
static void disable_se7724_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
	unsigned int bit = irq - set.base;
	__raw_writew(__raw_readw(set.mraddr) | 0x0001 << bit, set.mraddr);
}

static void enable_se7724_irq(unsigned int irq)
static void enable_se7724_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct fpga_irq set = get_fpga_irq(fpga2irq(irq));
	unsigned int bit = irq - set.base;
	__raw_writew(__raw_readw(set.mraddr) & ~(0x0001 << bit), set.mraddr);
@@ -84,9 +86,8 @@ static void enable_se7724_irq(unsigned int irq)

static struct irq_chip se7724_irq_chip __read_mostly = {
	.name = "SE7724-FPGA",
	.mask = disable_se7724_irq,
	.unmask = enable_se7724_irq,
	.mask_ack = disable_se7724_irq,
	.irq_mask = disable_se7724_irq,
	.irq_unmask = enable_se7724_irq,
};

static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
@@ -23,21 +23,8 @@
static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004;
static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000;

/* forward declaration */
static void enable_systemh_irq(unsigned int irq);
static void disable_systemh_irq(unsigned int irq);
static void mask_and_ack_systemh(unsigned int);

static struct irq_chip systemh_irq_type = {
	.name = " SystemH Register",
	.unmask = enable_systemh_irq,
	.mask = disable_systemh_irq,
	.ack = mask_and_ack_systemh,
};

static void disable_systemh_irq(unsigned int irq)
static void disable_systemh_irq(struct irq_data *data)
{
	if (systemh_irq_mask_register) {
		unsigned long val, mask = 0x01 << 1;

		/* Clear the "irq"th bit in the mask and set it in the request */
@@ -48,29 +35,27 @@ static void disable_systemh_irq(unsigned int irq)
		val = __raw_readl((unsigned long)systemh_irq_request_register);
		val |= mask;
		__raw_writel(val, (unsigned long)systemh_irq_request_register);
	}
}

static void enable_systemh_irq(unsigned int irq)
static void enable_systemh_irq(struct irq_data *data)
{
	if (systemh_irq_mask_register) {
		unsigned long val, mask = 0x01 << 1;

		/* Set "irq"th bit in the mask register */
		val = __raw_readl((unsigned long)systemh_irq_mask_register);
		val |= mask;
		__raw_writel(val, (unsigned long)systemh_irq_mask_register);
	}
}

static void mask_and_ack_systemh(unsigned int irq)
{
	disable_systemh_irq(irq);
}
static struct irq_chip systemh_irq_type = {
	.name = "SystemH Register",
	.irq_unmask = enable_systemh_irq,
	.irq_mask = disable_systemh_irq,
};

void make_systemh_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq);
	disable_systemh_irq(irq);
	disable_systemh_irq(irq_get_irq_data(irq));
}
@@ -54,18 +54,19 @@ static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)

static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned long mask;
	int pin;

	chip->mask_ack(irq);
	chip->irq_mask_ack(data);

	mask = __raw_readw(KEYDETR);

	for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
		generic_handle_irq(x3proto_gpio_to_irq(NULL, pin));

	chip->unmask(irq);
	chip->irq_unmask(data);
}

struct gpio_chip x3proto_gpio_chip = {
@@ -17,8 +17,9 @@
/* This belongs in cpu specific */
#define INTC_ICR1 0xA4140010UL

static void hd64461_mask_irq(unsigned int irq)
static void hd64461_mask_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned short nimr;
	unsigned short mask = 1 << (irq - HD64461_IRQBASE);

@@ -27,8 +28,9 @@ static void hd64461_mask_irq(unsigned int irq)
	__raw_writew(nimr, HD64461_NIMR);
}

static void hd64461_unmask_irq(unsigned int irq)
static void hd64461_unmask_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	unsigned short nimr;
	unsigned short mask = 1 << (irq - HD64461_IRQBASE);

@@ -37,20 +39,21 @@ static void hd64461_unmask_irq(unsigned int irq)
	__raw_writew(nimr, HD64461_NIMR);
}

static void hd64461_mask_and_ack_irq(unsigned int irq)
static void hd64461_mask_and_ack_irq(struct irq_data *data)
{
	hd64461_mask_irq(irq);
	hd64461_mask_irq(data);

#ifdef CONFIG_HD64461_ENABLER
	if (irq == HD64461_IRQBASE + 13)
	if (data->irq == HD64461_IRQBASE + 13)
		__raw_writeb(0x00, HD64461_PCC1CSCR);
#endif
}

static struct irq_chip hd64461_irq_chip = {
	.name = "HD64461-IRQ",
	.mask = hd64461_mask_irq,
	.mask_ack = hd64461_mask_and_ack_irq,
	.unmask = hd64461_unmask_irq,
	.irq_mask = hd64461_mask_irq,
	.irq_mask_ack = hd64461_mask_and_ack_irq,
	.irq_unmask = hd64461_unmask_irq,
};

static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc)
@@ -169,6 +169,8 @@ extern void page_table_range_init(unsigned long start, unsigned long end,
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTE_SPECIAL

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH_PGTABLE_H */
@@ -378,8 +378,6 @@ PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL);

#define __HAVE_ARCH_PTE_SPECIAL

/*
 * Macro and implementation to make a page protection as uncachable.
 */
@@ -130,6 +130,7 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
 * anything above the PPN field.
 */
#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL _PAGE_EXT(0x002)

#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
			   _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
@@ -173,7 +174,8 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
			_PAGE_SPECIAL)

/*
 * We have full permissions (Read/Write/Execute/Shared).
@@ -263,7 +265,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte){ return 0; }
static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -272,8 +274,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) |
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry.
@@ -89,6 +89,7 @@ struct sh_cpuinfo {
	struct task_struct *idle;
#endif

	unsigned int phys_bits;
	unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
@@ -340,6 +340,8 @@ asmlinkage void __cpuinit cpu_init(void)
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

	current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;

	speculative_execution_init();
	expmask_init();
@@ -51,16 +51,20 @@ static inline void set_interrupt_registers(int ip)
		: "t");
}

static void mask_imask_irq(unsigned int irq)
static void mask_imask_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;

	clear_bit(irq, imask_mask);
	if (interrupt_priority < IMASK_PRIORITY - irq)
		interrupt_priority = IMASK_PRIORITY - irq;
	set_interrupt_registers(interrupt_priority);
}

static void unmask_imask_irq(unsigned int irq)
static void unmask_imask_irq(struct irq_data *data)
{
	unsigned int irq = data->irq;

	set_bit(irq, imask_mask);
	interrupt_priority = IMASK_PRIORITY -
		find_first_zero_bit(imask_mask, IMASK_PRIORITY);
@@ -69,9 +73,9 @@ static void unmask_imask_irq(unsigned int irq)

static struct irq_chip imask_irq_chip = {
	.name = "SR.IMASK",
	.mask = mask_imask_irq,
	.unmask = unmask_imask_irq,
	.mask_ack = mask_imask_irq,
	.irq_mask = mask_imask_irq,
	.irq_unmask = unmask_imask_irq,
	.irq_mask_ack = mask_imask_irq,
};

void make_imask_irq(unsigned int irq)
@ -76,39 +76,11 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
|
|||
};
|
||||
|
||||
static unsigned long intc_virt;
|
||||
|
||||
static unsigned int startup_intc_irq(unsigned int irq);
|
||||
static void shutdown_intc_irq(unsigned int irq);
|
||||
static void enable_intc_irq(unsigned int irq);
|
||||
static void disable_intc_irq(unsigned int irq);
|
||||
static void mask_and_ack_intc(unsigned int);
|
||||
static void end_intc_irq(unsigned int irq);
|
||||
|
||||
static struct irq_chip intc_irq_type = {
|
||||
.name = "INTC",
|
||||
.startup = startup_intc_irq,
|
||||
.shutdown = shutdown_intc_irq,
|
||||
.enable = enable_intc_irq,
|
||||
.disable = disable_intc_irq,
|
||||
.ack = mask_and_ack_intc,
|
||||
.end = end_intc_irq
|
||||
};
|
||||
|
||||
static int irlm; /* IRL mode */
|
||||
|
||||
static unsigned int startup_intc_irq(unsigned int irq)
|
||||
{
|
||||
enable_intc_irq(irq);
|
||||
return 0; /* never anything pending */
|
||||
}
|
||||
|
||||
static void shutdown_intc_irq(unsigned int irq)
|
||||
{
|
||||
disable_intc_irq(irq);
|
||||
}
|
||||
|
||||
static void enable_intc_irq(unsigned int irq)
|
||||
static void enable_intc_irq(struct irq_data *data)
|
||||
{
|
||||
unsigned int irq = data->irq;
|
||||
unsigned long reg;
|
||||
unsigned long bitmask;
|
||||
|
||||
|
@ -126,8 +98,9 @@ static void enable_intc_irq(unsigned int irq)
|
|||
__raw_writel(bitmask, reg);
|
||||
}
|
||||
|
||||
static void disable_intc_irq(unsigned int irq)
|
||||
static void disable_intc_irq(struct irq_data *data)
|
||||
{
|
||||
unsigned int irq = data->irq;
|
||||
unsigned long reg;
|
||||
unsigned long bitmask;
|
||||
|
||||
|
@ -142,15 +115,11 @@ static void disable_intc_irq(unsigned int irq)
|
|||
__raw_writel(bitmask, reg);
|
||||
}
|
||||
|
||||
static void mask_and_ack_intc(unsigned int irq)
|
||||
{
|
||||
disable_intc_irq(irq);
|
||||
}
|
||||
|
||||
static void end_intc_irq(unsigned int irq)
|
||||
{
|
||||
enable_intc_irq(irq);
|
||||
}
|
||||
static struct irq_chip intc_irq_type = {
|
||||
.name = "INTC",
|
||||
.irq_enable = enable_intc_irq,
|
||||
.irq_disable = disable_intc_irq,
|
||||
};
|
||||
|
||||
void __init plat_irq_setup(void)
|
||||
{
|
||||
|
|
|
@ -24,25 +24,25 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/topology.h>
|
||||
|
||||
static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
|
||||
static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
|
||||
{
|
||||
struct irq_chip *chip = get_irq_chip(irq);
|
||||
struct irq_chip *chip = irq_data_get_irq_chip(data);
|
||||
return container_of(chip, struct ipr_desc, chip);
|
||||
}
|
||||
|
||||
static void disable_ipr_irq(unsigned int irq)
|
||||
static void disable_ipr_irq(struct irq_data *data)
|
||||
{
|
||||
struct ipr_data *p = get_irq_chip_data(irq);
|
||||
unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
|
||||
struct ipr_data *p = irq_data_get_irq_chip_data(data);
|
||||
unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
|
||||
/* Set the priority in IPR to 0 */
|
||||
__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
|
||||
(void)__raw_readw(addr); /* Read back to flush write posting */
|
||||
}
|
||||
|
||||
static void enable_ipr_irq(unsigned int irq)
|
||||
static void enable_ipr_irq(struct irq_data *data)
|
||||
{
|
||||
struct ipr_data *p = get_irq_chip_data(irq);
|
||||
unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
|
||||
struct ipr_data *p = irq_data_get_irq_chip_data(data);
|
||||
unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
|
||||
/* Set priority in IPR back to original value */
|
||||
__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
|
||||
}
|
||||
|
@ -56,19 +56,18 @@ void register_ipr_controller(struct ipr_desc *desc)
|
|||
{
|
||||
int i;
|
||||
|
||||
desc->chip.mask = disable_ipr_irq;
|
||||
desc->chip.unmask = enable_ipr_irq;
|
||||
desc->chip.mask_ack = disable_ipr_irq;
|
||||
desc->chip.irq_mask = disable_ipr_irq;
|
||||
desc->chip.irq_unmask = enable_ipr_irq;
|
||||
|
||||
for (i = 0; i < desc->nr_irqs; i++) {
|
||||
struct ipr_data *p = desc->ipr_data + i;
|
||||
struct irq_desc *irq_desc;
|
||||
int res;
|
||||
|
||||
BUG_ON(p->ipr_idx >= desc->nr_offsets);
|
||||
BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
|
||||
|
||||
irq_desc = irq_to_desc_alloc_node(p->irq, numa_node_id());
|
||||
if (unlikely(!irq_desc)) {
|
||||
res = irq_alloc_desc_at(p->irq, numa_node_id());
|
||||
if (unlikely(res != p->irq && res != -EEXIST)) {
|
||||
printk(KERN_INFO "can not get irq_desc for %d\n",
|
||||
p->irq);
|
||||
continue;
|
||||
|
@ -78,7 +77,7 @@ void register_ipr_controller(struct ipr_desc *desc)
|
|||
set_irq_chip_and_handler_name(p->irq, &desc->chip,
|
||||
handle_level_irq, "level");
|
||||
set_irq_chip_data(p->irq, p);
|
||||
disable_ipr_irq(p->irq);
|
||||
disable_ipr_irq(irq_get_irq_data(p->irq));
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(register_ipr_controller);
|
||||
|
|
|
@ -225,7 +225,7 @@ static void sh7750_pmu_enable_all(void)
|
|||
}
|
||||
|
||||
static struct sh_pmu sh7750_pmu = {
|
||||
.name = "SH7750",
|
||||
.name = "sh7750",
|
||||
.num_events = 2,
|
||||
.event_map = sh7750_event_map,
|
||||
.max_events = ARRAY_SIZE(sh7750_general_events),
|
||||
|
|
|
@ -259,7 +259,7 @@ static void sh4a_pmu_enable_all(void)
|
|||
}
|
||||
|
||||
static struct sh_pmu sh4a_pmu = {
|
||||
.name = "SH-4A",
|
||||
.name = "sh4a",
|
||||
.num_events = 2,
|
||||
.event_map = sh4a_event_map,
|
||||
.max_events = ARRAY_SIZE(sh4a_general_events),
|
||||
|
|
|
@ -56,6 +56,8 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
int i = *(loff_t *)v, j, prec;
|
||||
struct irqaction *action;
|
||||
struct irq_desc *desc;
|
||||
struct irq_data *data;
|
||||
struct irq_chip *chip;
|
||||
|
||||
if (i > nr_irqs)
|
||||
return 0;
|
||||
|
@ -77,6 +79,9 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
if (!desc)
|
||||
return 0;
|
||||
|
||||
data = irq_get_irq_data(i);
|
||||
chip = irq_data_get_irq_chip(data);
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
for_each_online_cpu(j)
|
||||
any_count |= kstat_irqs_cpu(i, j);
|
||||
|
@ -87,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
seq_printf(p, "%*d: ", prec, i);
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
|
||||
seq_printf(p, " %14s", desc->chip->name);
|
||||
seq_printf(p, " %14s", chip->name);
|
||||
seq_printf(p, "-%-8s", desc->name);
|
||||
|
||||
if (action) {
|
||||
|
@ -273,12 +278,6 @@ void __init init_IRQ(void)
|
|||
{
|
||||
plat_irq_setup();
|
||||
|
||||
/*
|
||||
* Pin any of the legacy IRQ vectors that haven't already been
|
||||
* grabbed by the platform
|
||||
*/
|
||||
reserve_irq_legacy();
|
||||
|
||||
/* Perform the machine specific initialisation */
|
||||
if (sh_mv.mv_init_irq)
|
||||
sh_mv.mv_init_irq();
|
||||
|
@ -297,13 +296,16 @@ int __init arch_probe_nr_irqs(void)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
|
||||
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irq_chip *chip = irq_data_get_irq_chip(data);
|
||||
|
||||
printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
|
||||
irq, desc->node, cpu);
|
||||
irq, data->node, cpu);
|
||||
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
desc->chip->set_affinity(irq, cpumask_of(cpu));
|
||||
chip->irq_set_affinity(data, cpumask_of(cpu), false);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
||||
|
@ -314,24 +316,25 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
|
|||
*/
|
||||
void migrate_irqs(void)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
unsigned int irq, cpu = smp_processor_id();
|
||||
|
||||
for_each_irq_desc(irq, desc) {
|
||||
if (desc->node == cpu) {
|
||||
unsigned int newcpu = cpumask_any_and(desc->affinity,
|
||||
for_each_active_irq(irq) {
|
||||
struct irq_data *data = irq_get_irq_data(irq);
|
||||
|
||||
if (data->node == cpu) {
|
||||
unsigned int newcpu = cpumask_any_and(data->affinity,
|
||||
cpu_online_mask);
|
||||
if (newcpu >= nr_cpu_ids) {
|
||||
if (printk_ratelimit())
|
||||
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
|
||||
irq, cpu);
|
||||
|
||||
cpumask_setall(desc->affinity);
|
||||
newcpu = cpumask_any_and(desc->affinity,
|
||||
cpumask_setall(data->affinity);
|
||||
newcpu = cpumask_any_and(data->affinity,
|
||||
cpu_online_mask);
|
||||
}
|
||||
|
||||
route_irq(desc, irq, newcpu);
|
||||
route_irq(data, irq, newcpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,17 +11,17 @@
|
|||
#include <linux/module.h>
|
||||
#include <cpu/registers.h>
|
||||
|
||||
void notrace raw_local_irq_restore(unsigned long flags)
|
||||
void notrace arch_local_irq_restore(unsigned long flags)
|
||||
{
|
||||
unsigned long long __dummy;
|
||||
|
||||
if (flags == RAW_IRQ_DISABLED) {
|
||||
if (flags == ARCH_IRQ_DISABLED) {
|
||||
__asm__ __volatile__ (
|
||||
"getcon " __SR ", %0\n\t"
|
||||
"or %0, %1, %0\n\t"
|
||||
"putcon %0, " __SR "\n\t"
|
||||
: "=&r" (__dummy)
|
||||
: "r" (RAW_IRQ_DISABLED)
|
||||
: "r" (ARCH_IRQ_DISABLED)
|
||||
);
|
||||
} else {
|
||||
__asm__ __volatile__ (
|
||||
|
@ -29,13 +29,13 @@ void notrace raw_local_irq_restore(unsigned long flags)
|
|||
"and %0, %1, %0\n\t"
|
||||
"putcon %0, " __SR "\n\t"
|
||||
: "=&r" (__dummy)
|
||||
: "r" (~RAW_IRQ_DISABLED)
|
||||
: "r" (~ARCH_IRQ_DISABLED)
|
||||
);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(raw_local_irq_restore);
|
||||
EXPORT_SYMBOL(arch_local_irq_restore);
|
||||
|
||||
unsigned long notrace __raw_local_save_flags(void)
|
||||
unsigned long notrace arch_local_save_flags(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -43,9 +43,9 @@ unsigned long notrace __raw_local_save_flags(void)
|
|||
"getcon " __SR ", %0\n\t"
|
||||
"and %0, %1, %0"
|
||||
: "=&r" (flags)
|
||||
: "r" (RAW_IRQ_DISABLED)
|
||||
: "r" (ARCH_IRQ_DISABLED)
|
||||
);
|
||||
|
||||
return flags;
|
||||
}
|
||||
EXPORT_SYMBOL(__raw_local_save_flags);
|
||||
EXPORT_SYMBOL(arch_local_save_flags);
|
||||
|
|
|
@ -41,6 +41,7 @@
|
|||
#include <asm/smp.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/mmzone.h>
|
||||
#include <asm/sparsemem.h>
|
||||
|
||||
/*
|
||||
* Initialize loops_per_jiffy as 10000000 (1000MIPS).
|
||||
|
@ -52,6 +53,7 @@ struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
|
|||
.type = CPU_SH_NONE,
|
||||
.family = CPU_FAMILY_UNKNOWN,
|
||||
.loops_per_jiffy = 10000000,
|
||||
.phys_bits = MAX_PHYSMEM_BITS,
|
||||
},
|
||||
};
|
||||
EXPORT_SYMBOL(cpu_data);
|
||||
|
@ -432,6 +434,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
if (c->flags & CPU_HAS_L2_CACHE)
|
||||
show_cacheinfo(m, "scache", c->scache);
|
||||
|
||||
seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
|
||||
|
||||
seq_printf(m, "bogomips\t: %lu.%02lu\n",
|
||||
c->loops_per_jiffy/(500000/HZ),
|
||||
(c->loops_per_jiffy/(5000/HZ)) % 100);
|
||||
|
|
|
@ -15,7 +15,7 @@ cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
|
|||
obj-y += $(cacheops-y)
|
||||
|
||||
mmu-y := nommu.o extable_32.o
|
||||
mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \
|
||||
mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o gup.o \
|
||||
ioremap.o kmap.o pgtable.o tlbflush_$(BITS).o
|
||||
|
||||
obj-y += $(mmu-y)
|
||||
|
|
|
@ -0,0 +1,273 @@
|
|||
/*
|
||||
* Lockless get_user_pages_fast for SuperH
|
||||
*
|
||||
* Copyright (C) 2009 - 2010 Paul Mundt
|
||||
*
|
||||
* Cloned from the x86 and PowerPC versions, by:
|
||||
*
|
||||
* Copyright (C) 2008 Nick Piggin
|
||||
* Copyright (C) 2008 Novell Inc.
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/vmstat.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
static inline pte_t gup_get_pte(pte_t *ptep)
|
||||
{
|
||||
#ifndef CONFIG_X2TLB
|
||||
return ACCESS_ONCE(*ptep);
|
||||
#else
|
||||
/*
|
||||
* With get_user_pages_fast, we walk down the pagetables without
|
||||
* taking any locks. For this we would like to load the pointers
|
||||
* atomically, but that is not possible with 64-bit PTEs. What
|
||||
* we do have is the guarantee that a pte will only either go
|
||||
* from not present to present, or present to not present or both
|
||||
* -- it will not switch to a completely different present page
|
||||
* without a TLB flush in between; something that we are blocking
|
||||
* by holding interrupts off.
|
||||
*
|
||||
* Setting ptes from not present to present goes:
|
||||
* ptep->pte_high = h;
|
||||
* smp_wmb();
|
||||
* ptep->pte_low = l;
|
||||
*
|
||||
* And present to not present goes:
|
||||
* ptep->pte_low = 0;
|
||||
* smp_wmb();
|
||||
* ptep->pte_high = 0;
|
||||
*
|
||||
* We must ensure here that the load of pte_low sees l iff pte_high
|
||||
* sees h. We load pte_high *after* loading pte_low, which ensures we
|
||||
* don't see an older value of pte_high. *Then* we recheck pte_low,
|
||||
* which ensures that we haven't picked up a changed pte high. We might
|
||||
* have got rubbish values from pte_low and pte_high, but we are
|
||||
* guaranteed that pte_low will not have the present bit set *unless*
|
||||
* it is 'l'. And get_user_pages_fast only operates on present ptes, so
|
||||
* we're safe.
|
||||
*
|
||||
* gup_get_pte should not be used or copied outside gup.c without being
|
||||
* very careful -- it does not atomically load the pte or anything that
|
||||
* is likely to be useful for you.
|
||||
*/
|
||||
pte_t pte;
|
||||
|
||||
retry:
|
||||
pte.pte_low = ptep->pte_low;
|
||||
smp_rmb();
|
||||
pte.pte_high = ptep->pte_high;
|
||||
smp_rmb();
|
||||
if (unlikely(pte.pte_low != ptep->pte_low))
|
||||
goto retry;
|
||||
|
||||
return pte;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* The performance critical leaf functions are made noinline otherwise gcc
|
||||
* inlines everything into a single function which results in too much
|
||||
* register pressure.
|
||||
*/
|
||||
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
u64 mask, result;
|
||||
pte_t *ptep;
|
||||
|
||||
#ifdef CONFIG_X2TLB
|
||||
result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
|
||||
if (write)
|
||||
result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
|
||||
#elif defined(CONFIG_SUPERH64)
|
||||
result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
|
||||
if (write)
|
||||
result |= _PAGE_WRITE;
|
||||
#else
|
||||
result = _PAGE_PRESENT | _PAGE_USER;
|
||||
if (write)
|
||||
result |= _PAGE_RW;
|
||||
#endif
|
||||
|
||||
mask = result | _PAGE_SPECIAL;
|
||||
|
||||
ptep = pte_offset_map(&pmd, addr);
|
||||
do {
|
||||
pte_t pte = gup_get_pte(ptep);
|
||||
struct page *page;
|
||||
|
||||
if ((pte_val(pte) & mask) != result) {
|
||||
pte_unmap(ptep);
|
||||
return 0;
|
||||
}
|
||||
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
|
||||
page = pte_page(pte);
|
||||
get_page(page);
|
||||
pages[*nr] = page;
|
||||
(*nr)++;
|
||||
|
||||
} while (ptep++, addr += PAGE_SIZE, addr != end);
|
||||
pte_unmap(ptep - 1);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
|
||||
int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long next;
|
||||
pmd_t *pmdp;
|
||||
|
||||
pmdp = pmd_offset(&pud, addr);
|
||||
do {
|
||||
pmd_t pmd = *pmdp;
|
||||
|
||||
next = pmd_addr_end(addr, end);
|
||||
if (pmd_none(pmd))
|
||||
return 0;
|
||||
if (!gup_pte_range(pmd, addr, next, write, pages, nr))
|
||||
return 0;
|
||||
} while (pmdp++, addr = next, addr != end);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
|
||||
int write, struct page **pages, int *nr)
|
||||
{
|
||||
unsigned long next;
|
||||
pud_t *pudp;
|
||||
|
||||
pudp = pud_offset(&pgd, addr);
|
||||
do {
|
||||
pud_t pud = *pudp;
|
||||
|
||||
next = pud_addr_end(addr, end);
|
||||
if (pud_none(pud))
|
||||
return 0;
|
||||
if (!gup_pmd_range(pud, addr, next, write, pages, nr))
|
||||
return 0;
|
||||
} while (pudp++, addr = next, addr != end);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
|
||||
* back to the regular GUP.
|
||||
*/
|
||||
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
||||
struct page **pages)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr, len, end;
|
||||
unsigned long next;
|
||||
unsigned long flags;
|
||||
pgd_t *pgdp;
|
||||
int nr = 0;
|
||||
|
||||
start &= PAGE_MASK;
|
||||
addr = start;
|
||||
len = (unsigned long) nr_pages << PAGE_SHIFT;
|
||||
end = start + len;
|
||||
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
|
||||
(void __user *)start, len)))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* This doesn't prevent pagetable teardown, but does prevent
|
||||
* the pagetables and pages from being freed.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
pgdp = pgd_offset(mm, addr);
|
||||
do {
|
||||
pgd_t pgd = *pgdp;
|
||||
|
||||
next = pgd_addr_end(addr, end);
|
||||
if (pgd_none(pgd))
|
||||
break;
|
||||
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
|
||||
break;
|
||||
} while (pgdp++, addr = next, addr != end);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return nr;
|
||||
}
|
||||
|
||||
/**
|
||||
* get_user_pages_fast() - pin user pages in memory
|
||||
* @start: starting user address
|
||||
* @nr_pages: number of pages from start to pin
|
||||
* @write: whether pages will be written to
|
||||
* @pages: array that receives pointers to the pages pinned.
|
||||
* Should be at least nr_pages long.
|
||||
*
|
||||
* Attempt to pin user pages in memory without taking mm->mmap_sem.
|
||||
* If not successful, it will fall back to taking the lock and
|
||||
* calling get_user_pages().
|
||||
*
|
||||
* Returns number of pages pinned. This may be fewer than the number
|
||||
* requested. If nr_pages is 0 or negative, returns 0. If no pages
|
||||
* were pinned, returns -errno.
|
||||
*/
|
||||
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
||||
struct page **pages)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
unsigned long addr, len, end;
|
||||
unsigned long next;
|
||||
pgd_t *pgdp;
|
||||
int nr = 0;
|
||||
|
||||
start &= PAGE_MASK;
|
||||
addr = start;
|
||||
len = (unsigned long) nr_pages << PAGE_SHIFT;
|
||||
|
||||
end = start + len;
|
||||
if (end < start)
|
||||
goto slow_irqon;
|
||||
|
||||
local_irq_disable();
|
||||
pgdp = pgd_offset(mm, addr);
|
||||
do {
|
||||
pgd_t pgd = *pgdp;
|
||||
|
||||
next = pgd_addr_end(addr, end);
|
||||
if (pgd_none(pgd))
|
||||
goto slow;
|
||||
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
|
||||
goto slow;
|
||||
} while (pgdp++, addr = next, addr != end);
|
||||
local_irq_enable();
|
||||
|
||||
VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
|
||||
return nr;
|
||||
|
||||
{
|
||||
int ret;
|
||||
|
||||
slow:
|
||||
local_irq_enable();
|
||||
slow_irqon:
|
||||
/* Try to get the remaining pages with get_user_pages */
|
||||
start += nr << PAGE_SHIFT;
|
||||
pages += nr;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
ret = get_user_pages(current, mm, start,
|
||||
(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
|
||||
up_read(&mm->mmap_sem);
|
||||
|
||||
/* Have to be a bit careful with return values */
|
||||
if (nr > 0) {
|
||||
if (ret < 0)
|
||||
ret = nr;
|
||||
else
|
||||
ret += nr;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
}
|
|
@ -1,5 +1,7 @@
|
|||
obj-$(CONFIG_OPROFILE) += oprofile.o
|
||||
|
||||
CFLAGS_common.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
|
||||
|
||||
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
|
||||
oprof.o cpu_buffer.o buffer_sync.o \
|
||||
event_buffer.o oprofile_files.o \
|
||||
|
|
|
@ -91,7 +91,7 @@ void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
|
|||
if (depth > backtrace_limit)
|
||||
depth = backtrace_limit;
|
||||
|
||||
stackaddr = (unsigned long *)regs->regs[15];
|
||||
stackaddr = (unsigned long *)kernel_stack_pointer(regs);
|
||||
if (!user_mode(regs)) {
|
||||
if (depth)
|
||||
unwind_stack(NULL, regs, stackaddr,
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* arch/sh/oprofile/init.c
|
||||
*
|
||||
* Copyright (C) 2003 - 2008 Paul Mundt
|
||||
* Copyright (C) 2003 - 2010 Paul Mundt
|
||||
*
|
||||
* Based on arch/mips/oprofile/common.c:
|
||||
*
|
||||
|
@ -18,43 +18,46 @@
|
|||
#include <linux/errno.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/perf_event.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
#ifdef CONFIG_HW_PERF_EVENTS
|
||||
extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
|
||||
|
||||
#ifdef CONFIG_HW_PERF_EVENTS
|
||||
/*
|
||||
* This will need to be reworked when multiple PMUs are supported.
|
||||
*/
|
||||
static char *sh_pmu_op_name;
|
||||
|
||||
char *op_name_from_perf_id(void)
|
||||
{
|
||||
const char *pmu;
|
||||
char buf[20];
|
||||
int size;
|
||||
|
||||
pmu = perf_pmu_name();
|
||||
if (!pmu)
|
||||
return NULL;
|
||||
|
||||
size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
|
||||
if (size > -1 && size < sizeof(buf))
|
||||
return buf;
|
||||
|
||||
return NULL;
|
||||
return sh_pmu_op_name;
|
||||
}
|
||||
|
||||
int __init oprofile_arch_init(struct oprofile_operations *ops)
|
||||
{
|
||||
ops->backtrace = sh_backtrace;
|
||||
|
||||
if (perf_num_counters() == 0)
|
||||
return -ENODEV;
|
||||
|
||||
sh_pmu_op_name = kasprintf(GFP_KERNEL, "%s/%s",
|
||||
UTS_MACHINE, perf_pmu_name());
|
||||
if (unlikely(!sh_pmu_op_name))
|
||||
return -ENOMEM;
|
||||
|
||||
return oprofile_perf_init(ops);
|
||||
}
|
||||
|
||||
void __exit oprofile_arch_exit(void)
|
||||
{
|
||||
oprofile_perf_exit();
|
||||
kfree(sh_pmu_op_name);
|
||||
}
|
||||
#else
|
||||
int __init oprofile_arch_init(struct oprofile_operations *ops)
|
||||
{
|
||||
pr_info("oprofile: hardware counters not available\n");
|
||||
ops->backtrace = sh_backtrace;
|
||||
return -ENODEV;
|
||||
}
|
||||
void __exit oprofile_arch_exit(void) {}
|
||||
|
|
|
@ -142,18 +142,18 @@ static int gdrom_hardreset(struct cdrom_device_info *cd_info);
|
|||
|
||||
static bool gdrom_is_busy(void)
|
||||
{
|
||||
return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
|
||||
return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) != 0;
|
||||
}
|
||||
|
||||
static bool gdrom_data_request(void)
|
||||
{
|
||||
return (ctrl_inb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
|
||||
return (__raw_readb(GDROM_ALTSTATUS_REG) & 0x88) == 8;
|
||||
}
|
||||
|
||||
static bool gdrom_wait_clrbusy(void)
|
||||
{
|
||||
unsigned long timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
|
||||
while ((ctrl_inb(GDROM_ALTSTATUS_REG) & 0x80) &&
|
||||
while ((__raw_readb(GDROM_ALTSTATUS_REG) & 0x80) &&
|
||||
(time_before(jiffies, timeout)))
|
||||
cpu_relax();
|
||||
return time_before(jiffies, timeout + 1);
|
||||
|
@ -181,14 +181,14 @@ static void gdrom_identifydevice(void *buf)
|
|||
gdrom_getsense(NULL);
|
||||
return;
|
||||
}
|
||||
ctrl_outb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
|
||||
__raw_writeb(GDROM_COM_IDDEV, GDROM_STATUSCOMMAND_REG);
|
||||
if (!gdrom_wait_busy_sleeps()) {
|
||||
gdrom_getsense(NULL);
|
||||
return;
|
||||
}
|
||||
/* now read in the data */
|
||||
for (c = 0; c < 40; c++)
|
||||
data[c] = ctrl_inw(GDROM_DATA_REG);
|
||||
data[c] = __raw_readw(GDROM_DATA_REG);
|
||||
}
|
||||
|
||||
static void gdrom_spicommand(void *spi_string, int buflen)
|
||||
|
@ -197,21 +197,21 @@ static void gdrom_spicommand(void *spi_string, int buflen)
|
|||
unsigned long timeout;
|
||||
|
||||
/* ensure IRQ_WAIT is set */
|
||||
ctrl_outb(0x08, GDROM_ALTSTATUS_REG);
|
||||
__raw_writeb(0x08, GDROM_ALTSTATUS_REG);
|
||||
/* specify how many bytes we expect back */
|
||||
ctrl_outb(buflen & 0xFF, GDROM_BCL_REG);
|
||||
ctrl_outb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
|
||||
__raw_writeb(buflen & 0xFF, GDROM_BCL_REG);
|
||||
__raw_writeb((buflen >> 8) & 0xFF, GDROM_BCH_REG);
|
||||
/* other parameters */
|
||||
ctrl_outb(0, GDROM_INTSEC_REG);
|
||||
ctrl_outb(0, GDROM_SECNUM_REG);
|
||||
ctrl_outb(0, GDROM_ERROR_REG);
|
||||
__raw_writeb(0, GDROM_INTSEC_REG);
|
||||
__raw_writeb(0, GDROM_SECNUM_REG);
|
||||
__raw_writeb(0, GDROM_ERROR_REG);
|
||||
/* Wait until we can go */
|
||||
if (!gdrom_wait_clrbusy()) {
|
||||
gdrom_getsense(NULL);
|
||||
return;
|
||||
}
|
||||
timeout = jiffies + GDROM_DEFAULT_TIMEOUT;
|
||||
ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
|
||||
__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
|
||||
while (!gdrom_data_request() && time_before(jiffies, timeout))
|
||||
cpu_relax();
|
||||
if (!time_before(jiffies, timeout + 1)) {
|
||||
|
@ -233,10 +233,10 @@ static char gdrom_execute_diagnostic(void)
|
|||
gdrom_hardreset(gd.cd_info);
|
||||
if (!gdrom_wait_clrbusy())
|
||||
return 0;
|
||||
ctrl_outb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
|
||||
__raw_writeb(GDROM_COM_EXECDIAG, GDROM_STATUSCOMMAND_REG);
|
||||
if (!gdrom_wait_busy_sleeps())
|
||||
return 0;
|
||||
return ctrl_inb(GDROM_ERROR_REG);
|
||||
return __raw_readb(GDROM_ERROR_REG);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -385,7 +385,7 @@ static void gdrom_release(struct cdrom_device_info *cd_info)
|
|||
static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
|
||||
{
|
||||
/* read the sense key */
|
||||
char sense = ctrl_inb(GDROM_ERROR_REG);
|
||||
char sense = __raw_readb(GDROM_ERROR_REG);
|
||||
sense &= 0xF0;
|
||||
if (sense == 0)
|
||||
return CDS_DISC_OK;
|
||||
|
@ -398,16 +398,16 @@ static int gdrom_drivestatus(struct cdrom_device_info *cd_info, int ignore)
|
|||
static int gdrom_mediachanged(struct cdrom_device_info *cd_info, int ignore)
|
||||
{
|
||||
/* check the sense key */
|
||||
return (ctrl_inb(GDROM_ERROR_REG) & 0xF0) == 0x60;
|
||||
return (__raw_readb(GDROM_ERROR_REG) & 0xF0) == 0x60;
|
||||
}
|
||||
|
||||
/* reset the G1 bus */
|
||||
static int gdrom_hardreset(struct cdrom_device_info *cd_info)
|
||||
{
|
||||
int count;
|
||||
ctrl_outl(0x1fffff, GDROM_RESET_REG);
|
||||
__raw_writel(0x1fffff, GDROM_RESET_REG);
|
||||
for (count = 0xa0000000; count < 0xa0200000; count += 4)
|
||||
ctrl_inl(count);
|
||||
__raw_readl(count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -536,7 +536,7 @@ static const struct block_device_operations gdrom_bdops = {
|
|||
|
||||
static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
|
||||
gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
|
||||
if (gd.pending != 1)
|
||||
return IRQ_HANDLED;
|
||||
gd.pending = 0;
|
||||
|
@ -546,7 +546,7 @@ static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
|
|||
|
||||
static irqreturn_t gdrom_dma_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
gd.status = ctrl_inb(GDROM_STATUSCOMMAND_REG);
|
||||
gd.status = __raw_readb(GDROM_STATUSCOMMAND_REG);
|
||||
if (gd.transfer != 1)
|
||||
return IRQ_HANDLED;
|
||||
gd.transfer = 0;
|
||||
|
@ -600,10 +600,10 @@ static void gdrom_readdisk_dma(struct work_struct *work)
|
|||
spin_unlock(&gdrom_lock);
|
||||
block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
|
||||
block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
|
||||
ctrl_outl(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG);
|
||||
ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
|
||||
ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
|
||||
ctrl_outl(1, GDROM_DMA_ENABLE_REG);
|
||||
__raw_writel(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG);
|
||||
__raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
|
||||
__raw_writel(1, GDROM_DMA_DIRECTION_REG);
|
||||
__raw_writel(1, GDROM_DMA_ENABLE_REG);
|
||||
read_command->cmd[2] = (block >> 16) & 0xFF;
|
||||
read_command->cmd[3] = (block >> 8) & 0xFF;
|
||||
read_command->cmd[4] = block & 0xFF;
|
||||
|
@ -611,18 +611,18 @@ static void gdrom_readdisk_dma(struct work_struct *work)
|
|||
read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
|
||||
read_command->cmd[10] = block_cnt & 0xFF;
|
||||
/* set for DMA */
|
||||
ctrl_outb(1, GDROM_ERROR_REG);
|
||||
__raw_writeb(1, GDROM_ERROR_REG);
|
||||
/* other registers */
|
||||
ctrl_outb(0, GDROM_SECNUM_REG);
|
||||
ctrl_outb(0, GDROM_BCL_REG);
|
||||
ctrl_outb(0, GDROM_BCH_REG);
|
||||
ctrl_outb(0, GDROM_DSEL_REG);
|
||||
ctrl_outb(0, GDROM_INTSEC_REG);
|
||||
__raw_writeb(0, GDROM_SECNUM_REG);
|
||||
__raw_writeb(0, GDROM_BCL_REG);
|
||||
__raw_writeb(0, GDROM_BCH_REG);
|
||||
__raw_writeb(0, GDROM_DSEL_REG);
|
||||
__raw_writeb(0, GDROM_INTSEC_REG);
|
||||
/* Wait for registers to reset after any previous activity */
|
||||
timeout = jiffies + HZ / 2;
|
||||
while (gdrom_is_busy() && time_before(jiffies, timeout))
|
||||
cpu_relax();
|
||||
ctrl_outb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
|
||||
__raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
|
||||
timeout = jiffies + HZ / 2;
|
||||
/* Wait for packet command to finish */
|
||||
while (gdrom_is_busy() && time_before(jiffies, timeout))
|
||||
|
@ -632,11 +632,11 @@ static void gdrom_readdisk_dma(struct work_struct *work)
|
|||
outsw(GDROM_DATA_REG, &read_command->cmd, 6);
|
||||
timeout = jiffies + HZ / 2;
|
||||
/* Wait for any pending DMA to finish */
|
||||
while (ctrl_inb(GDROM_DMA_STATUS_REG) &&
|
||||
while (__raw_readb(GDROM_DMA_STATUS_REG) &&
|
||||
time_before(jiffies, timeout))
|
||||
cpu_relax();
|
||||
/* start transfer */
|
||||
ctrl_outb(1, GDROM_DMA_STATUS_REG);
|
||||
__raw_writeb(1, GDROM_DMA_STATUS_REG);
|
||||
wait_event_interruptible_timeout(request_queue,
|
||||
gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
|
||||
err = gd.transfer ? -EIO : 0;
|
||||
|
@ -714,11 +714,11 @@ free_id:
|
|||
/* set the default mode for DMA transfer */
|
||||
static int __devinit gdrom_init_dma_mode(void)
|
||||
{
|
||||
ctrl_outb(0x13, GDROM_ERROR_REG);
|
||||
ctrl_outb(0x22, GDROM_INTSEC_REG);
|
||||
__raw_writeb(0x13, GDROM_ERROR_REG);
|
||||
__raw_writeb(0x22, GDROM_INTSEC_REG);
|
||||
if (!gdrom_wait_clrbusy())
|
||||
return -EBUSY;
|
||||
ctrl_outb(0xEF, GDROM_STATUSCOMMAND_REG);
|
||||
__raw_writeb(0xEF, GDROM_STATUSCOMMAND_REG);
|
||||
if (!gdrom_wait_busy_sleeps())
|
||||
return -EBUSY;
|
||||
/* Memory protection setting for GDROM DMA
|
||||
|
@ -728,8 +728,8 @@ static int __devinit gdrom_init_dma_mode(void)
|
|||
* Bits 6 - 0 end of transfer range in 1 MB blocks OR'ed with 0x80
|
||||
* (0x40 | 0x80) = start range at 0x0C000000
|
||||
* (0x7F | 0x80) = end range at 0x0FFFFFFF */
|
||||
ctrl_outl(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
|
||||
ctrl_outl(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
|
||||
__raw_writel(0x8843407F, GDROM_DMA_ACCESS_CTRL_REG);
|
||||
__raw_writel(9, GDROM_DMA_WAIT_REG); /* DMA word setting */
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -139,35 +139,35 @@ static void jornada_scan_keyb(unsigned char *s)
|
|||
}, *y = matrix_PDE;
|
||||
|
||||
/* Save these control reg bits */
|
||||
dc_static = (ctrl_inw(PDCR) & (~0xcc0c));
|
||||
ec_static = (ctrl_inw(PECR) & (~0xf0cf));
|
||||
dc_static = (__raw_readw(PDCR) & (~0xcc0c));
|
||||
ec_static = (__raw_readw(PECR) & (~0xf0cf));
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
/* disable output for all but the one we want to scan */
|
||||
ctrl_outw((dc_static | *y++), PDCR);
|
||||
ctrl_outw((ec_static | *y++), PECR);
|
||||
__raw_writew((dc_static | *y++), PDCR);
|
||||
__raw_writew((ec_static | *y++), PECR);
|
||||
udelay(5);
|
||||
|
||||
/* Get scanline row */
|
||||
ctrl_outb(*t++, PDDR);
|
||||
ctrl_outb(*t++, PEDR);
|
||||
__raw_writeb(*t++, PDDR);
|
||||
__raw_writeb(*t++, PEDR);
|
||||
udelay(50);
|
||||
|
||||
/* Read data */
|
||||
*s++ = ctrl_inb(PCDR);
|
||||
*s++ = ctrl_inb(PFDR);
|
||||
*s++ = __raw_readb(PCDR);
|
||||
*s++ = __raw_readb(PFDR);
|
||||
}
|
||||
/* Scan no lines */
|
||||
ctrl_outb(0xff, PDDR);
|
||||
ctrl_outb(0xff, PEDR);
|
||||
__raw_writeb(0xff, PDDR);
|
||||
__raw_writeb(0xff, PEDR);
|
||||
|
||||
/* Enable all scanlines */
|
||||
ctrl_outw((dc_static | (0x5555 & 0xcc0c)),PDCR);
|
||||
ctrl_outw((ec_static | (0x5555 & 0xf0cf)),PECR);
|
||||
__raw_writew((dc_static | (0x5555 & 0xcc0c)),PDCR);
|
||||
__raw_writew((ec_static | (0x5555 & 0xf0cf)),PECR);
|
||||
|
||||
/* Ignore extra keys and events */
|
||||
*s++ = ctrl_inb(PGDR);
|
||||
*s++ = ctrl_inb(PHDR);
|
||||
*s++ = __raw_readb(PGDR);
|
||||
*s++ = __raw_readb(PHDR);
|
||||
}
|
||||
|
||||
static void jornadakbd680_poll(struct input_polled_dev *dev)
|
||||
|
|
|
@@ -28,29 +28,29 @@ static void do_softint(struct work_struct *work)
 	u8 scpdr;
 	int touched = 0;
 
-	if (ctrl_inb(PHDR) & PHDR_TS_PEN_DOWN) {
-		scpdr = ctrl_inb(SCPDR);
+	if (__raw_readb(PHDR) & PHDR_TS_PEN_DOWN) {
+		scpdr = __raw_readb(SCPDR);
 		scpdr |= SCPDR_TS_SCAN_ENABLE;
 		scpdr &= ~SCPDR_TS_SCAN_Y;
-		ctrl_outb(scpdr, SCPDR);
+		__raw_writeb(scpdr, SCPDR);
 		udelay(30);
 
 		absy = adc_single(ADC_CHANNEL_TS_Y);
 
-		scpdr = ctrl_inb(SCPDR);
+		scpdr = __raw_readb(SCPDR);
 		scpdr |= SCPDR_TS_SCAN_Y;
 		scpdr &= ~SCPDR_TS_SCAN_X;
-		ctrl_outb(scpdr, SCPDR);
+		__raw_writeb(scpdr, SCPDR);
 		udelay(30);
 
 		absx = adc_single(ADC_CHANNEL_TS_X);
 
-		scpdr = ctrl_inb(SCPDR);
+		scpdr = __raw_readb(SCPDR);
 		scpdr |= SCPDR_TS_SCAN_X;
 		scpdr &= ~SCPDR_TS_SCAN_ENABLE;
-		ctrl_outb(scpdr, SCPDR);
+		__raw_writeb(scpdr, SCPDR);
 		udelay(100);
-		touched = ctrl_inb(PHDR) & PHDR_TS_PEN_DOWN;
+		touched = __raw_readb(PHDR) & PHDR_TS_PEN_DOWN;
 	}
 
 	if (touched) {

@@ -80,21 +80,21 @@
 /* SCSPTR1 data */
 unsigned char scsptr1_data;
 
-#define RS5C313_CEENABLE    ctrl_outb(RS5C313_CE_RTCCE, RS5C313_CE);
-#define RS5C313_CEDISABLE   ctrl_outb(0x00, RS5C313_CE)
-#define RS5C313_MISCOP      ctrl_outb(0x02, 0xB0000008)
+#define RS5C313_CEENABLE    __raw_writeb(RS5C313_CE_RTCCE, RS5C313_CE);
+#define RS5C313_CEDISABLE   __raw_writeb(0x00, RS5C313_CE)
+#define RS5C313_MISCOP      __raw_writeb(0x02, 0xB0000008)
 
 static void rs5c313_init_port(void)
 {
 	/* Set SCK as I/O port and Initialize SCSPTR1 data & I/O port. */
-	ctrl_outb(ctrl_inb(SCSMR1) & ~SCSMR1_CA, SCSMR1);
-	ctrl_outb(ctrl_inb(SCSCR1) & ~SCSCR1_CKE, SCSCR1);
+	__raw_writeb(__raw_readb(SCSMR1) & ~SCSMR1_CA, SCSMR1);
+	__raw_writeb(__raw_readb(SCSCR1) & ~SCSCR1_CKE, SCSCR1);
 
 	/* And Initialize SCL for RS5C313 clock */
-	scsptr1_data = ctrl_inb(SCSPTR1) | SCL;	/* SCL:H */
-	ctrl_outb(scsptr1_data, SCSPTR1);
-	scsptr1_data = ctrl_inb(SCSPTR1) | SCL_OEN;	/* SCL output enable */
-	ctrl_outb(scsptr1_data, SCSPTR1);
+	scsptr1_data = __raw_readb(SCSPTR1) | SCL;	/* SCL:H */
+	__raw_writeb(scsptr1_data, SCSPTR1);
+	scsptr1_data = __raw_readb(SCSPTR1) | SCL_OEN;	/* SCL output enable */
+	__raw_writeb(scsptr1_data, SCSPTR1);
 	RS5C313_CEDISABLE;	/* CE:L */
 }
@@ -106,21 +106,21 @@ static void rs5c313_write_data(unsigned char data)
 		/* SDA:Write Data */
 		scsptr1_data = (scsptr1_data & ~SDA) |
 				((((0x80 >> i) & data) >> (7 - i)) << 2);
-		ctrl_outb(scsptr1_data, SCSPTR1);
+		__raw_writeb(scsptr1_data, SCSPTR1);
 		if (i == 0) {
 			scsptr1_data |= SDA_OEN;	/* SDA:output enable */
-			ctrl_outb(scsptr1_data, SCSPTR1);
+			__raw_writeb(scsptr1_data, SCSPTR1);
 		}
 		ndelay(700);
 		scsptr1_data &= ~SCL;	/* SCL:L */
-		ctrl_outb(scsptr1_data, SCSPTR1);
+		__raw_writeb(scsptr1_data, SCSPTR1);
 		ndelay(700);
 		scsptr1_data |= SCL;	/* SCL:H */
-		ctrl_outb(scsptr1_data, SCSPTR1);
+		__raw_writeb(scsptr1_data, SCSPTR1);
 	}
 
 	scsptr1_data &= ~SDA_OEN;	/* SDA:output disable */
-	ctrl_outb(scsptr1_data, SCSPTR1);
+	__raw_writeb(scsptr1_data, SCSPTR1);
 }
 
 static unsigned char rs5c313_read_data(void)
@@ -131,12 +131,12 @@ static unsigned char rs5c313_read_data(void)
 	for (i = 0; i < 8; i++) {
 		ndelay(700);
 		/* SDA:Read Data */
-		data |= ((ctrl_inb(SCSPTR1) & SDA) >> 2) << (7 - i);
+		data |= ((__raw_readb(SCSPTR1) & SDA) >> 2) << (7 - i);
 		scsptr1_data &= ~SCL;	/* SCL:L */
-		ctrl_outb(scsptr1_data, SCSPTR1);
+		__raw_writeb(scsptr1_data, SCSPTR1);
 		ndelay(700);
 		scsptr1_data |= SCL;	/* SCL:H */
-		ctrl_outb(scsptr1_data, SCSPTR1);
+		__raw_writeb(scsptr1_data, SCSPTR1);
 	}
 	return data & 0x0F;
 }

@@ -12,15 +12,16 @@
 #include <linux/io.h>
 #include "internals.h"
 
-void _intc_enable(unsigned int irq, unsigned long handle)
+void _intc_enable(struct irq_data *data, unsigned long handle)
 {
+	unsigned int irq = data->irq;
 	struct intc_desc_int *d = get_intc_desc(irq);
 	unsigned long addr;
 	unsigned int cpu;
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
 #ifdef CONFIG_SMP
-		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
+		if (!cpumask_test_cpu(cpu, data->affinity))
 			continue;
 #endif
 		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
@@ -31,15 +32,16 @@ void _intc_enable(unsigned int irq, unsigned long handle)
 	intc_balancing_enable(irq);
 }
 
-static void intc_enable(unsigned int irq)
+static void intc_enable(struct irq_data *data)
 {
-	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
+	_intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data));
 }
 
-static void intc_disable(unsigned int irq)
+static void intc_disable(struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	struct intc_desc_int *d = get_intc_desc(irq);
-	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
+	unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);
 	unsigned long addr;
 	unsigned int cpu;
 
@@ -47,7 +49,7 @@ static void intc_disable(unsigned int irq)
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
 #ifdef CONFIG_SMP
-		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
+		if (!cpumask_test_cpu(cpu, data->affinity))
 			continue;
 #endif
 		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
@@ -56,7 +58,7 @@ static void intc_disable(unsigned int irq)
 	}
 }
 
-static int intc_set_wake(unsigned int irq, unsigned int on)
+static int intc_set_wake(struct irq_data *data, unsigned int on)
 {
 	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
 }
@@ -67,24 +69,27 @@ static int intc_set_wake(unsigned int irq, unsigned int on)
  * additional locking here at the intc desc level. The affinity mask is
  * later tested in the enable/disable paths.
  */
-static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+static int intc_set_affinity(struct irq_data *data,
+			     const struct cpumask *cpumask,
+			     bool force)
 {
 	if (!cpumask_intersects(cpumask, cpu_online_mask))
 		return -1;
 
-	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
+	cpumask_copy(data->affinity, cpumask);
 
 	return 0;
 }
 #endif
 
-static void intc_mask_ack(unsigned int irq)
+static void intc_mask_ack(struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	struct intc_desc_int *d = get_intc_desc(irq);
 	unsigned long handle = intc_get_ack_handle(irq);
 	unsigned long addr;
 
-	intc_disable(irq);
+	intc_disable(data);
 
 	/* read register and write zero only to the associated bit */
 	if (handle) {
@@ -144,6 +149,7 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
 int intc_set_priority(unsigned int irq, unsigned int prio)
 {
 	struct intc_desc_int *d = get_intc_desc(irq);
+	struct irq_data *data = irq_get_irq_data(irq);
 	struct intc_handle_int *ihp;
 
 	if (!intc_get_prio_level(irq) || prio <= 1)
@@ -162,7 +168,7 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
 		 * priority level will be set during next enable()
 		 */
 		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
-			_intc_enable(irq, ihp->handle);
+			_intc_enable(data, ihp->handle);
 	}
 	return 0;
 }
@@ -181,8 +187,9 @@ static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
 #endif
 };
 
-static int intc_set_type(unsigned int irq, unsigned int type)
+static int intc_set_type(struct irq_data *data, unsigned int type)
 {
+	unsigned int irq = data->irq;
 	struct intc_desc_int *d = get_intc_desc(irq);
 	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
 	struct intc_handle_int *ihp;
@@ -201,15 +208,15 @@ static int intc_set_type(unsigned int irq, unsigned int type)
 }
 
 struct irq_chip intc_irq_chip = {
-	.mask		= intc_disable,
-	.unmask		= intc_enable,
-	.mask_ack	= intc_mask_ack,
-	.enable		= intc_enable,
-	.disable	= intc_disable,
-	.shutdown	= intc_disable,
-	.set_type	= intc_set_type,
-	.set_wake	= intc_set_wake,
+	.irq_mask		= intc_disable,
+	.irq_unmask		= intc_enable,
+	.irq_mask_ack		= intc_mask_ack,
+	.irq_enable		= intc_enable,
+	.irq_disable		= intc_disable,
+	.irq_shutdown		= intc_disable,
+	.irq_set_type		= intc_set_type,
+	.irq_set_wake		= intc_set_wake,
 #ifdef CONFIG_SMP
-	.set_affinity	= intc_set_affinity,
+	.irq_set_affinity	= intc_set_affinity,
 #endif
 };

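Every chip.c hunk above follows the same genirq irq_data conversion: the callbacks now take a struct irq_data * and recover the IRQ number and per-chip cookie from it, and the irq_chip members gain an irq_ prefix. A minimal sketch of that callback shape, assuming the 2.6.37-era genirq API this series targets; the example_* names and the bitmap standing in for real mask registers are invented for illustration:

#include <linux/irq.h>
#include <linux/bitops.h>

/* Purely illustrative state standing in for real mask registers. */
struct example_priv {
	DECLARE_BITMAP(masked, NR_IRQS);
};

static void example_mask(struct irq_data *data)
{
	/* the cookie stored with set_irq_chip_data() comes back here */
	struct example_priv *p = irq_data_get_irq_chip_data(data);

	__set_bit(data->irq, p->masked);
}

static void example_unmask(struct irq_data *data)
{
	struct example_priv *p = irq_data_get_irq_chip_data(data);

	__clear_bit(data->irq, p->masked);
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_mask	= example_mask,
	.irq_unmask	= example_unmask,
};

The cookie returned by irq_data_get_irq_chip_data() is whatever was stored against the IRQ beforehand, which is how intc_enable() above recovers its handle.
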
@@ -71,6 +71,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
 				     unsigned int irq)
 {
 	struct intc_handle_int *hp;
+	struct irq_data *irq_data;
 	unsigned int data[2], primary;
 	unsigned long flags;
 
@@ -78,7 +79,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
 	 * Register the IRQ position with the global IRQ map, then insert
 	 * it in to the radix tree.
 	 */
-	reserve_irq_vector(irq);
+	irq_reserve_irqs(irq, 1);
 
 	raw_spin_lock_irqsave(&intc_big_lock, flags);
 	radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
@@ -111,6 +112,8 @@ static void __init intc_register_irq(struct intc_desc *desc,
 
 	BUG_ON(!data[primary]); /* must have primary masking method */
 
+	irq_data = irq_get_irq_data(irq);
+
 	disable_irq_nosync(irq);
 	set_irq_chip_and_handler_name(irq, &d->chip,
 				      handle_level_irq, "level");
@@ -123,7 +126,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
 
 	/* enable secondary masking method if present */
 	if (data[!primary])
-		_intc_enable(irq, data[!primary]);
+		_intc_enable(irq_data, data[!primary]);
 
 	/* add irq to d->prio list if priority is available */
 	if (data[1]) {
@@ -151,7 +154,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
 	}
 
 	/* irq should be disabled by default */
-	d->chip.mask(irq);
+	d->chip.irq_mask(irq_data);
 
 	intc_set_ack_handle(irq, desc, d, enum_id);
 	intc_set_dist_handle(irq, desc, d, enum_id);
@@ -284,7 +287,7 @@ int __init register_intc_controller(struct intc_desc *desc)
 		for (i = 0; i < hw->nr_ack_regs; i++)
 			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
 	else
-		d->chip.mask_ack = d->chip.disable;
+		d->chip.irq_mask_ack = d->chip.irq_disable;
 
 	/* disable bits matching force_disable before registering irqs */
 	if (desc->force_disable)
@@ -300,13 +303,13 @@ int __init register_intc_controller(struct intc_desc *desc)
 	for (i = 0; i < hw->nr_vectors; i++) {
 		struct intc_vect *vect = hw->vectors + i;
 		unsigned int irq = evt2irq(vect->vect);
-		struct irq_desc *irq_desc;
+		int res;
 
 		if (!vect->enum_id)
 			continue;
 
-		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
-		if (unlikely(!irq_desc)) {
+		res = irq_alloc_desc_at(irq, numa_node_id());
+		if (res != irq && res != -EEXIST) {
 			pr_err("can't get irq_desc for %d\n", irq);
 			continue;
 		}
@@ -326,8 +329,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 			 * IRQ support, each vector still needs to have
 			 * its own backing irq_desc.
 			 */
-			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
-			if (unlikely(!irq_desc)) {
+			res = irq_alloc_desc_at(irq2, numa_node_id());
+			if (res != irq2 && res != -EEXIST) {
 				pr_err("can't get irq_desc for %d\n", irq2);
 				continue;
 			}
@@ -387,7 +390,9 @@ static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
 static int intc_suspend(struct sys_device *dev, pm_message_t state)
 {
 	struct intc_desc_int *d;
+	struct irq_data *data;
 	struct irq_desc *desc;
+	struct irq_chip *chip;
 	int irq;
 
 	/* get intc controller associated with this sysdev */
@@ -398,17 +403,21 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
 		if (d->state.event != PM_EVENT_FREEZE)
 			break;
 
-		for_each_irq_desc(irq, desc) {
+		for_each_active_irq(irq) {
+			desc = irq_to_desc(irq);
+			data = irq_get_irq_data(irq);
+			chip = irq_data_get_irq_chip(data);
+
 			/*
 			 * This will catch the redirect and VIRQ cases
 			 * due to the dummy_irq_chip being inserted.
 			 */
-			if (desc->chip != &d->chip)
+			if (chip != &d->chip)
 				continue;
 			if (desc->status & IRQ_DISABLED)
-				desc->chip->disable(irq);
+				chip->irq_disable(data);
 			else
-				desc->chip->enable(irq);
+				chip->irq_enable(data);
 		}
 		break;
 	case PM_EVENT_FREEZE:
@@ -416,11 +425,15 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
 		break;
 	case PM_EVENT_SUSPEND:
 		/* enable wakeup irqs belonging to this intc controller */
-		for_each_irq_desc(irq, desc) {
-			if (desc->chip != &d->chip)
+		for_each_active_irq(irq) {
+			desc = irq_to_desc(irq);
+			data = irq_get_irq_data(irq);
+			chip = irq_data_get_irq_chip(data);
+
+			if (chip != &d->chip)
 				continue;
 			if ((desc->status & IRQ_WAKEUP))
-				desc->chip->enable(irq);
+				chip->irq_enable(data);
 		}
 		break;
 	}

@@ -17,7 +17,7 @@
 #include "internals.h" /* only for activate_irq() damage.. */
 
 /*
- * The intc_irq_map provides a global map of bound IRQ vectors for a
+ * The IRQ bitmap provides a global map of bound IRQ vectors for a
  * given platform. Allocation of IRQs are either static through the CPU
  * vector map, or dynamic in the case of board mux vectors or MSI.
  *
@@ -27,109 +27,38 @@
  * when dynamically creating IRQs, as well as tying in to otherwise
  * unused irq_desc positions in the sparse array.
  */
-static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
-static DEFINE_RAW_SPINLOCK(vector_lock);
 
 /*
  * Dynamic IRQ allocation and deallocation
 */
 unsigned int create_irq_nr(unsigned int irq_want, int node)
 {
-	unsigned int irq = 0, new;
-	unsigned long flags;
-	struct irq_desc *desc;
+	int irq = irq_alloc_desc_at(irq_want, node);
+	if (irq < 0)
+		return 0;
 
-	raw_spin_lock_irqsave(&vector_lock, flags);
-
-	/*
-	 * First try the wanted IRQ
-	 */
-	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
-		new = irq_want;
-	} else {
-		/* .. then fall back to scanning. */
-		new = find_first_zero_bit(intc_irq_map, nr_irqs);
-		if (unlikely(new == nr_irqs))
-			goto out_unlock;
-
-		__set_bit(new, intc_irq_map);
-	}
-
-	desc = irq_to_desc_alloc_node(new, node);
-	if (unlikely(!desc)) {
-		pr_err("can't get irq_desc for %d\n", new);
-		goto out_unlock;
-	}
-
-	desc = move_irq_desc(desc, node);
-	irq = new;
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	if (irq > 0) {
-		dynamic_irq_init(irq);
-		activate_irq(irq);
-	}
+	activate_irq(irq);
 
 	return irq;
 }
 
 int create_irq(void)
 {
-	int nid = cpu_to_node(smp_processor_id());
-	int irq;
-
-	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
-	if (irq == 0)
-		irq = -1;
+	int irq = irq_alloc_desc(numa_node_id());
+	if (irq >= 0)
+		activate_irq(irq);
 
 	return irq;
 }
 
 void destroy_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	dynamic_irq_cleanup(irq);
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	__clear_bit(irq, intc_irq_map);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-int reserve_irq_vector(unsigned int irq)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	if (test_and_set_bit(irq, intc_irq_map))
-		ret = -EBUSY;
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	return ret;
+	irq_free_desc(irq);
 }
 
 void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
 {
-	unsigned long flags;
 	int i;
 
-	raw_spin_lock_irqsave(&vector_lock, flags);
 	for (i = 0; i < nr_vecs; i++)
-		__set_bit(evt2irq(vectors[i].vect), intc_irq_map);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-void reserve_irq_legacy(void)
-{
-	unsigned long flags;
-	int i, j;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	j = find_first_bit(intc_irq_map, nr_irqs);
-	for (i = 0; i < j; i++)
-		__set_bit(i, intc_irq_map);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
+		irq_reserve_irqs(evt2irq(vectors[i].vect), 1);
 }

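With the rewrite above, the private IRQ bitmap and vector_lock disappear and the helpers become thin wrappers around the genirq descriptor allocator. A sketch of how a caller might use them after this change; example_probe(), example_handler() and the request_irq() arguments are hypothetical, and the includes are assumed to bring the create_irq()/destroy_irq() prototypes into scope:

#include <linux/irq.h>
#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(void)
{
	int irq, ret;

	/* backed by irq_alloc_desc() + activate_irq() after this patch */
	irq = create_irq();
	if (irq < 0)
		return irq;

	ret = request_irq(irq, example_handler, 0, "example", NULL);
	if (ret) {
		destroy_irq(irq);	/* now just irq_free_desc() */
		return ret;
	}

	return 0;
}

Note that create_irq_nr() still reports failure by returning 0 rather than a negative errno, so callers of that variant keep checking for zero.
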
@@ -152,7 +152,7 @@ intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
 
 /* chip.c */
 extern struct irq_chip intc_irq_chip;
-void _intc_enable(unsigned int irq, unsigned long handle);
+void _intc_enable(struct irq_data *data, unsigned long handle);
 
 /* core.c */
 extern struct list_head intc_list;

@@ -83,11 +83,11 @@ EXPORT_SYMBOL_GPL(intc_irq_lookup);
 static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
 {
 	struct intc_virq_list **last, *entry;
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_data *data = irq_get_irq_data(irq);
 
 	/* scan for duplicates */
-	last = (struct intc_virq_list **)&desc->handler_data;
-	for_each_virq(entry, desc->handler_data) {
+	last = (struct intc_virq_list **)&data->handler_data;
+	for_each_virq(entry, data->handler_data) {
 		if (entry->irq == virq)
 			return 0;
 		last = &entry->next;
@@ -108,10 +108,12 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
 
 static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct intc_virq_list *entry, *vlist = get_irq_data(irq);
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	struct intc_virq_list *entry, *vlist = irq_data_get_irq_data(data);
 	struct intc_desc_int *d = get_intc_desc(irq);
 
-	desc->chip->mask_ack(irq);
+	chip->irq_mask_ack(data);
 
 	for_each_virq(entry, vlist) {
 		unsigned long addr, handle;
@@ -123,7 +125,7 @@ static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
 		generic_handle_irq(entry->irq);
 	}
 
-	desc->chip->unmask(irq);
+	chip->irq_unmask(data);
 }
 
 static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,

@@ -94,9 +94,9 @@ EXPORT_SYMBOL_GPL(maple_driver_unregister);
 /* set hardware registers to enable next round of dma */
 static void maple_dma_reset(void)
 {
-	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
+	__raw_writel(MAPLE_MAGIC, MAPLE_RESET);
 	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
-	ctrl_outl(1, MAPLE_TRIGTYPE);
+	__raw_writel(1, MAPLE_TRIGTYPE);
 	/*
 	 * Maple system register
 	 * bits 31 - 16 timeout in units of 20nsec
@@ -105,9 +105,9 @@ static void maple_dma_reset(void)
 	 * bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA
 	 * max delay is 11
 	 */
-	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
-	ctrl_outl(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
-	ctrl_outl(1, MAPLE_ENABLE);
+	__raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
+	__raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
+	__raw_writel(1, MAPLE_ENABLE);
 }
 
 /**
@@ -130,7 +130,7 @@ EXPORT_SYMBOL_GPL(maple_getcond_callback);
 
 static int maple_dma_done(void)
 {
-	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
+	return (__raw_readl(MAPLE_STATE) & 1) == 0;
 }
 
 static void maple_release_device(struct device *dev)
@@ -275,7 +275,7 @@ static void maple_send(void)
 		return;
 
 	/* disable DMA */
-	ctrl_outl(0, MAPLE_ENABLE);
+	__raw_writel(0, MAPLE_ENABLE);
 
 	if (!list_empty(&maple_sentq))
 		goto finish;
@@ -450,7 +450,7 @@ static void maple_vblank_handler(struct work_struct *work)
 	if (!maple_dma_done())
 		return;
 
-	ctrl_outl(0, MAPLE_ENABLE);
+	__raw_writel(0, MAPLE_ENABLE);
 
 	if (!list_empty(&maple_sentq))
 		goto finish;
@@ -636,7 +636,7 @@ static void maple_dma_handler(struct work_struct *work)
 
 	if (!maple_dma_done())
 		return;
-	ctrl_outl(0, MAPLE_ENABLE);
+	__raw_writel(0, MAPLE_ENABLE);
 	if (!list_empty(&maple_sentq)) {
 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
 			mdev = mq->dev;
@@ -796,7 +796,7 @@ static int __init maple_bus_init(void)
 	int retval, i;
 	struct maple_device *mdev[MAPLE_PORTS];
 
-	ctrl_outl(0, MAPLE_ENABLE);
+	__raw_writel(0, MAPLE_ENABLE);
 
 	retval = device_register(&maple_bus);
 	if (retval)

@@ -129,7 +129,4 @@ static inline int register_intc_userimask(unsigned long addr)
 }
 #endif
 
-int reserve_irq_vector(unsigned int irq);
-void reserve_irq_legacy(void);
-
 #endif /* __SH_INTC_H */

@@ -188,7 +188,7 @@ static void spu_reset(void)
 	spu_memset(0, 0, 0x200000 / 4);
 	/* Put ARM7 in endless loop */
 	local_irq_save(flags);
-	ctrl_outl(0xea000002, SPU_MEMORY_BASE);
+	__raw_writel(0xea000002, SPU_MEMORY_BASE);
 	local_irq_restore(flags);
 	spu_enable();
 }

@@ -52,8 +52,8 @@ static int __init sh7760_ac97_init(void)
 	unsigned short ipsel;
 
 	/* enable both AC97 controllers in pinmux reg */
-	ipsel = ctrl_inw(IPSEL);
-	ctrl_outw(ipsel | (3 << 10), IPSEL);
+	ipsel = __raw_readw(IPSEL);
+	__raw_writew(ipsel | (3 << 10), IPSEL);
 
 	ret = -ENOMEM;
 	sh7760_ac97_snd_device = platform_device_alloc("soc-audio", -1);