Merge commit 'v2.6.38' into release
Commit: 5c129a8600
@@ -40,8 +40,6 @@ decnet.txt
 	- info on using the DECnet networking layer in Linux.
 depca.txt
 	- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-	- the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
 	- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
 	- info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
 	- serial IP load balancing
-ethertap.txt
-	- the Ethertap user space packet reception and transmission driver
 ewrk3.txt
 	- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
 	- TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
 	- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-	- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
 	- general info on X.25 development.
 x25-iface.txt
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
 	create	dns_resolver	foo:*	*	/usr/sbin/dns.foo %k
 
 
-
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
 
 returned also.
 
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+
+
 =========
 MECHANISM
 =========
MAINTAINERS | 20
@@ -1010,6 +1010,15 @@ L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-s5p*/
 
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M:	Kyungmin Park <kyungmin.park@samsung.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-s5pv210/mach-aquila.c
+F:	arch/arm/mach-s5pv210/mach-goni.c
+F:	arch/arm/mach-exynos4/mach-universal_c210.c
+F:	arch/arm/mach-exynos4/mach-nuri.c
+
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1467,6 +1476,7 @@ F: include/net/bluetooth/
 
 BONDING DRIVER
 M:	Jay Vosburgh <fubar@us.ibm.com>
+M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -2033,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt
 F:	drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M:	Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:	dccp@vger.kernel.org
 W:	http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:	Maintained
@@ -3519,7 +3529,7 @@ F: drivers/hwmon/jc42.c
 F:	Documentation/hwmon/jc42
 
 JFS FILESYSTEM
-M:	Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+M:	Dave Kleikamp <shaggy@kernel.org>
 L:	jfs-discussion@lists.sourceforge.net
 W:	http://jfs.sourceforge.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -4282,10 +4292,7 @@ S: Maintained
 F:	net/sched/sch_netem.c
 
 NETERION 10GbE DRIVERS (s2io/vxge)
-M:	Ramkrishna Vepa <ramkrishna.vepa@exar.com>
-M:	Sivakumar Subramani <sivakumar.subramani@exar.com>
-M:	Sreenivasa Honnur <sreenivasa.honnur@exar.com>
-M:	Jon Mason <jon.mason@exar.com>
+M:	Jon Mason <jdmason@kudzu.us>
 L:	netdev@vger.kernel.org
 W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
 W:	http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -5171,6 +5178,7 @@ F: drivers/char/random.c
 
 RAPIDIO SUBSYSTEM
 M:	Matt Porter <mporter@kernel.crashing.org>
+M:	Alexandre Bounine <alexandre.bounine@idt.com>
 S:	Maintained
 F:	drivers/rapidio/
 
Makefile | 2
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
@@ -11,6 +11,7 @@ config ALPHA
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc[irq];
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_chip *chip;
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+	if (!data)
+		return 1;
+	chip = irq_data_get_irq_chip(data);
+
+	if (!chip->irq_set_affinity || irq_user_affinity[irq])
 		return 1;
 
 	while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(desc->affinity, cpumask_of(cpu));
-	get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+	cpumask_copy(data->affinity, cpumask_of(cpu));
+	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
 #endif /* CONFIG_SMP */
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-	struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-
-	if (desc) {
-		desc->status |= IRQ_DISABLED;
-		set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-			handle_simple_irq, "RTC");
-		setup_irq(RTC_IRQ, &timer_irqaction);
-	}
+	set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+				      handle_simple_irq, "RTC");
+	setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
 /* Dummy irqactions. */
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 	spin_unlock(&i8259_irq_lock);
 }
 
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	__i8259a_disable_irq(irq);
+	__i8259a_disable_irq(d->irq);
 	spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	spin_lock(&i8259_irq_lock);
 	__i8259a_disable_irq(irq);
 
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 
 struct irq_chip i8259a_irq_type = {
 	.name		= "XT-PIC",
-	.unmask		= i8259a_enable_irq,
-	.mask		= i8259a_disable_irq,
-	.mask_ack	= i8259a_mask_and_ack_irq,
+	.irq_unmask	= i8259a_enable_irq,
+	.irq_mask	= i8259a_disable_irq,
+	.irq_mask_ack	= i8259a_mask_and_ack_irq,
 };
 
 void __init
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-	unsigned long bit = 1UL << (irq - 16);
+	unsigned long bit = 1UL << (d->irq - 16);
 	unsigned long mask = cached_irq_mask &= ~bit;
 
 	/* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
 	.name		= "PYXIS",
-	.mask_ack	= pyxis_mask_and_ack_irq,
-	.mask		= pyxis_disable_irq,
-	.unmask		= pyxis_enable_irq,
+	.irq_mask_ack	= pyxis_mask_and_ack_irq,
+	.irq_mask	= pyxis_disable_irq,
+	.irq_unmask	= pyxis_enable_irq,
 };
 
 void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
 		if ((ignore_mask >> i) & 1)
 			continue;
 		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	setup_irq(16+7, &isa_cascade_irqaction);
@@ -18,27 +18,27 @@
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_ena(irq - 16);
+	cserve_ena(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_dis(irq - 16);
+	cserve_dis(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness. */
 static struct irq_chip srm_irq_type = {
 	.name		= "SRM",
-	.unmask		= srm_enable_irq,
-	.mask		= srm_disable_irq,
-	.mask_ack	= srm_disable_irq,
+	.irq_unmask	= srm_enable_irq,
+	.irq_mask	= srm_disable_irq,
+	.irq_mask_ack	= srm_disable_irq,
 };
 
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
 		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-	alcor_disable_irq(irq);
+	alcor_disable_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-	*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-	i8259a_mask_and_ack_irq(irq);
+	i8259a_mask_and_ack_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
 	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip alcor_irq_type = {
 	.name		= "ALCOR",
-	.unmask		= alcor_enable_irq,
-	.mask		= alcor_disable_irq,
-	.mask_ack	= alcor_mask_and_ack_irq,
+	.irq_unmask	= alcor_enable_irq,
+	.irq_mask	= alcor_disable_irq,
+	.irq_mask_ack	= alcor_mask_and_ack_irq,
 };
 
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
 		if (i >= 16+20 && i <= 16+30)
 			continue;
 		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
-	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
 	init_i8259a_irqs();
 	common_init_isa_dma();
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
 	.name		= "CABRIOLET",
-	.unmask		= cabriolet_enable_irq,
-	.mask		= cabriolet_disable_irq,
-	.mask_ack	= cabriolet_disable_irq,
+	.irq_unmask	= cabriolet_enable_irq,
+	.irq_mask	= cabriolet_disable_irq,
+	.irq_mask_ack	= cabriolet_disable_irq,
 };
 
 static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
 		for (i = 16; i < 35; ++i) {
 			set_irq_chip_and_handler(i, &cabriolet_irq_type,
 				handle_level_irq);
-			irq_to_desc(i)->status |= IRQ_LEVEL;
+			irq_set_status_flags(i, IRQ_LEVEL);
 		}
 	}
 
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << irq;
+	cached_irq_mask |= 1UL << d->irq;
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << irq);
+	cached_irq_mask &= ~(1UL << d->irq);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << (irq - 16);
+	cached_irq_mask |= 1UL << (d->irq - 16);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << (irq - 16));
+	cached_irq_mask &= ~(1UL << (d->irq - 16));
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		   bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, *affinity);
+	cpu_set_irq_affinity(d->irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		     bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, *affinity);
+	cpu_set_irq_affinity(d->irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static struct irq_chip dp264_irq_type = {
 	.name		= "DP264",
-	.unmask		= dp264_enable_irq,
-	.mask		= dp264_disable_irq,
-	.mask_ack	= dp264_disable_irq,
-	.set_affinity	= dp264_set_affinity,
+	.irq_unmask	= dp264_enable_irq,
+	.irq_mask	= dp264_disable_irq,
+	.irq_mask_ack	= dp264_disable_irq,
+	.irq_set_affinity = dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
 	.name		= "CLIPPER",
-	.unmask		= clipper_enable_irq,
-	.mask		= clipper_disable_irq,
-	.mask_ack	= clipper_disable_irq,
-	.set_affinity	= clipper_set_affinity,
+	.irq_unmask	= clipper_enable_irq,
+	.irq_mask	= clipper_disable_irq,
+	.irq_mask_ack	= clipper_disable_irq,
+	.irq_set_affinity = clipper_set_affinity,
 };
 
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+	eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
 	.name		= "EB64P",
-	.unmask		= eb64p_enable_irq,
-	.mask		= eb64p_disable_irq,
-	.mask_ack	= eb64p_disable_irq,
+	.irq_unmask	= eb64p_enable_irq,
+	.irq_mask	= eb64p_disable_irq,
+	.irq_mask_ack	= eb64p_disable_irq,
 };
 
 static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
-	}
+		irq_set_status_flags(i, IRQ_LEVEL);
+	}
 
 	common_init_isa_dma();
 	setup_irq(16+5, &isa_cascade_irqaction);
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 
 static struct irq_chip eiger_irq_type = {
 	.name		= "EIGER",
-	.unmask		= eiger_enable_irq,
-	.mask		= eiger_disable_irq,
-	.mask_ack	= eiger_disable_irq,
+	.irq_unmask	= eiger_enable_irq,
+	.irq_mask	= eiger_disable_irq,
+	.irq_mask_ack	= eiger_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
@@ -63,34 +63,34 @@
  */
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_enable_irq(1);
+	if (d->irq == 7)
+		i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_disable_irq(1);
+	if (d->irq == 7)
+		i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_mask_and_ack_irq(1);
+	if (d->irq == 7)
+		i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
 	.name		= "LOCAL",
-	.unmask		= jensen_local_enable,
-	.mask		= jensen_local_disable,
-	.mask_ack	= jensen_local_mask_ack,
+	.irq_unmask	= jensen_local_enable,
+	.irq_mask	= jensen_local_disable,
+	.irq_mask_ack	= jensen_local_mask_ack,
 };
 
 static void
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
 
 	spin_lock(&io7->irq_lock);
 	*ctl |= 1UL << 24;
 	mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
 
 	spin_lock(&io7->irq_lock);
 	*ctl &= ~(1UL << 24);
 	mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 
 static void
-marvel_irq_noop(unsigned int irq)
+marvel_irq_noop(struct irq_data *d)
 {
 	return;
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq)
-{
-	return 0;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
 	.name		= "LEGACY",
-	.mask		= marvel_irq_noop,
-	.unmask		= marvel_irq_noop,
+	.irq_mask	= marvel_irq_noop,
+	.irq_unmask	= marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
 	.name		= "LSI",
-	.unmask		= io7_enable_irq,
-	.mask		= io7_disable_irq,
-	.mask_ack	= io7_disable_irq,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_mask_ack	= io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
 	.name		= "MSI",
-	.unmask		= io7_enable_irq,
-	.mask		= io7_disable_irq,
-	.ack		= marvel_irq_noop,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_ack	= marvel_irq_noop,
 };
 
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the lsi irqs. */
 	for (i = 0; i < 128; ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	/* Disable the implemented irqs in hardware. */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the msi irqs. */
 	for (i = 128; i < (128 + 512); ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	for (i = 0; i < 16; ++i)
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+	mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
 	.name		= "MIKASA",
-	.unmask		= mikasa_enable_irq,
-	.mask		= mikasa_disable_irq,
-	.mask_ack	= mikasa_disable_irq,
+	.irq_unmask	= mikasa_enable_irq,
+	.irq_mask	= mikasa_disable_irq,
+	.irq_mask_ack	= mikasa_disable_irq,
 };
 
 static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
 	mikasa_update_irq_hw(0);
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+	noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+	noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
 	.name		= "NORITAKE",
-	.unmask		= noritake_enable_irq,
-	.mask		= noritake_disable_irq,
-	.mask_ack	= noritake_disable_irq,
+	.irq_unmask	= noritake_enable_irq,
+	.irq_mask	= noritake_disable_irq,
+	.irq_mask_ack	= noritake_disable_irq,
 };
 
 static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
 	outw(0, 0x54c);
 
 	for (i = 16; i < 48; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
 	(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 
 static void
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned int mask, mask1, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip rawhide_irq_type = {
 	.name		= "RAWHIDE",
-	.unmask		= rawhide_enable_irq,
-	.mask		= rawhide_disable_irq,
-	.mask_ack	= rawhide_mask_and_ack_irq,
+	.irq_unmask	= rawhide_enable_irq,
+	.irq_mask	= rawhide_disable_irq,
+	.irq_mask_ack	= rawhide_mask_and_ack_irq,
 };
 
 static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
 	}
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
 	.name		= "RX164",
-	.unmask		= rx164_enable_irq,
-	.mask		= rx164_disable_irq,
-	.mask_ack	= rx164_disable_irq,
+	.irq_unmask	= rx164_enable_irq,
+	.irq_mask	= rx164_disable_irq,
+	.irq_mask_ack	= rx164_disable_irq,
 };
 
 static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
 
 	rx164_update_irq_hw(0);
 	for (i = 16; i < 40; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
 	.name		= "SABLE/LYNX",
-	.unmask		= sable_lynx_enable_irq,
-	.mask		= sable_lynx_disable_irq,
-	.mask_ack	= sable_lynx_mask_and_ack_irq,
+	.irq_unmask	= sable_lynx_enable_irq,
+	.irq_mask	= sable_lynx_disable_irq,
+	.irq_mask_ack	= sable_lynx_mask_and_ack_irq,
 };
 
 static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
 	long i;
 
 	for (i = 0; i < nr_of_irqs; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
 					 handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 
 static struct irq_chip takara_irq_type = {
 	.name		= "TAKARA",
-	.unmask		= takara_enable_irq,
-	.mask		= takara_disable_irq,
-	.mask_ack	= takara_disable_irq,
+	.irq_unmask	= takara_enable_irq,
+	.irq_mask	= takara_disable_irq,
+	.irq_mask_ack	= takara_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
 		takara_update_irq_hw(i, -1);
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask |= 1UL << (irq - 16);
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
 }
 
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask &= ~(1UL << (irq - 16));
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,8 +146,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+		       bool force)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cpu_set_irq_affinity(irq - 16, *affinity);
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -175,17 +179,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
 static struct irq_chip titan_irq_type = {
 	.name		= "TITAN",
-	.unmask		= titan_enable_irq,
-	.mask		= titan_disable_irq,
-	.mask_ack	= titan_disable_irq,
-	.set_affinity	= titan_set_irq_affinity,
+	.irq_unmask	= titan_enable_irq,
+	.irq_mask	= titan_disable_irq,
+	.irq_mask_ack	= titan_disable_irq,
+	.irq_set_affinity = titan_set_irq_affinity,
 };
 
 static irqreturn_t
|
@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
wildfire_enable_irq(unsigned int irq)
|
wildfire_enable_irq(struct irq_data *d)
|
||||||
{
|
{
|
||||||
|
unsigned int irq = d->irq;
|
||||||
|
|
||||||
if (irq < 16)
|
if (irq < 16)
|
||||||
i8259a_enable_irq(irq);
|
i8259a_enable_irq(d);
|
||||||
|
|
||||||
spin_lock(&wildfire_irq_lock);
|
spin_lock(&wildfire_irq_lock);
|
||||||
set_bit(irq, &cached_irq_mask);
|
set_bit(irq, &cached_irq_mask);
|
||||||
|
@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
wildfire_disable_irq(unsigned int irq)
|
wildfire_disable_irq(struct irq_data *d)
|
||||||
{
|
{
|
||||||
|
unsigned int irq = d->irq;
|
||||||
|
|
||||||
if (irq < 16)
|
if (irq < 16)
|
||||||
i8259a_disable_irq(irq);
|
i8259a_disable_irq(d);
|
||||||
|
|
||||||
spin_lock(&wildfire_irq_lock);
|
spin_lock(&wildfire_irq_lock);
|
||||||
clear_bit(irq, &cached_irq_mask);
|
clear_bit(irq, &cached_irq_mask);
|
||||||
|
@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
wildfire_mask_and_ack_irq(unsigned int irq)
|
wildfire_mask_and_ack_irq(struct irq_data *d)
|
||||||
{
|
{
|
||||||
|
unsigned int irq = d->irq;
|
||||||
|
|
||||||
if (irq < 16)
|
if (irq < 16)
|
||||||
i8259a_mask_and_ack_irq(irq);
|
i8259a_mask_and_ack_irq(d);
|
||||||
|
|
||||||
spin_lock(&wildfire_irq_lock);
|
spin_lock(&wildfire_irq_lock);
|
||||||
clear_bit(irq, &cached_irq_mask);
|
clear_bit(irq, &cached_irq_mask);
|
||||||
|
@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
|
||||||
|
|
||||||
static struct irq_chip wildfire_irq_type = {
|
static struct irq_chip wildfire_irq_type = {
|
||||||
.name = "WILDFIRE",
|
.name = "WILDFIRE",
|
||||||
.unmask = wildfire_enable_irq,
|
.irq_unmask = wildfire_enable_irq,
|
||||||
.mask = wildfire_disable_irq,
|
.irq_mask = wildfire_disable_irq,
|
||||||
.mask_ack = wildfire_mask_and_ack_irq,
|
.irq_mask_ack = wildfire_mask_and_ack_irq,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init
|
static void __init
|
||||||
|
@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
|
||||||
for (i = 0; i < 16; ++i) {
|
for (i = 0; i < 16; ++i) {
|
||||||
if (i == 2)
|
if (i == 2)
|
||||||
continue;
|
continue;
|
||||||
irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
|
|
||||||
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
|
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
|
||||||
handle_level_irq);
|
handle_level_irq);
|
||||||
|
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
|
||||||
}
|
}
|
||||||
|
|
||||||
irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
|
|
||||||
set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
|
set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
|
||||||
handle_level_irq);
|
handle_level_irq);
|
||||||
|
irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
|
||||||
for (i = 40; i < 64; ++i) {
|
for (i = 40; i < 64; ++i) {
|
||||||
irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
|
|
||||||
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
|
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
|
||||||
handle_level_irq);
|
handle_level_irq);
|
||||||
|
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
|
||||||
}
|
}
|
||||||
|
|
||||||
setup_irq(32+irq_bias, &isa_enable);
|
setup_irq(32+irq_bias, &isa_enable);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __init
|
static void __init
|
||||||
|
|
|
@@ -6,6 +6,8 @@ config ARM_VIC
 
 config ARM_VIC_NR
 	int
+	default 4 if ARCH_S5PV210
+	default 3 if ARCH_S5P6442 || ARCH_S5PC100
 	default 2
 	depends on ARM_VIC
 	help
@@ -15,10 +15,6 @@ struct meminfo;
 struct sys_timer;
 
 struct machine_desc {
-	/*
-	 * Note! The first two elements are used
-	 * by assembler code in head.S, head-common.S
-	 */
 	unsigned int		nr;		/* architecture number	*/
 	const char		*name;		/* architecture name	*/
 	unsigned long		boot_params;	/* tagged list		*/
@@ -10,6 +10,8 @@
 #ifndef _ASMARM_PGALLOC_H
 #define _ASMARM_PGALLOC_H
 
+#include <linux/pagemap.h>
+
 #include <asm/domain.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>
@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-	int i;
+	int i, cpu = smp_processor_id();
+	u32 dbg_power;
+	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
@@ -849,6 +851,17 @@ static void reset_ctrl_regs(void *unused)
 	 * later on.
 	 */
 	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0) {
+			pr_warning("CPU %d debug is powered down!\n", cpu);
+			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+			return;
+		}
+
 		/*
 		 * Unconditionally clear the lock by writing a value
 		 * other than 0xC5ACCE55 to the access register.
@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
+	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
 	 * Reset the breakpoint resources. We assume that a halting
 	 * debugger will leave the world in a nice state for us.
 	 */
-	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+	if (!cpumask_empty(&cpumask)) {
+		core_num_brps = 0;
+		core_num_reserved_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}
 
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
 			while (!(arch_ctrl.len & 0x1))
 				arch_ctrl.len >>= 1;

-			if (idx & 0x1)
-				reg = encode_ctrl_reg(arch_ctrl);
-			else
+			if (num & 0x1)
 				reg = bp->attr.bp_addr;
+			else
+				reg = encode_ctrl_reg(arch_ctrl);
 	}

 put:
@@ -132,7 +132,7 @@ out:
 	return ret;
 }

-static int __init davinci_cpu_init(struct cpufreq_policy *policy)
+static int davinci_cpu_init(struct cpufreq_policy *policy)
 {
 	int result = 0;
 	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = {
 	.resource	= da850_mcasp_resources,
 };

+struct platform_device davinci_pcm_device = {
+	.name	= "davinci-pcm-audio",
+	.id	= -1,
+};
+
 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
 {
+	platform_device_register(&davinci_pcm_device);
+
 	/* DA830/OMAP-L137 has 3 instances of McASP */
 	if (cpu_is_davinci_da830() && id == 1) {
 		da830_mcasp1_device.dev.platform_data = pdata;
@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)

 	spin_lock_irqsave(&ctlr->lock, flags);

-	gpio_reg_set_bit(&regs->enable, gpio);
+	gpio_reg_set_bit(regs->enable, gpio);

 	spin_unlock_irqrestore(&ctlr->lock, flags);

@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)

 	spin_lock_irqsave(&ctlr->lock, flags);

-	gpio_reg_clear_bit(&regs->enable, gpio);
+	gpio_reg_clear_bit(regs->enable, gpio);

 	spin_unlock_irqrestore(&ctlr->lock, flags);
 }
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)

 	spin_lock_irqsave(&ctlr->lock, flags);

-	gpio_reg_set_bit(&regs->direction, gpio);
+	gpio_reg_set_bit(regs->direction, gpio);

 	spin_unlock_irqrestore(&ctlr->lock, flags);

@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
 	spin_lock_irqsave(&ctlr->lock, flags);

 	if (value)
-		gpio_reg_set_bit(&regs->data_out, gpio);
+		gpio_reg_set_bit(regs->data_out, gpio);
 	else
-		gpio_reg_clear_bit(&regs->data_out, gpio);
+		gpio_reg_clear_bit(regs->data_out, gpio);

-	gpio_reg_clear_bit(&regs->direction, gpio);
+	gpio_reg_clear_bit(regs->direction, gpio);

 	spin_unlock_irqrestore(&ctlr->lock, flags);

@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
 	unsigned gpio = chip->base + offset;
 	int ret;

-	ret = gpio_reg_get_bit(&regs->data_in, gpio);
+	ret = gpio_reg_get_bit(regs->data_in, gpio);

 	return ret ? 1 : 0;
 }
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
 	spin_lock_irqsave(&ctlr->lock, flags);

 	if (value)
-		gpio_reg_set_bit(&regs->data_out, gpio);
+		gpio_reg_set_bit(regs->data_out, gpio);
 	else
-		gpio_reg_clear_bit(&regs->data_out, gpio);
+		gpio_reg_clear_bit(regs->data_out, gpio);

 	spin_unlock_irqrestore(&ctlr->lock, flags);
 }
@@ -1,6 +1,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H

+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
 	return 1;
@@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
 		omap_mbox_type_t irq)
 {
 	struct omap_mbox2_priv *p = mbox->priv;
-	u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-	l = mbox_read_reg(p->irqdisable);
-	l &= ~bit;
-	mbox_write_reg(l, p->irqdisable);
+	u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
+
+	if (!cpu_is_omap44xx())
+		bit = mbox_read_reg(p->irqdisable) & ~bit;
+
+	mbox_write_reg(bit, p->irqdisable);
 }

 static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
@@ -282,6 +282,7 @@ error:
 		dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
 			"interrupt handler. Smartreflex will"
 			"not function as desired\n", __func__);
+		kfree(name);
 		kfree(sr_info);
 		return ret;
 	}
@@ -879,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 		ret = sr_late_init(sr_info);
 		if (ret) {
 			pr_warning("%s: Error in SR late init\n", __func__);
-			return ret;
+			goto err_release_region;
 		}
 	}

@@ -890,14 +891,17 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 	 * not try to create rest of the debugfs entries.
 	 */
 	vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
-	if (!vdd_dbg_dir)
-		return -EINVAL;
+	if (!vdd_dbg_dir) {
+		ret = -EINVAL;
+		goto err_release_region;
+	}

 	dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
 	if (IS_ERR(dbg_dir)) {
 		dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
 			__func__);
-		return PTR_ERR(dbg_dir);
+		ret = PTR_ERR(dbg_dir);
+		goto err_release_region;
 	}

 	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
@@ -913,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 	if (IS_ERR(nvalue_dir)) {
 		dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
 			"for n-values\n", __func__);
-		return PTR_ERR(nvalue_dir);
+		ret = PTR_ERR(nvalue_dir);
+		goto err_release_region;
 	}

 	omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -922,23 +927,15 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 			" corresponding vdd vdd_%s. Cannot create debugfs"
 			"entries for n-values\n",
 			__func__, sr_info->voltdm->name);
-		return -ENODATA;
+		ret = -ENODATA;
+		goto err_release_region;
 	}

 	for (i = 0; i < sr_info->nvalue_count; i++) {
-		char *name;
-		char volt_name[32];
-
-		name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
-		if (!name) {
-			dev_err(&pdev->dev, "%s: Unable to allocate memory"
-				" for n-value directory name\n", __func__);
-			return -ENOMEM;
-		}
+		char name[NVALUE_NAME_LEN + 1];

-		strcpy(name, "volt_");
-		sprintf(volt_name, "%d", volt_data[i].volt_nominal);
-		strcat(name, volt_name);
+		snprintf(name, sizeof(name), "volt_%d",
+			 volt_data[i].volt_nominal);
 		(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
 				&(sr_info->nvalue_table[i].nvalue));
 	}
@@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = {
 	&pxa25x_device_assp,
 	&pxa25x_device_pwm0,
 	&pxa25x_device_pwm1,
+	&pxa_device_asoc_platform,
 };

 static struct sys_device pxa25x_sysdev[] = {
@@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev)
 		goto err_rfk_alloc;
 	}

-	rfkill_set_led_trigger_name(rfk, "tosa-bt");
-
 	rc = rfkill_register(rfk);
 	if (rc)
 		goto err_rfkill;
@@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = {
 	.dev.platform_data = &sharpsl_rom_data,
 };

+static struct platform_device wm9712_device = {
+	.name	= "wm9712-codec",
+	.id	= -1,
+};
+
 static struct platform_device *devices[] __initdata = {
 	&tosascoop_device,
 	&tosascoop_jc_device,
@@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = {
 	&tosaled_device,
 	&tosa_bt_device,
 	&sharpsl_rom_device,
+	&wm9712_device,
 };

 static void tosa_poweroff(void)
@@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02
 	select POWER_SUPPLY
 	select MACH_NEO1973
 	select S3C2410_PWM
+	select S3C_DEV_USB_HOST
 	help
 	   Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone

@@ -44,19 +44,19 @@
 #define GTA02v3_GPIO_nUSB_FLT	S3C2410_GPG(10)	/* v3 + v4 only */
 #define GTA02v3_GPIO_nGSM_OC	S3C2410_GPG(11)	/* v3 + v4 only */

-#define GTA02_GPIO_AMP_SHUT	S3C2440_GPJ1	/* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10	S3C2440_GPJ2
-#define GTA02_GPIO_HP_IN	S3C2440_GPJ2	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0		S3C2440_GPJ3	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN	S3C2440_GPJ4
-#define GTA02_GPIO_3D_RESET	S3C2440_GPJ5
-#define GTA02_GPIO_nDL_GSM	S3C2440_GPJ6	/* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0	S3C2440_GPJ7
-#define GTA02v1_GPIO_BAT_ID	S3C2440_GPJ8
-#define GTA02_GPIO_KEEPACT	S3C2440_GPJ8
-#define GTA02v1_GPIO_HP_IN	S3C2440_GPJ10
-#define GTA02_CHIP_PWD		S3C2440_GPJ11	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET	S3C2440_GPJ12	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_AMP_SHUT	S3C2410_GPJ(1)	/* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10	S3C2410_GPJ(2)
+#define GTA02_GPIO_HP_IN	S3C2410_GPJ(2)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0		S3C2410_GPJ(3)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN	S3C2410_GPJ(4)
+#define GTA02_GPIO_3D_RESET	S3C2410_GPJ(5)
+#define GTA02_GPIO_nDL_GSM	S3C2410_GPJ(6)	/* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0	S3C2410_GPJ(7)
+#define GTA02v1_GPIO_BAT_ID	S3C2410_GPJ(8)
+#define GTA02_GPIO_KEEPACT	S3C2410_GPJ(8)
+#define GTA02v1_GPIO_HP_IN	S3C2410_GPJ(10)
+#define GTA02_CHIP_PWD		S3C2410_GPJ(11)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET	S3C2410_GPJ(12)	/* v2 + v3 + v4 only */

 #define GTA02_IRQ_GSENSOR_1	IRQ_EINT0
 #define GTA02_IRQ_MODEM		IRQ_EINT1
@@ -150,6 +150,12 @@ static struct clk init_clocks_off[] = {
 		.parent		= &clk_p,
 		.enable		= s3c64xx_pclk_ctrl,
 		.ctrlbit	= S3C_CLKCON_PCLK_IIC,
+	}, {
+		.name		= "i2c",
+		.id		= 1,
+		.parent		= &clk_p,
+		.enable		= s3c64xx_pclk_ctrl,
+		.ctrlbit	= S3C6410_CLKCON_PCLK_I2C1,
 	}, {
 		.name		= "iis",
 		.id		= 0,
@@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,

 	regptr = regs + PL080_Cx_BASE(0);

-	for (ch = 0; ch < 8; ch++, chno++, chptr++) {
-		printk(KERN_INFO "%s: registering DMA %d (%p)\n",
-		       __func__, chno, regptr);
+	for (ch = 0; ch < 8; ch++, chptr++) {
+		pr_debug("%s: registering DMA %d (%p)\n",
+			 __func__, chno + ch, regptr);

 		chptr->bit = 1 << ch;
-		chptr->number = chno;
+		chptr->number = chno + ch;
 		chptr->dmac = dmac;
 		chptr->regs = regptr;
 		regptr += PL080_Cx_STRIDE;
@@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
 	/* for the moment, permanently enable the controller */
 	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);

-	printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);
+	printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
+	       irq, regs, chno, chno+8);

 	return 0;
@@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
 	.get_pull	= s3c_gpio_getpull_updown,
 };

-int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
 {
 	return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
 }
@@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = {
 	},
 };

-int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
 {
 	return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
 }
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/smsc911x.h>
 #include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>

 #ifdef CONFIG_SMDK6410_WM1190_EV1
 #include <linux/mfd/wm8350/core.h>
@@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = {
 /* VDD_UH_MMC, LDO5 on J5 */
 static struct regulator_init_data smdk6410_vdduh_mmc = {
 	.constraints = {
-		.name = "PVDD_UH/PVDD_MMC",
+		.name = "PVDD_UH+PVDD_MMC",
 		.always_on = 1,
 	},
 };
@@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = {
 /* S3C64xx internal logic & PLL */
 static struct regulator_init_data wm8350_dcdc1_data = {
 	.constraints = {
-		.name = "PVDD_INT/PVDD_PLL",
+		.name = "PVDD_INT+PVDD_PLL",
 		.min_uV = 1200000,
 		.max_uV = 1200000,
 		.always_on = 1,
@@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {

 static struct regulator_init_data wm8350_dcdc4_data = {
 	.constraints = {
-		.name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV",
+		.name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
 		.min_uV = 3000000,
 		.max_uV = 3000000,
 		.always_on = 1,
@@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = {
 /* OTGi/1190-EV1 HPVDD & AVDD */
 static struct regulator_init_data wm8350_ldo4_data = {
 	.constraints = {
-		.name = "PVDD_OTGI/HPVDD/AVDD",
+		.name = "PVDD_OTGI+HPVDD+AVDD",
 		.min_uV = 1200000,
 		.max_uV = 1200000,
 		.apply_uV = 1,
@@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = {

 static struct regulator_init_data wm1192_dcdc3 = {
 	.constraints = {
-		.name = "PVDD_MEM/PVDD_GPS",
+		.name = "PVDD_MEM+PVDD_GPS",
 		.always_on = 1,
 	},
 };
@@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {

 static struct regulator_init_data wm1192_ldo1 = {
 	.constraints = {
-		.name = "PVDD_LCD/PVDD_EXT",
+		.name = "PVDD_LCD+PVDD_EXT",
 		.always_on = 1,
 	},
 	.consumer_supplies = wm1192_ldo1_consumers,
@@ -17,7 +17,7 @@
 void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
 {
 	/* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
-	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3));
+	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));

 	/* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
 	s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
@@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev,
 	else
 		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);

-	printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
+	pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
 	writel(ctrl2, r + S3C_SDHCI_CONTROL2);
 	writel(ctrl3, r + S3C_SDHCI_CONTROL3);
 }
@@ -23,7 +23,7 @@
 #define S5P6440_GPIO_A_NR	(6)
 #define S5P6440_GPIO_B_NR	(7)
 #define S5P6440_GPIO_C_NR	(8)
-#define S5P6440_GPIO_F_NR	(2)
+#define S5P6440_GPIO_F_NR	(16)
 #define S5P6440_GPIO_G_NR	(7)
 #define S5P6440_GPIO_H_NR	(10)
 #define S5P6440_GPIO_I_NR	(16)
@@ -36,7 +36,7 @@
 #define S5P6450_GPIO_B_NR	(7)
 #define S5P6450_GPIO_C_NR	(8)
 #define S5P6450_GPIO_D_NR	(8)
-#define S5P6450_GPIO_F_NR	(2)
+#define S5P6450_GPIO_F_NR	(16)
 #define S5P6450_GPIO_G_NR	(14)
 #define S5P6450_GPIO_H_NR	(10)
 #define S5P6450_GPIO_I_NR	(16)
@@ -454,6 +454,7 @@ static void __init ag5evm_init(void)
 	gpio_direction_output(GPIO_PORT217, 0);
 	mdelay(1);
 	gpio_set_value(GPIO_PORT217, 1);
+	mdelay(100);

 	/* LCD backlight controller */
 	gpio_request(GPIO_PORT235, NULL); /* RESET */
@@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void)

 	lcdc_info.clock_source = LCDC_CLK_BUS;
 	lcdc_info.ch[0].interface_type = RGB18;
-	lcdc_info.ch[0].clock_divider = 2;
+	lcdc_info.ch[0].clock_divider = 3;
 	lcdc_info.ch[0].flags = 0;
 	lcdc_info.ch[0].lcd_size_cfg.width = 152;
 	lcdc_info.ch[0].lcd_size_cfg.height = 91;
@@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
 		.lcd_cfg = mackerel_lcdc_modes,
 		.num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
 		.interface_type = RGB24,
-		.clock_divider = 2,
+		.clock_divider = 3,
 		.flags = 0,
 		.lcd_size_cfg.width = 152,
 		.lcd_size_cfg.height = 91,
@@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = {
 };

 enum { MSTP001,
-       MSTP125, MSTP118, MSTP116, MSTP100,
+       MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
        MSTP219,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
@@ -275,6 +275,10 @@ enum { MSTP001,

 static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
+	[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
+	[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
+	[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
+	[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
 	[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
 	[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
 	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
@@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_CON_ID("r_clk", &r_clk),

 	/* DIV6 clocks */
+	CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
+	CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
+	CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
 	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
 	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
 	CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = {

 	/* MSTP32 clocks */
 	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
-	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
+	CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
+	CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
+	CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
+	CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
 	CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
 	CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
-	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
 	CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
+	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
 	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
 	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
 	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
@@ -6,13 +6,10 @@ LIST "RWT Setting"
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500

-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2

 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002

 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040

 WAIT 1, 0xFE40009C

+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B

@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087

-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C

 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050

 WAIT 1, 0xFE40009C

-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002

 LIST "SCIF0 - Serial port for earlyprintk"
 EB 0xE6053098, 0x11
@@ -6,13 +6,10 @@ LIST "RWT Setting"
 EW 0xE6020004, 0xA500
 EW 0xE6030004, 0xA500

-DD 0x01001000, 0x01001000
-
 LIST "GPIO Setting"
 EB 0xE6051013, 0xA2

 LIST "CPG"
-ED 0xE6150080, 0x00000180
 ED 0xE61500C0, 0x00000002

 WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040

 WAIT 1, 0xFE40009C

+LIST "SUB/USBClk"
+ED 0xE6150080, 0x00000180
+
 LIST "BSC"
 ED 0xFEC10000, 0x00E0001B

@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
 ED 0xFE40004C, 0x00110209
 ED 0xFE400010, 0x00000087

-WAIT 10, 0xFE40009C
+WAIT 30, 0xFE40009C

 ED 0xFE400084, 0x0000003F
 EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050

 WAIT 1, 0xFE40009C

-ED 0xE6150354, 0x00000002
+ED 0xFE400354, 0x01AD8002

 LIST "SCIF0 - Serial port for earlyprintk"
 EB 0xE6053098, 0x11
@@ -15,6 +15,8 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>

+#include <plat/devs.h>
+
 /* uart devices */

 static struct platform_device s3c24xx_uart_device0 = {
@@ -13,6 +13,8 @@
 .align 2

 ENTRY(_outsl)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -20,10 +22,12 @@ ENTRY(_outsl)
 	LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
 .Llong_loop_s: R0 = [P1++];
 .Llong_loop_e: [P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsl)

 ENTRY(_outsw)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -31,10 +35,12 @@ ENTRY(_outsw)
 	LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
 .Lword_loop_s: R0 = W[P1++];
 .Lword_loop_e: W[P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsw)

 ENTRY(_outsb)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -42,10 +48,12 @@ ENTRY(_outsb)
 	LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
 .Lbyte_loop_s: R0 = B[P1++];
 .Lbyte_loop_e: B[P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsb)

 ENTRY(_outsw_8)
+	CC = R2 == 0;
+	IF CC JUMP 1f;
 	P0 = R0;	/* P0 = port */
 	P1 = R1;	/* P1 = address */
 	P2 = R2;	/* P2 = count */
@@ -56,5 +64,5 @@ ENTRY(_outsw_8)
 	R0 = R0 << 8;
 	R0 = R0 + R1;
 .Lword8_loop_e: W[P0] = R0;
-	RTS;
+1:	RTS;
 ENDPROC(_outsw_8)
@@ -58,6 +58,8 @@
 1:
 .ifeqs "\flushins", BROK_FLUSH_INST
 	\flushins [P0++];
+	nop;
+	nop;
 2:	nop;
 .else
 2:	\flushins [P0++];
@@ -4,6 +4,7 @@ config MIPS
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
@@ -208,6 +209,7 @@ config MACH_JZ4740
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_HAS_EARLY_PRINTK
 	select HAVE_PWM
+	select HAVE_CLK

 config LASAT
 	bool "LASAT Networks platforms"
@@ -333,6 +335,8 @@ config PNX8550_STB810
 config PMC_MSP
 	bool "PMC-Sierra MSP chipsets"
 	depends on EXPERIMENTAL
+	select CEVT_R4K
+	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select SWAP_IO_SPACE
 	select NO_EXCEPT_FILL
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);

 static void mtx1_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }

 static void mtx1_power_off(void)
@@ -28,6 +28,8 @@
 #include <linux/mtd/physmap.h>
 #include <mtd/mtd-abi.h>

+#include <asm/mach-au1x00/au1xxx_eth.h>
+
 static struct gpio_keys_button mtx1_gpio_button[] = {
 	{
 		.gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
 	&mtx1_mtd,
 };

+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+	.phy_search_highest_addr	= 1,
+	.phy1_search_mac0		= 1,
+};
+
 static int __init mtx1_register_devices(void)
 {
 	int rc;

+	au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
+
 	rc = gpio_request(mtx1_gpio_button[0].gpio,
 				mtx1_gpio_button[0].desc);
 	if (rc < 0) {
@@ -36,8 +36,8 @@

 static void xxs1500_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }

 static void xxs1500_power_off(void)
@@ -11,15 +11,5 @@

 #ifndef __MIPS_PERF_EVENT_H__
 #define __MIPS_PERF_EVENT_H__
-
-/*
- * MIPS performance counters do not raise NMI upon overflow, a regular
- * interrupt will be signaled. Hence we can do the pending perf event
- * work at the tail of the irq handler.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
+/* Leave it empty here. The file is required by linux/perf_event.h */
 #endif /* __MIPS_PERF_EVENT_H__ */
@@ -17,29 +17,13 @@
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>

-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-	return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>

 #ifdef CONFIG_DYNAMIC_FTRACE

 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */

-#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }

+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }

+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
 	unsigned long ip = rec->ip;

 	/*
-	 * We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively.
+	 * If ip is in kernel space, no long call, otherwise, long call is
+	 * needed.
 	 */
-	if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-		/*
-		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * move $12, ra_address
-		 * jalr v1
-		 * sub sp, sp, 8
-		 * 1: offset = 5 instructions
-		 */
-		new = INSN_B_1F_5;
-#else
-		/*
-		 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * jalr v1
-		 * nop | move $12, ra_address | sub sp, sp, 8
-		 * 1: offset = 4 instructions
-		 */
-		new = INSN_B_1F_4;
-#endif
-	} else {
-		/*
-		 * move at, ra
-		 * jal _mcount --> nop
-		 */
-		new = INSN_NOP;
-	}
+	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
 	return ftrace_modify_code(ip, new);
 }

@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;

-	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+		insn_lui_v1_hi16_mcount;

 	return ftrace_modify_code(ip, new);
 }
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-				     unsigned long parent,
-				     unsigned long parent_addr,
-				     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-	unsigned long sp, ip, ra;
+	unsigned long sp, ip, tmp;
 	unsigned int code;
 	int faulted;

 	/*
-	 * For module, move the ip from calling site of mcount to the
-	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 * For module, move the ip from the return address after the
+	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_addr - (in_module(self_addr) ? 20 : 12);
+	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
 	 * ra, offset(sp)" instruction
 	 */
 	do {
-		ip -= 4;
-
 		/* get the code at "ip": code = *(unsigned int *)ip; */
 		safe_load_code(code, ip, faulted);

@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 		 * store the ra on the stack
 		 */
 		if ((code & S_R_SP) != S_R_SP)
-			return parent_addr;
+			return parent_ra_addr;

-	} while (((code & S_RA_SP) != S_RA_SP));
+		/* Move to the next instruction */
+		ip -= 4;
+	} while ((code & S_RA_SP) != S_RA_SP);

 	sp = fp + (code & OFFSET_MASK);

-	/* ra = *(unsigned long *)sp; */
-	safe_load_stack(ra, sp, faulted);
+	/* tmp = *(unsigned long *)sp; */
+	safe_load_stack(tmp, sp, faulted);
 	if (unlikely(faulted))
 		return 0;

-	if (ra == parent)
+	if (tmp == old_parent_ra)
 		return sp;
 	return 0;
 }
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   unsigned long fp)
 {
-	unsigned long old;
+	unsigned long old_parent_ra;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 	    &return_to_handler;
-	int faulted;
+	int faulted, insns;

 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;

 	/*
-	 * "parent" is the stack address saved the return address of the caller
-	 * of _mcount.
+	 * "parent_ra_addr" is the stack address saved the return address of
+	 * the caller of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
 	 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 * do it in ftrace_graph_caller of mcount.S.
 	 */

-	/* old = *parent; */
-	safe_load_stack(old, parent, faulted);
+	/* old_parent_ra = *parent_ra_addr; */
+	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-			(unsigned long)parent, fp);
+	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+			old_parent_ra, (unsigned long)parent_ra_addr, fp);
 	/*
 	 * If fails when getting the stack address of the non-leaf function's
 	 * ra, stop function graph tracer and return
 	 */
-	if (parent == 0)
+	if (parent_ra_addr == 0)
 		goto out;
 #endif
-	/* *parent = return_hooker; */
-	safe_store_stack(return_hooker, parent, faulted);
+	/* *parent_ra_addr = return_hooker; */
+	safe_store_stack(return_hooker, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;

-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-	    -EBUSY) {
-		*parent = old;
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+	    == -EBUSY) {
+		*parent_ra_addr = old_parent_ra;
 		return;
 	}

-	trace.func = self_addr;
+	/*
+	 * Get the recorded ip of the current mcount calling site in the
+	 * __mcount_loc section, which will be used to filter the function
+	 * entries configured through the tracing/set_graph_function interface.
+	 */
+
+	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
-		*parent = old;
+		*parent_ra_addr = old_parent_ra;
 	}
 	return;
 out:
@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mipspmu_enable(struct perf_event *event)
|
|
||||||
{
|
|
||||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
|
||||||
struct hw_perf_event *hwc = &event->hw;
|
|
||||||
int idx;
|
|
||||||
int err = 0;
|
|
||||||
|
|
||||||
-    /* To look for a free counter for this event. */
-    idx = mipspmu->alloc_counter(cpuc, hwc);
-    if (idx < 0) {
-        err = idx;
-        goto out;
-    }
-
-    /*
-     * If there is an event in the counter we are going to use then
-     * make sure it is disabled.
-     */
-    event->hw.idx = idx;
-    mipspmu->disable_event(idx);
-    cpuc->events[idx] = event;
-
-    /* Set the period for the event. */
-    mipspmu_event_set_period(event, hwc, idx);
-
-    /* Enable the event. */
-    mipspmu->enable_event(hwc, idx);
-
-    /* Propagate our changes to the userspace mapping. */
-    perf_event_update_userpage(event);
-
-out:
-    return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
                                  struct hw_perf_event *hwc,
                                  int idx)

@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
     unsigned long flags;
     int shift = 64 - TOTAL_BITS;
     s64 prev_raw_count, new_raw_count;
-    s64 delta;
+    u64 delta;
 
 again:
     prev_raw_count = local64_read(&hwc->prev_count);

@@ -231,34 +196,92 @@ again:
         return;
     }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+    struct hw_perf_event *hwc = &event->hw;
+
+    if (!mipspmu)
+        return;
+
+    if (flags & PERF_EF_RELOAD)
+        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+    hwc->state = 0;
+
+    /* Set the period for the event. */
+    mipspmu_event_set_period(event, hwc, hwc->idx);
+
+    /* Enable the event. */
+    mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+    struct hw_perf_event *hwc = &event->hw;
+
+    if (!mipspmu)
+        return;
+
+    if (!(hwc->state & PERF_HES_STOPPED)) {
+        /* We are working on a local event. */
+        mipspmu->disable_event(hwc->idx);
+        barrier();
+        mipspmu_event_update(event, hwc, hwc->idx);
+        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+    }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
+    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+    struct hw_perf_event *hwc = &event->hw;
+    int idx;
+    int err = 0;
+
+    perf_pmu_disable(event->pmu);
+
+    /* To look for a free counter for this event. */
+    idx = mipspmu->alloc_counter(cpuc, hwc);
+    if (idx < 0) {
+        err = idx;
+        goto out;
+    }
+
+    /*
+     * If there is an event in the counter we are going to use then
+     * make sure it is disabled.
+     */
+    event->hw.idx = idx;
+    mipspmu->disable_event(idx);
+    cpuc->events[idx] = event;
+
+    hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+    if (flags & PERF_EF_START)
+        mipspmu_start(event, PERF_EF_RELOAD);
+
+    /* Propagate our changes to the userspace mapping. */
+    perf_event_update_userpage(event);
+
+out:
+    perf_pmu_enable(event->pmu);
+    return err;
+}
+
+static void mipspmu_del(struct perf_event *event, int flags)
 {
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
     struct hw_perf_event *hwc = &event->hw;
     int idx = hwc->idx;
 
     WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
 
-    /* We are working on a local event. */
-    mipspmu->disable_event(idx);
-
-    barrier();
-
-    mipspmu_event_update(event, hwc, idx);
+    mipspmu_stop(event, PERF_EF_UPDATE);
     cpuc->events[idx] = NULL;
     clear_bit(idx, cpuc->used_mask);
 
     perf_event_update_userpage(event);
 }
 
-static void mipspmu_unthrottle(struct perf_event *event)
-{
-    struct hw_perf_event *hwc = &event->hw;
-
-    mipspmu->enable_event(hwc, hwc->idx);
-}
-
 static void mipspmu_read(struct perf_event *event)
 {
     struct hw_perf_event *hwc = &event->hw;

@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
     mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-    .enable     = mipspmu_enable,
-    .disable    = mipspmu_disable,
-    .unthrottle = mipspmu_unthrottle,
-    .read       = mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+    if (mipspmu)
+        mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+    if (mipspmu)
+        mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);

@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
     perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+    if (atomic_dec_and_mutex_lock(&active_events,
+                &pmu_reserve_mutex)) {
+        /*
+         * We must not call the destroy function with interrupts
+         * disabled.
+         */
+        on_each_cpu(reset_counters,
+            (void *)(long)mipspmu->num_counters, 1);
+        mipspmu_free_irq();
+        mutex_unlock(&pmu_reserve_mutex);
+    }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+    int err = 0;
+
+    switch (event->attr.type) {
+    case PERF_TYPE_RAW:
+    case PERF_TYPE_HARDWARE:
+    case PERF_TYPE_HW_CACHE:
+        break;
+
+    default:
+        return -ENOENT;
+    }
+
+    if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+        (event->cpu >= 0 && !cpu_online(event->cpu)))
+        return -ENODEV;
+
+    if (!atomic_inc_not_zero(&active_events)) {
+        if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+            atomic_dec(&active_events);
+            return -ENOSPC;
+        }
+
+        mutex_lock(&pmu_reserve_mutex);
+        if (atomic_read(&active_events) == 0)
+            err = mipspmu_get_irq();
+
+        if (!err)
+            atomic_inc(&active_events);
+        mutex_unlock(&pmu_reserve_mutex);
+    }
+
+    if (err)
+        return err;
+
+    err = __hw_perf_event_init(event);
+    if (err)
+        hw_perf_event_destroy(event);
+
+    return err;
+}
+
+static struct pmu pmu = {
+    .pmu_enable  = mipspmu_enable,
+    .pmu_disable = mipspmu_disable,
+    .event_init  = mipspmu_event_init,
+    .add         = mipspmu_add,
+    .del         = mipspmu_del,
+    .start       = mipspmu_start,
+    .stop        = mipspmu_stop,
+    .read        = mipspmu_read,
+};
+
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {

@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
 {
     struct hw_perf_event fake_hwc = event->hw;
 
-    if (event->pmu && event->pmu != &pmu)
-        return 0;
+    /* Allow mixed event group. So return 1 to pass validation. */
+    if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+        return 1;
 
     return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }

@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
     return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-    if (atomic_dec_and_mutex_lock(&active_events,
-                &pmu_reserve_mutex)) {
-        /*
-         * We must not call the destroy function with interrupts
-         * disabled.
-         */
-        on_each_cpu(reset_counters,
-            (void *)(long)mipspmu->num_counters, 1);
-        mipspmu_free_irq();
-        mutex_unlock(&pmu_reserve_mutex);
-    }
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-    int err = 0;
-
-    if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-        (event->cpu >= 0 && !cpu_online(event->cpu)))
-        return ERR_PTR(-ENODEV);
-
-    if (!atomic_inc_not_zero(&active_events)) {
-        if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-            atomic_dec(&active_events);
-            return ERR_PTR(-ENOSPC);
-        }
-
-        mutex_lock(&pmu_reserve_mutex);
-        if (atomic_read(&active_events) == 0)
-            err = mipspmu_get_irq();
-
-        if (!err)
-            atomic_inc(&active_events);
-        mutex_unlock(&pmu_reserve_mutex);
-    }
-
-    if (err)
-        return ERR_PTR(err);
-
-    err = __hw_perf_event_init(event);
-    if (err)
-        hw_perf_event_destroy(event);
-
-    return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-    if (mipspmu)
-        mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-    if (mipspmu)
-        mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,

@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-        u64 ip)
-{
-    if (entry->nr < PERF_MAX_STACK_DEPTH)
-        entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-            struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+            struct pt_regs *regs)
 {
 }

@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
     while (!kstack_end(sp)) {
         addr = *sp++;
         if (__kernel_text_address(addr)) {
-            callchain_store(entry, addr);
+            perf_callchain_store(entry, addr);
             if (entry->nr >= PERF_MAX_STACK_DEPTH)
                 break;
         }
     }
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-              struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+              struct pt_regs *regs)
 {
     unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
     unsigned long ra = regs->regs[31];
     unsigned long pc = regs->cp0_epc;
 
-    callchain_store(entry, PERF_CONTEXT_KERNEL);
     if (raw_show_trace || !__kernel_text_address(pc)) {
         unsigned long stack_page =
             (unsigned long)task_stack_page(current);

@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
         return;
     }
     do {
-        callchain_store(entry, pc);
+        perf_callchain_store(entry, pc);
         if (entry->nr >= PERF_MAX_STACK_DEPTH)
             break;
         pc = unwind_stack(current, &sp, pc, &ra);
     } while (pc);
 #else
-    callchain_store(entry, PERF_CONTEXT_KERNEL);
     save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-          struct perf_callchain_entry *entry)
-{
-    int is_user;
-
-    if (!regs)
-        return;
-
-    is_user = user_mode(regs);
-
-    if (!current || !current->pid)
-        return;
-
-    if (is_user && current->state != TASK_RUNNING)
-        return;
-
-    if (!is_user) {
-        perf_callchain_kernel(regs, entry);
-        if (current->mm)
-            regs = task_pt_regs(current);
-        else
-            regs = NULL;
-    }
-    if (regs)
-        perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-    struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-    entry->nr = 0;
-    perf_do_callchain(regs, entry);
-    return entry;
-}

@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
      * interrupt, not NMI.
      */
     if (handled == IRQ_HANDLED)
-        perf_event_do_pending();
+        irq_work_run();
 
 #ifdef CONFIG_MIPS_MT_SMP
     read_unlock(&pmuint_rwlock);

@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
         "CPU, irq %d%s\n", mipspmu->name, counters, irq,
         irq < 0 ? " (share with timer interrupt)" : "");
 
+    perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
     return 0;
 }
 early_initcall(init_hw_perf_events);
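
Note: the hunks above port the MIPS code from the old enable/disable/unthrottle hooks to the split add/del/start/stop callbacks, where PERF_HES_STOPPED and PERF_HES_UPTODATE track whether a counter is running and whether its count has been folded back into the event. The standalone C sketch below only mimics that state handshake with mock flags; the names pmu_add/pmu_start/pmu_stop/pmu_del and struct mock_event are invented for illustration and are not kernel API.

    #include <stdio.h>

    #define HES_STOPPED  (1 << 0)   /* counter is not currently counting */
    #define HES_UPTODATE (1 << 1)   /* count already includes the hw value */

    struct mock_event { unsigned state; long long count, hw; };

    static void pmu_start(struct mock_event *e) {      /* analogue of mipspmu_start() */
            e->state = 0;                               /* running, count not folded yet */
            e->hw = 0;                                  /* "program" the hardware counter */
    }

    static void pmu_stop(struct mock_event *e) {        /* analogue of mipspmu_stop() */
            if (!(e->state & HES_STOPPED)) {
                    e->count += e->hw;                   /* fold hardware value back */
                    e->state |= HES_STOPPED | HES_UPTODATE;
            }
    }

    static void pmu_add(struct mock_event *e, int start) {  /* analogue of mipspmu_add() */
            e->state = HES_STOPPED | HES_UPTODATE;           /* claimed but idle */
            if (start)
                    pmu_start(e);
    }

    static void pmu_del(struct mock_event *e) {          /* analogue of mipspmu_del() */
            pmu_stop(e);                                  /* leaves count up to date */
    }

    int main(void) {
            struct mock_event e = { 0, 0, 0 };
            pmu_add(&e, 1);
            e.hw = 1234;            /* pretend the hardware counted some events */
            pmu_del(&e);
            printf("count=%lld state=%#x\n", e.count, e.state);
            return 0;
    }
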
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
 
 static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
-    int err, tmp;
+    int err, tmp __maybe_unused;
     while (1) {
         lock_fpu_owner();
         own_fpu_inatomic(0);

@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 
 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
-    int err, tmp;
+    int err, tmp __maybe_unused;
     while (1) {
         lock_fpu_owner();
         own_fpu_inatomic(0);
|
||||||
*/
|
*/
|
||||||
static struct task_struct *cpu_idle_thread[NR_CPUS];
|
static struct task_struct *cpu_idle_thread[NR_CPUS];
|
||||||
|
|
||||||
|
struct create_idle {
|
||||||
|
struct work_struct work;
|
||||||
|
struct task_struct *idle;
|
||||||
|
struct completion done;
|
||||||
|
int cpu;
|
||||||
|
};
|
||||||
|
|
||||||
|
static void __cpuinit do_fork_idle(struct work_struct *work)
|
||||||
|
{
|
||||||
|
struct create_idle *c_idle =
|
||||||
|
container_of(work, struct create_idle, work);
|
||||||
|
|
||||||
|
c_idle->idle = fork_idle(c_idle->cpu);
|
||||||
|
complete(&c_idle->done);
|
||||||
|
}
|
||||||
|
|
||||||
int __cpuinit __cpu_up(unsigned int cpu)
|
int __cpuinit __cpu_up(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct task_struct *idle;
|
struct task_struct *idle;
|
||||||
|
@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
|
||||||
* Linux can schedule processes on this slave.
|
* Linux can schedule processes on this slave.
|
||||||
*/
|
*/
|
||||||
if (!cpu_idle_thread[cpu]) {
|
if (!cpu_idle_thread[cpu]) {
|
||||||
idle = fork_idle(cpu);
|
/*
|
||||||
cpu_idle_thread[cpu] = idle;
|
* Schedule work item to avoid forking user task
|
||||||
|
* Ported from arch/x86/kernel/smpboot.c
|
||||||
|
*/
|
||||||
|
struct create_idle c_idle = {
|
||||||
|
.cpu = cpu,
|
||||||
|
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
|
||||||
|
};
|
||||||
|
|
||||||
|
INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
|
||||||
|
schedule_work(&c_idle.work);
|
||||||
|
wait_for_completion(&c_idle.done);
|
||||||
|
idle = cpu_idle_thread[cpu] = c_idle.idle;
|
||||||
|
|
||||||
if (IS_ERR(idle))
|
if (IS_ERR(idle))
|
||||||
panic(KERN_ERR "Fork failed for CPU %d", cpu);
|
panic(KERN_ERR "Fork failed for CPU %d", cpu);
|
||||||
|
|
|
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
 static int __used noinline
 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 {
-    long cmd, arg1, arg2, arg3;
+    long cmd, arg1, arg2;
 
     cmd = regs.regs[4];
     arg1 = regs.regs[5];
     arg2 = regs.regs[6];
-    arg3 = regs.regs[7];
 
     switch (cmd) {
     case MIPS_ATOMIC_SET:

@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
         if (arg1 & 2)
             set_thread_flag(TIF_LOGADE);
         else
-            clear_thread_flag(TIF_FIXADE);
+            clear_thread_flag(TIF_LOGADE);
 
         return 0;
 
@@ -148,9 +148,9 @@ struct {
     spinlock_t tc_list_lock;
     struct list_head tc_list;   /* Thread contexts */
 } vpecontrol = {
-    .vpe_list_lock  = SPIN_LOCK_UNLOCKED,
+    .vpe_list_lock  = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
     .vpe_list       = LIST_HEAD_INIT(vpecontrol.vpe_list),
-    .tc_list_lock   = SPIN_LOCK_UNLOCKED,
+    .tc_list_lock   = __SPIN_LOCK_UNLOCKED(tc_list_lock),
     .tc_list        = LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
@@ -1,6 +1,7 @@
+if MACH_LOONGSON
+
 choice
     prompt "Machine Type"
-    depends on MACH_LOONGSON
 
 config LEMOTE_FULOONG2E
     bool "Lemote Fuloong(2e) mini-PC"

@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
 config LOONGSON_MC146818
     bool
     default n
+
+endif # MACH_LOONGSON
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
         strcat(arcs_cmdline, " ");
     }
 
-    if ((strstr(arcs_cmdline, "console=")) == NULL)
-        strcat(arcs_cmdline, " console=ttyS0,115200");
-    if ((strstr(arcs_cmdline, "root=")) == NULL)
-        strcat(arcs_cmdline, " root=/dev/hda1");
-
     prom_init_machtype();
 }
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
 
 void __init prom_init_machtype(void)
 {
-    char *p, str[MACHTYPE_LEN];
+    char *p, str[MACHTYPE_LEN + 1];
     int machtype = MACH_LEMOTE_FL2E;
 
     mips_machtype = LOONGSON_MACHTYPE;

@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
     }
     p += strlen("machtype=");
     strncpy(str, p, MACHTYPE_LEN);
+    str[MACHTYPE_LEN] = '\0';
     p = strstr(str, " ");
     if (p)
         *p = '\0';
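
Note: the machtype hunk above grows the buffer to MACHTYPE_LEN + 1 and stores a terminating NUL after strncpy(), because strncpy() does not terminate the destination when the source is at least as long as the limit. A minimal userspace illustration of the same pattern; the MACHTYPE_LEN value below is made up for the example, not taken from the kernel header.

    #include <stdio.h>
    #include <string.h>

    #define MACHTYPE_LEN 50              /* illustrative size only */

    int main(void) {
            const char *p = "machtype-string-that-may-be-quite-long followed by other arguments";
            char str[MACHTYPE_LEN + 1];          /* one extra byte for the NUL */

            strncpy(str, p, MACHTYPE_LEN);       /* copies at most MACHTYPE_LEN bytes... */
            str[MACHTYPE_LEN] = '\0';            /* ...so terminate explicitly */

            printf("%s\n", str);
            return 0;
    }
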
@@ -70,7 +70,7 @@
 
 
 #define COMPXSP \
-    unsigned xm; int xe; int xs; int xc
+    unsigned xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYSP \
     unsigned ym; int ye; int ys; int yc

@@ -104,7 +104,7 @@
 
 
 #define COMPXDP \
-    u64 xm; int xe; int xs; int xc
+    u64 xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYDP \
     u64 ym; int ye; int ys; int yc
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
     unsigned long max_zone_pfns[MAX_NR_ZONES];
-    unsigned long lastpfn;
+    unsigned long lastpfn __maybe_unused;
 
     pagetable_init();
 
@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
 static int scratchpad_offset(int i)
 {
     BUG();
+    /* Really unreachable, but evidently some GCC want this. */
+    return 0;
 }
 #endif
 /*
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
  *  RETURNS:       PCIBIOS_SUCCESSFUL  - success
  *
  ****************************************************************************/
-static int bpci_interrupt(int irq, void *dev_id)
+static irqreturn_t bpci_interrupt(int irq, void *dev_id)
 {
     struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
     unsigned int stat = preg->if_status;

@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
     /* write to clear all asserted interrupts */
     preg->if_status = stat;
 
-    return PCIBIOS_SUCCESSFUL;
+    return IRQ_HANDLED;
 }
 
 /*****************************************************************************
@@ -4,15 +4,11 @@ choice
 
 config PMC_MSP4200_EVAL
     bool "PMC-Sierra MSP4200 Eval Board"
-    select CEVT_R4K
-    select CSRC_R4K
     select IRQ_MSP_SLP
     select HW_HAS_PCI
 
 config PMC_MSP4200_GW
     bool "PMC-Sierra MSP4200 VoIP Gateway"
-    select CEVT_R4K
-    select CSRC_R4K
     select IRQ_MSP_SLP
     select HW_HAS_PCI
 
@@ -81,7 +81,7 @@ void __init plat_time_init(void)
     mips_hpt_frequency = cpu_rate/2;
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
     return MSP_INT_VPE0_TIMER;
 }
@@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
  * Atomically reads the value of @v.  Note that the guaranteed
  * useful range of an atomic_t is only 24 bits.
  */
-#define atomic_read(v)    ((v)->counter)
+#define atomic_read(v)    (ACCESS_ONCE((v)->counter))
 
 /**
  * atomic_set - set atomic variable
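
Note: the atomic_read() change above wraps the load in ACCESS_ONCE() so the compiler must perform a fresh load on every use instead of caching the value in a register across, say, a busy-wait loop. A plain C sketch of the idiom follows; the macro is written out by hand here rather than taken from a kernel header, and __typeof__ is a GCC/Clang extension.

    #include <stdio.h>

    /* Force a single, non-optimizable load of x each time the macro is used. */
    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static int flag;

    static int read_plain(void) { return flag; }              /* compiler may cache/hoist this load */
    static int read_once(void)  { return ACCESS_ONCE(flag); } /* always a real load from memory */

    int main(void) {
            flag = 1;
            printf("%d %d\n", read_plain(), read_once());
            return 0;
    }
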
@@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user_check(x, ptr, size) \
 ({ \
+    const __typeof__(ptr) __guc_ptr = (ptr); \
     int _e; \
-    if (likely(__access_ok((unsigned long) (ptr), (size)))) \
-        _e = __get_user_nocheck((x), (ptr), (size)); \
+    if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
+        _e = __get_user_nocheck((x), __guc_ptr, (size)); \
     else { \
         _e = -EFAULT; \
         (x) = (__typeof__(x))0; \
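
Note: the __get_user_check() hunk above evaluates the ptr argument exactly once into a local __guc_ptr, so an argument with side effects (for example p++) is no longer expanded twice inside the macro. A hedged userspace analogue of the same technique, with invented BAD_TWICE/GOOD_ONCE macros rather than the real uaccess helpers; the ({ ... }) statement expression is a GCC/Clang extension, as in the kernel macro.

    #include <stdio.h>

    static int calls;
    static int next(void) { return ++calls; }     /* argument with a visible side effect */

    /* Expands its argument twice, so next() runs twice per use. */
    #define BAD_TWICE(x)  ((x) + (x))

    /* Capture the argument once in a local, then reuse the local. */
    #define GOOD_ONCE(x)  ({ int _v = (x); _v + _v; })

    int main(void) {
            calls = 0;
            int bad = BAD_TWICE(next());
            printf("bad=%d after %d calls\n", bad, calls);    /* 1+2 after two calls */

            calls = 0;
            int good = GOOD_ONCE(next());
            printf("good=%d after %d call\n", good, calls);   /* 1+1 after one call */
            return 0;
    }
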
@@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
 
     /* invalidate the icache coverage on that region */
     mn10300_local_icache_inv_range2(addr + off, size);
-    smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+    smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
 }
 
 /**

@@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
      * directly */
     start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
     mn10300_icache_inv_range(start_page, end);
-    smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+    smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
     if (start_page == start)
         goto done;
     end = start_page;
@@ -33,9 +33,25 @@
 //
 //----------------------------------------------------------------------------
 #include <linux/cache.h>
+#include <linux/threads.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS	NR_CPUS
+#else
+#define NR_LPPACAS	64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS	1
+#endif
+
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
 struct lppaca {
@@ -240,6 +240,12 @@ struct machdep_calls {
      * claims to support kexec.
      */
     int (*machine_kexec_prepare)(struct kimage *image);
+
+    /* Called to perform the _real_ kexec.
+     * Do NOT allocate memory or fail here. We are past the point of
+     * no return.
+     */
+    void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
     save_ftrace_enabled = __ftrace_enabled_save();
 
-    default_machine_kexec(image);
+    if (ppc_md.machine_kexec)
+        ppc_md.machine_kexec(image);
+    else
+        default_machine_kexec(image);
 
     __ftrace_enabled_restore(save_ftrace_enabled);
 
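
Note: machine_kexec() now prefers a platform-provided ppc_md.machine_kexec hook and falls back to default_machine_kexec() when the pointer is NULL. The general pattern, an optional function pointer with a built-in default, in standalone C; the names below are invented for the sketch and are not kernel symbols.

    #include <stdio.h>

    struct machine_ops {
            void (*do_shutdown)(void);     /* optional platform override, may be NULL */
    };

    static void default_shutdown(void) { puts("default shutdown path"); }
    static void quirky_shutdown(void)  { puts("platform-specific shutdown path"); }

    static void machine_shutdown(const struct machine_ops *ops)
    {
            if (ops->do_shutdown)
                    ops->do_shutdown();    /* platform hook wins when present */
            else
                    default_shutdown();    /* otherwise use the generic code */
    }

    int main(void) {
            struct machine_ops generic = { NULL };
            struct machine_ops quirky  = { quirky_shutdown };

            machine_shutdown(&generic);
            machine_shutdown(&quirky);
            return 0;
    }
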
@@ -26,20 +26,6 @@ extern unsigned long __toc_start;
 
 #ifdef CONFIG_PPC_BOOK3S
 
-/*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS	NR_CPUS
-#else
-#define NR_LPPACAS	64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS	1
-#endif
-
 /*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
         prime_debug_regs(new_thread);
 }
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
     if (thread->dabr) {

@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
         set_dabr(0);
     }
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)

@@ -670,11 +672,11 @@ void flush_thread(void)
 {
     discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
     flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
     set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
@@ -1516,7 +1516,8 @@ int start_topology_update(void)
 {
     int rc = 0;
 
-    if (firmware_has_feature(FW_FEATURE_VPHN) &&
+    /* Disabled until races with load balancing are fixed */
+    if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
         get_lppaca()->shared_proc) {
         vphn_enabled = 1;
         setup_cpu_associativity_change_counters();
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * neesd to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
              pte_t *ptep, unsigned long pte, int huge)
 {
-    struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+    struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
     unsigned long vsid, vaddr;
     unsigned int psize;
     int ssize;

@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
      */
     if (!batch->active) {
         flush_hash_page(vaddr, rpte, psize, ssize, 0);
+        put_cpu_var(ppc64_tlb_batch);
         return;
     }
 

@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
     batch->index = ++i;
     if (i >= PPC64_TLB_BATCH_NR)
         __flush_tlb_pending(batch);
+    put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
     pft_size[0] = 0;		/* NUMA CEC cookie, 0 for non NUMA */
     pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
-    for (i = 0; i < NR_CPUS; i++) {
-        if (lppaca_of(i).dyn_proc_status >= 2)
+    for (i = 0; i < NR_LPPACAS; i++) {
+        if (lppaca[i].dyn_proc_status >= 2)
             continue;
 
         snprintf(p, 32 - (p - buf), "@%d", i);

@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 
         dt_prop_str(dt, "device_type", device_type_cpu);
 
-        index = lppaca_of(i).dyn_hv_phys_proc_index;
+        index = lppaca[i].dyn_hv_phys_proc_index;
         d = &xIoHriProcessorVpd[index];
 
         dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void)
      * on but calling this function multiple times is fine.
      */
     identify_cpu(0, mfspr(SPRN_PVR));
+    initialise_paca(&boot_paca, 0);
 
     powerpc_firmware_features |= FW_FEATURE_ISERIES;
     powerpc_firmware_features |= FW_FEATURE_LPAR;
@@ -3,7 +3,7 @@
 
 #include <asm-generic/sections.h>
 
-extern void __nosave_begin, __nosave_end;
+extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char _ebss[];
@@ -14,7 +14,7 @@
 #include <linux/io.h>
 #include <linux/sh_timer.h>
 #include <linux/serial_sci.h>
-#include <asm/machtypes.h>
+#include <generated/machtypes.h>
 
 static struct resource rtc_resources[] = {
     [0] = {

@@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
 
 void __init plat_early_device_setup(void)
 {
+    struct platform_device *dev[1];
+
     if (mach_is_rts7751r2d()) {
         scif_platform_data.scscr |= SCSCR_CKE1;
-        early_platform_add_devices(&scif_device, 1);
+        dev[0] = &scif_device;
+        early_platform_add_devices(dev, 1);
     } else {
-        early_platform_add_devices(&sci_device, 1);
-        early_platform_add_devices(&scif_device, 1);
+        dev[0] = &sci_device;
+        early_platform_add_devices(dev, 1);
+        dev[0] = &scif_device;
+        early_platform_add_devices(dev, 1);
     }
 
     early_platform_add_devices(sh7750_early_devices,
@@ -10,6 +10,16 @@
 void __delay(unsigned long loops)
 {
     __asm__ __volatile__(
+        /*
+         * ST40-300 appears to have an issue with this code,
+         * normally taking two cycles each loop, as with all
+         * other SH variants.  If however the branch and the
+         * delay slot straddle an 8 byte boundary, this increases
+         * to 3 cycles.
+         * This align directive ensures this doesn't occur.
+         */
+        ".balign 8\n\t"
+
         "tst	%0, %0\n\t"
         "1:\t"
         "bf/s	1b\n\t"
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
         kunmap_atomic(vfrom, KM_USER0);
     }
 
-    if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+    if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+        (vma->vm_flags & VM_EXEC))
         __flush_purge_region(vto, PAGE_SIZE);
 
     kunmap_atomic(vto, KM_USER1);
@@ -62,7 +62,12 @@ int main(int argc, char *argv[])
     if (fseek(f, -4L, SEEK_END)) {
         perror(argv[1]);
     }
-    fread(&olen, sizeof olen, 1, f);
+
+    if (fread(&olen, sizeof(olen), 1, f) != 1) {
+        perror(argv[1]);
+        return 1;
+    }
+
     ilen = ftell(f);
     olen = getle32(&olen);
     fclose(f);
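
Note: the hunk above stops ignoring fread()'s return value; fread() reports how many complete items it read, and anything other than the requested count means the 4-byte trailer was not actually read, so the tool must bail out instead of using garbage. A small hedged example of the same check in plain C; the file layout assumed here (a 32-bit value in the last four bytes) is just for illustration.

    #include <stdio.h>
    #include <stdint.h>

    int main(int argc, char *argv[])
    {
            uint32_t olen;
            FILE *f;

            if (argc < 2) {
                    fprintf(stderr, "usage: %s file\n", argv[0]);
                    return 1;
            }
            f = fopen(argv[1], "rb");
            if (!f) {
                    perror(argv[1]);
                    return 1;
            }
            if (fseek(f, -4L, SEEK_END) ||
                fread(&olen, sizeof(olen), 1, f) != 1) {   /* demand exactly one item */
                    fprintf(stderr, "%s: could not read trailer\n", argv[1]);
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("trailing 32-bit value (host byte order): %u\n", (unsigned)olen);
            return 0;
    }
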
@@ -0,0 +1,6 @@
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
@@ -36,6 +36,11 @@
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
+#define NHM_C3_AUTO_DEMOTE		(1UL << 25)
+#define NHM_C1_AUTO_DEMOTE		(1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE		(1UL << 25)
+
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 
@@ -176,7 +176,7 @@ struct bau_msg_payload {
 struct bau_msg_header {
     unsigned int dest_subnodeid:6;	/* must be 0x10, for the LB */
     /* bits 5:0 */
-    unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */
+    unsigned int base_dest_nodeid:15; /* nasid of the */
     /* bits 20:6 */			  /* first bit in uvhub map */
     unsigned int command:8;	/* message type */
     /* bits 28:21 */
Some files were not shown because too many files changed in this diff.