2007-05-03 12:55:52 +04:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/blkdev.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
|
|
|
#include <linux/gfp.h>
|
2007-05-03 12:55:52 +04:00
|
|
|
#include <scsi/scsi_host.h>
|
|
|
|
#include <linux/ata.h>
|
|
|
|
#include <linux/libata.h>
|
|
|
|
|
|
|
|
#include <asm/dma.h>
|
|
|
|
#include <asm/ecard.h>
|
|
|
|
|
|
|
|
#define DRV_NAME		"pata_icside"

/* Offset of the card identification bytes within the podule space */
#define ICS_IDENT_OFFSET	0x2280

/* Version 5 PCB: single interrupt status / enable-latch register pair */
#define ICS_ARCIN_V5_INTRSTAT	0x0000
#define ICS_ARCIN_V5_INTROFFSET	0x0004

/*
 * Version 6 PCB: each of the two ports has its own interrupt
 * enable-latch (INTROFFSET) and status (INTRSTAT) register.
 */
#define ICS_ARCIN_V6_INTROFFSET_1	0x2200
#define ICS_ARCIN_V6_INTRSTAT_1		0x2290
#define ICS_ARCIN_V6_INTROFFSET_2	0x3200
#define ICS_ARCIN_V6_INTRSTAT_2		0x3290
|
|
|
|
|
|
|
|
/*
 * Layout of one IDE port within the card's address space.
 */
struct portinfo {
	unsigned int dataoffset;	/* offset of the taskfile register block */
	unsigned int ctrloffset;	/* offset of the control/altstatus register */
	unsigned int stepping;		/* shift between consecutive taskfile registers */
};
|
|
|
|
|
|
|
|
/* Register layout for the single port on a version 5 PCB */
static const struct portinfo pata_icside_portinfo_v5 = {
	.dataoffset	= 0x2800,
	.ctrloffset	= 0x2b80,
	.stepping	= 6,
};

/* Register layout for the first port on a version 6 PCB */
static const struct portinfo pata_icside_portinfo_v6_1 = {
	.dataoffset	= 0x2000,
	.ctrloffset	= 0x2380,
	.stepping	= 6,
};

/* Register layout for the second port on a version 6 PCB */
static const struct portinfo pata_icside_portinfo_v6_2 = {
	.dataoffset	= 0x3000,
	.ctrloffset	= 0x3380,
	.stepping	= 6,
};
|
|
|
|
|
|
|
|
/*
 * Per-card driver state, shared between both ports via
 * ata_host->private_data.
 */
struct pata_icside_state {
	void __iomem *irq_port;		/* base for interrupt enable/status registers */
	void __iomem *ioc_base;		/* IOC register base (port/EASI select) - v6 only */
	unsigned int type;		/* ICS_TYPE_* card variant */
	unsigned int dma;		/* IOMD DMA channel, or NO_DMA */
	struct {
		u8 port_sel;		/* value written to ioc_base to route DMA here */
		u8 disabled;		/* set when postreset found no devices */
		unsigned int speed[ATA_MAX_DEVICES];	/* per-device DMA cycle time (ns) */
	} port[2];
};
|
|
|
|
|
2007-05-10 22:32:36 +04:00
|
|
|
/*
 * Probe-time bundle passed between the per-variant register routines
 * and pata_icside_add_ports(); not retained after probe.
 */
struct pata_icside_info {
	struct pata_icside_state *state;
	struct expansion_card	*ec;
	void __iomem		*base;		/* mapped taskfile register base */
	void __iomem		*irqaddr;	/* interrupt status byte (v5 only) */
	unsigned int		irqmask;	/* mask applied to *irqaddr */
	const expansioncard_ops_t *irqops;	/* card IRQ enable/disable ops */
	unsigned int		mwdma_mask;	/* MW DMA modes usable (0 if no DMA) */
	unsigned int		nr_ports;	/* 1 for v5, 2 for v6 */
	const struct portinfo	*port[2];
	unsigned long		raw_base;	/* physical base, for port descriptions */
	unsigned long		raw_ioc_base;
};
|
|
|
|
|
2007-05-03 12:55:52 +04:00
|
|
|
/* Card variants, decoded from the identification bytes at probe time */
#define ICS_TYPE_A3IN	0		/* unsupported */
#define ICS_TYPE_A3USER	1		/* unsupported */
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)	/* identification failed */
|
|
|
|
|
|
|
|
/* ---------------- Version 5 PCB Support Functions --------------------- */
|
|
|
|
/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
|
|
|
|
* Purpose : enable interrupts from card
|
|
|
|
*/
|
|
|
|
static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
|
|
|
|
{
|
|
|
|
struct pata_icside_state *state = ec->irq_data;
|
|
|
|
|
|
|
|
writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
|
|
|
|
* Purpose : disable interrupts from card
|
|
|
|
*/
|
|
|
|
static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
|
|
|
|
{
|
|
|
|
struct pata_icside_state *state = ec->irq_data;
|
|
|
|
|
|
|
|
readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Expansion-card IRQ callbacks for the version 5 PCB (no pending hook) */
static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
	.irqenable	= pata_icside_irqenable_arcin_v5,
	.irqdisable	= pata_icside_irqdisable_arcin_v5,
};
|
|
|
|
|
|
|
|
|
|
|
|
/* ---------------- Version 6 PCB Support Functions --------------------- */
|
|
|
|
/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
|
|
|
|
* Purpose : enable interrupts from card
|
|
|
|
*/
|
|
|
|
static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
|
|
|
|
{
|
|
|
|
struct pata_icside_state *state = ec->irq_data;
|
|
|
|
void __iomem *base = state->irq_port;
|
|
|
|
|
|
|
|
if (!state->port[0].disabled)
|
|
|
|
writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
|
|
|
|
if (!state->port[1].disabled)
|
|
|
|
writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
|
|
|
|
* Purpose : disable interrupts from card
|
|
|
|
*/
|
|
|
|
static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
|
|
|
|
{
|
|
|
|
struct pata_icside_state *state = ec->irq_data;
|
|
|
|
|
|
|
|
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
|
|
|
|
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Prototype: pata_icside_irqprobe(struct expansion_card *ec)
|
|
|
|
* Purpose : detect an active interrupt from card
|
|
|
|
*/
|
|
|
|
static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
|
|
|
|
{
|
|
|
|
struct pata_icside_state *state = ec->irq_data;
|
|
|
|
|
|
|
|
return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
|
|
|
|
readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Expansion-card IRQ callbacks for the version 6 PCB */
static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
	.irqenable	= pata_icside_irqenable_arcin_v6,
	.irqdisable	= pata_icside_irqdisable_arcin_v6,
	.irqpending	= pata_icside_irqpending_arcin_v6,
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SG-DMA support.
|
|
|
|
*
|
|
|
|
* Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
|
|
|
|
* There is only one DMA controller per card, which means that only
|
|
|
|
* one drive can be accessed at one time. NOTE! We do not enforce that
|
|
|
|
* here, but we rely on the main IDE driver spotting that both
|
|
|
|
* interfaces use the same IRQ, which should guarantee this.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Configure the IOMD to give the appropriate timings for the transfer
|
|
|
|
* mode being requested. We take the advice of the ATA standards, and
|
|
|
|
* calculate the cycle time based on the transfer mode, and the EIDE
|
|
|
|
* MW DMA specs that the drive provides in the IDENTIFY command.
|
|
|
|
*
|
|
|
|
* We have the following IOMD DMA modes to choose from:
|
|
|
|
*
|
|
|
|
* Type Active Recovery Cycle
|
|
|
|
* A 250 (250) 312 (550) 562 (800)
|
|
|
|
* B 187 (200) 250 (550) 437 (750)
|
|
|
|
* C 125 (125) 125 (375) 250 (500)
|
|
|
|
* D 62 (50) 125 (375) 187 (425)
|
|
|
|
*
|
|
|
|
* (figures in brackets are actual measured timings on DIOR/DIOW)
|
|
|
|
*
|
|
|
|
* However, we also need to take care of the read/write active and
|
|
|
|
* recovery timings:
|
|
|
|
*
|
|
|
|
* Read Write
|
|
|
|
* Mode Active -- Recovery -- Cycle IOMD type
|
|
|
|
* MW0 215 50 215 480 A
|
|
|
|
* MW1 80 50 50 150 C
|
|
|
|
* MW2 70 25 25 120 C
|
|
|
|
*/
|
|
|
|
static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev)
|
|
|
|
{
|
|
|
|
struct pata_icside_state *state = ap->host->private_data;
|
|
|
|
struct ata_timing t;
|
|
|
|
unsigned int cycle;
|
|
|
|
char iomd_type;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* DMA is based on a 16MHz clock
|
|
|
|
*/
|
|
|
|
if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Choose the IOMD cycle timing which ensure that the interface
|
|
|
|
* satisfies the measured active, recovery and cycle times.
|
|
|
|
*/
|
|
|
|
if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
|
|
|
|
iomd_type = 'D', cycle = 187;
|
|
|
|
else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
|
|
|
|
iomd_type = 'C', cycle = 250;
|
|
|
|
else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
|
|
|
|
iomd_type = 'B', cycle = 437;
|
|
|
|
else
|
|
|
|
iomd_type = 'A', cycle = 562;
|
|
|
|
|
|
|
|
ata_dev_printk(adev, KERN_INFO, "timings: act %dns rec %dns cyc %dns (%c)\n",
|
|
|
|
t.active, t.recover, t.cycle, iomd_type);
|
|
|
|
|
|
|
|
state->port[ap->port_no].speed[adev->devno] = cycle;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
|
|
|
|
{
|
|
|
|
struct ata_port *ap = qc->ap;
|
|
|
|
struct pata_icside_state *state = ap->host->private_data;
|
|
|
|
unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We are simplex; BUG if we try to fiddle with DMA
|
|
|
|
* while it's active.
|
|
|
|
*/
|
|
|
|
BUG_ON(dma_channel_active(state->dma));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Route the DMA signals to the correct interface
|
|
|
|
*/
|
|
|
|
writeb(state->port[ap->port_no].port_sel, state->ioc_base);
|
|
|
|
|
|
|
|
set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
|
2008-12-08 22:25:28 +03:00
|
|
|
set_dma_sg(state->dma, qc->sg, qc->n_elem);
|
2007-05-03 12:55:52 +04:00
|
|
|
set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
|
|
|
|
|
|
|
|
/* issue r/w command */
|
2008-04-07 17:47:16 +04:00
|
|
|
ap->ops->sff_exec_command(ap, &qc->tf);
|
2007-05-03 12:55:52 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
|
|
|
|
{
|
|
|
|
struct ata_port *ap = qc->ap;
|
|
|
|
struct pata_icside_state *state = ap->host->private_data;
|
|
|
|
|
|
|
|
BUG_ON(dma_channel_active(state->dma));
|
|
|
|
enable_dma(state->dma);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
|
|
|
|
{
|
|
|
|
struct ata_port *ap = qc->ap;
|
|
|
|
struct pata_icside_state *state = ap->host->private_data;
|
|
|
|
|
|
|
|
disable_dma(state->dma);
|
|
|
|
|
|
|
|
/* see ata_bmdma_stop */
|
2008-05-30 01:10:58 +04:00
|
|
|
ata_sff_dma_pause(ap);
|
2007-05-03 12:55:52 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static u8 pata_icside_bmdma_status(struct ata_port *ap)
|
|
|
|
{
|
|
|
|
struct pata_icside_state *state = ap->host->private_data;
|
|
|
|
void __iomem *irq_port;
|
|
|
|
|
|
|
|
irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 :
|
|
|
|
ICS_ARCIN_V6_INTRSTAT_1);
|
|
|
|
|
|
|
|
return readb(irq_port) & 1 ? ATA_DMA_INTR : 0;
|
|
|
|
}
|
|
|
|
|
2007-05-10 22:32:36 +04:00
|
|
|
static int icside_dma_init(struct pata_icside_info *info)
|
2007-05-03 12:55:52 +04:00
|
|
|
{
|
2007-05-10 22:32:36 +04:00
|
|
|
struct pata_icside_state *state = info->state;
|
|
|
|
struct expansion_card *ec = info->ec;
|
2007-05-03 12:55:52 +04:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ATA_MAX_DEVICES; i++) {
|
|
|
|
state->port[0].speed[i] = 480;
|
|
|
|
state->port[1].speed[i] = 480;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
|
|
|
|
state->dma = ec->dma;
|
2009-03-14 23:38:24 +03:00
|
|
|
info->mwdma_mask = ATA_MWDMA2;
|
2007-05-03 12:55:52 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* SCSI host template; DMA boundary comes from the IOMD controller */
static struct scsi_host_template pata_icside_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= SCSI_MAX_SG_CHAIN_SEGMENTS,
	.dma_boundary		= IOMD_DMA_BOUNDARY,
};
|
|
|
|
|
2007-10-14 04:12:39 +04:00
|
|
|
static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
|
2007-05-03 12:55:52 +04:00
|
|
|
{
|
2007-10-14 04:12:39 +04:00
|
|
|
struct ata_port *ap = link->ap;
|
2007-05-03 12:55:52 +04:00
|
|
|
struct pata_icside_state *state = ap->host->private_data;
|
|
|
|
|
2007-08-06 19:10:54 +04:00
|
|
|
if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
|
2008-04-07 17:47:16 +04:00
|
|
|
return ata_sff_postreset(link, classes);
|
2007-05-03 12:55:52 +04:00
|
|
|
|
|
|
|
state->port[ap->port_no].disabled = 1;
|
|
|
|
|
|
|
|
if (state->type == ICS_TYPE_V6) {
|
|
|
|
/*
|
|
|
|
* Disable interrupts from this port, otherwise we
|
|
|
|
* receive spurious interrupts from the floating
|
|
|
|
* interrupt line.
|
|
|
|
*/
|
|
|
|
void __iomem *irq_port = state->irq_port +
|
|
|
|
(ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1);
|
|
|
|
readb(irq_port);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Port operations: standard BMDMA behaviour with the IOMD SG-DMA
 * engine substituted for the PCI BMDMA registers.
 */
static struct ata_port_operations pata_icside_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	/* no need to build any PRD tables for DMA */
	.qc_prep		= ata_noop_qc_prep,
	.sff_data_xfer		= ata_sff_data_xfer_noirq,
	.bmdma_setup		= pata_icside_bmdma_setup,
	.bmdma_start		= pata_icside_bmdma_start,
	.bmdma_stop		= pata_icside_bmdma_stop,
	.bmdma_status		= pata_icside_bmdma_status,

	.cable_detect		= ata_cable_40wire,
	.set_dmamode		= pata_icside_set_dmamode,
	.postreset		= pata_icside_postreset,

	.port_start		= ATA_OP_NULL,	/* don't need PRD table */
};
|
|
|
|
|
2007-05-10 22:32:36 +04:00
|
|
|
static void __devinit
|
2007-08-18 08:14:55 +04:00
|
|
|
pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
|
2007-10-14 04:12:39 +04:00
|
|
|
struct pata_icside_info *info,
|
|
|
|
const struct portinfo *port)
|
2007-05-03 12:55:52 +04:00
|
|
|
{
|
2007-08-18 08:14:55 +04:00
|
|
|
struct ata_ioports *ioaddr = &ap->ioaddr;
|
2007-10-14 04:12:39 +04:00
|
|
|
void __iomem *cmd = base + port->dataoffset;
|
2007-05-03 12:55:52 +04:00
|
|
|
|
|
|
|
ioaddr->cmd_addr = cmd;
|
2007-10-14 04:12:39 +04:00
|
|
|
ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping);
|
|
|
|
ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping);
|
|
|
|
ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping);
|
|
|
|
ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping);
|
|
|
|
ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping);
|
|
|
|
ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping);
|
|
|
|
ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping);
|
|
|
|
ioaddr->device_addr = cmd + (ATA_REG_DEVICE << port->stepping);
|
|
|
|
ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping);
|
|
|
|
ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping);
|
|
|
|
|
|
|
|
ioaddr->ctl_addr = base + port->ctrloffset;
|
2007-05-03 12:55:52 +04:00
|
|
|
ioaddr->altstatus_addr = ioaddr->ctl_addr;
|
2007-08-18 08:14:55 +04:00
|
|
|
|
|
|
|
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
|
2007-10-14 04:12:39 +04:00
|
|
|
info->raw_base + port->dataoffset,
|
|
|
|
info->raw_base + port->ctrloffset);
|
2007-08-18 08:14:55 +04:00
|
|
|
|
|
|
|
if (info->raw_ioc_base)
|
|
|
|
ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
|
2007-05-03 12:55:52 +04:00
|
|
|
}
|
|
|
|
|
2007-05-10 22:32:36 +04:00
|
|
|
/*
 * Map and describe a version 5 card: one port, interrupt status
 * polled via the MEMC region.
 */
static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
{
	struct pata_icside_state *st = info->state;
	void __iomem *mem;

	mem = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0);
	if (!mem)
		return -ENOMEM;

	st->irq_port = mem;

	info->base	= mem;
	info->irqaddr	= mem + ICS_ARCIN_V5_INTRSTAT;
	info->irqmask	= 1;
	info->irqops	= &pata_icside_ops_arcin_v5;
	info->nr_ports	= 1;
	info->port[0]	= &pata_icside_portinfo_v5;

	info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);

	return 0;
}
|
|
|
|
|
2007-05-10 22:32:36 +04:00
|
|
|
/*
 * Map and describe a version 6 card: two ports, per-port interrupt
 * registers, optional EASI region, and IOMD SG-DMA support.
 */
static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
{
	struct pata_icside_state *st = info->state;
	struct expansion_card *ec = info->ec;
	void __iomem *ioc_base;
	void __iomem *easi_base;
	unsigned int sel = 0;

	ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (!ioc_base)
		return -ENOMEM;

	/* Without an EASI region, everything lives in IOCFAST space. */
	easi_base = ioc_base;

	if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
		easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
		if (!easi_base)
			return -ENOMEM;

		/*
		 * Enable access to the EASI region.
		 */
		sel = 1 << 5;
	}

	writeb(sel, ioc_base);

	st->irq_port		= easi_base;
	st->ioc_base		= ioc_base;
	st->port[0].port_sel	= sel;
	st->port[1].port_sel	= sel | 1;

	info->base	= easi_base;
	info->irqops	= &pata_icside_ops_arcin_v6;
	info->nr_ports	= 2;
	info->port[0]	= &pata_icside_portinfo_v6_1;
	info->port[1]	= &pata_icside_portinfo_v6_2;

	info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
	info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);

	return icside_dma_init(info);
}
|
|
|
|
|
|
|
|
/*
 * Allocate the ata_host, wire up the card's IRQ handling, configure
 * each port from @info and activate the host.
 */
static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
{
	struct expansion_card *ec = info->ec;
	struct ata_host *host;
	int port;

	if (info->irqaddr) {
		ec->irqaddr = info->irqaddr;
		ec->irqmask = info->irqmask;
	}
	if (info->irqops)
		ecard_setirq(ec, info->irqops, info->state);

	/*
	 * Be on the safe side - disable interrupts
	 */
	ec->ops->irqdisable(ec, ec->irq);

	host = ata_host_alloc(&ec->dev, info->nr_ports);
	if (!host)
		return -ENOMEM;

	host->private_data = info->state;
	/* one DMA channel shared by both ports */
	host->flags = ATA_HOST_SIMPLEX;

	for (port = 0; port < info->nr_ports; port++) {
		struct ata_port *ap = host->ports[port];

		ap->pio_mask	 = ATA_PIO4;
		ap->mwdma_mask	 = info->mwdma_mask;
		ap->flags	|= ATA_FLAG_SLAVE_POSS;
		ap->ops		 = &pata_icside_port_ops;

		pata_icside_setup_ioaddr(ap, info->base, info,
					 info->port[port]);
	}

	return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
				 &pata_icside_sht);
}
|
|
|
|
|
|
|
|
/*
 * Probe an ICS IDE expansion card.
 *
 * Claims the card's resources, reads the interface-type ID bits from the
 * IOCFAST region, registers the matching hardware description (v5 or v6)
 * and hands the resulting ports to libata via pata_icside_add_ports().
 *
 * Returns 0 on success or a negative errno.  On success the claimed
 * resources are intentionally kept (released later in remove); on any
 * failure after they were claimed, they are released here.
 */
static int __devinit
pata_icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct pata_icside_state *state;
	struct pata_icside_info info;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	/* devm-managed: freed automatically when the device goes away */
	state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type = ICS_TYPE_NOTYPE;
	state->dma = NO_DMA;

	/*
	 * Identify the interface variant: one ID bit is read from each of
	 * four word-spaced locations in the IOCFAST region and assembled
	 * into a 4-bit type code.  If the region can't be mapped, the
	 * type stays ICS_TYPE_NOTYPE and the switch below rejects it.
	 */
	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (idmem) {
		unsigned int type;

		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		ecardm_iounmap(ec, idmem);

		state->type = type;
	}

	memset(&info, 0, sizeof(info));
	info.state = state;
	info.ec = ec;

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = pata_icside_register_v5(&info);
		break;

	case ICS_TYPE_V6:
		ret = pata_icside_register_v6(&info);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0)
		ret = pata_icside_add_ports(&info);

	/* Success: skip the release, resources stay claimed until remove */
	if (ret == 0)
		goto out;

 release:
	ecard_release_resources(ec);
 out:
	return ret;
}
|
|
|
|
|
|
|
|
static void pata_icside_shutdown(struct expansion_card *ec)
|
|
|
|
{
|
|
|
|
struct ata_host *host = ecard_get_drvdata(ec);
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disable interrupts from this card. We need to do
|
|
|
|
* this before disabling EASI since we may be accessing
|
|
|
|
* this register via that region.
|
|
|
|
*/
|
|
|
|
local_irq_save(flags);
|
2007-05-10 19:46:13 +04:00
|
|
|
ec->ops->irqdisable(ec, ec->irq);
|
2007-05-03 12:55:52 +04:00
|
|
|
local_irq_restore(flags);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset the ROM pointer so that we can read the ROM
|
|
|
|
* after a soft reboot. This also disables access to
|
|
|
|
* the IDE taskfile via the EASI region.
|
|
|
|
*/
|
|
|
|
if (host) {
|
|
|
|
struct pata_icside_state *state = host->private_data;
|
|
|
|
if (state->ioc_base)
|
|
|
|
writeb(0, state->ioc_base);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Tear down a card on driver unbind: detach the libata host, quiesce
 * the hardware, release the DMA channel (if one was claimed) and give
 * back the card's resources.
 */
static void __devexit pata_icside_remove(struct expansion_card *ec)
{
	struct ata_host *host = ecard_get_drvdata(ec);
	struct pata_icside_state *state = host->private_data;

	ata_host_detach(host);

	pata_icside_shutdown(ec);

	/*
	 * The drvdata is deliberately left in place: devres/libata uses
	 * it to free the ata_host structure.
	 */
	if (state->dma != NO_DMA)
		free_dma(state->dma);

	ecard_release_resources(ec);
}
|
|
|
|
|
|
|
|
/* Expansion cards (manufacturer/product ID pairs) handled by this driver */
static const struct ecard_id pata_icside_ids[] = {
	{ MANU_ICS, PROD_ICS_IDE },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }	/* terminator */
};
|
|
|
|
|
|
|
|
/* Expansion-card driver glue: probe/remove/shutdown entry points */
static struct ecard_driver pata_icside_driver = {
	.probe		= pata_icside_probe,
	.remove		= __devexit_p(pata_icside_remove),
	.shutdown	= pata_icside_shutdown,
	.id_table	= pata_icside_ids,
	.drv = {
		.name	= DRV_NAME,
	},
};
|
|
|
|
|
|
|
|
/* Module entry point: register the expansion-card driver with the ecard core */
static int __init pata_icside_init(void)
{
	return ecard_register_driver(&pata_icside_driver);
}
|
|
|
|
|
|
|
|
/* Module exit point: unregister the expansion-card driver */
static void __exit pata_icside_exit(void)
{
	ecard_remove_driver(&pata_icside_driver);
}
|
|
|
|
|
|
|
|
/* Module metadata and init/exit registration */
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS PATA driver");

module_init(pata_icside_init);
module_exit(pata_icside_exit);
|