diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e513c3158ad9..503f189dab3b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -165,7 +165,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs	:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs	:= libata-core.o libata-scsi.o libata-bmdma.o
+libata-objs	:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 597e9e8bcd2c..9de48dd4234a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -65,7 +65,6 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
 					struct ata_device *dev,
 					u16 heads,
 					u16 sectors);
-static int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
 					 struct ata_device *dev);
 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
@@ -409,7 +408,7 @@ static const char *sata_spd_string(unsigned int spd)
 	return spd_str[spd - 1];
 }
 
-static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
+void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
 {
 	if (ata_dev_enabled(dev)) {
 		printk(KERN_WARNING "ata%u: dev %u disabled\n",
@@ -961,6 +960,7 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  *	@ap: Port to which the command is sent
  *	@dev: Device to which the command is sent
  *	@tf: Taskfile registers for the command and the result
+ *	@cdb: CDB for packet command
  *	@dma_dir: Data tranfer direction of the command
  *	@buf: Data buffer of the command
  *	@buflen: Length of data buffer
@@ -975,10 +975,9 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  *	None.  Should be called with kernel context, might sleep.
  */
 
-static unsigned
-ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
-		  struct ata_taskfile *tf,
-		  int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+			   struct ata_taskfile *tf, const u8 *cdb,
+			   int dma_dir, void *buf, unsigned int buflen)
 {
 	u8 command = tf->command;
 	struct ata_queued_cmd *qc;
@@ -992,6 +991,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	BUG_ON(qc == NULL);
 
 	qc->tf = *tf;
+	if (cdb)
+		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
 		ata_sg_init_one(qc, buf, buflen);
@@ -1042,7 +1043,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	 *
 	 * Kill the following code as soon as those drivers are fixed.
	 */
-	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
+	if (ap->flags & ATA_FLAG_DISABLED) {
 		err_mask |= AC_ERR_SYSTEM;
 		ata_port_probe(ap);
 	}
@@ -1141,7 +1142,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 	tf.protocol = ATA_PROT_PIO;
 
-	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
+	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE,
 				     id, sizeof(id[0]) * ATA_ID_WORDS);
 	if (err_mask) {
 		rc = -EIO;
@@ -1238,7 +1239,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 			       id[84], id[85], id[86], id[87], id[88]);
 
 	/* initialize to-be-configured parameters */
-	dev->flags = 0;
+	dev->flags &= ~ATA_DFLAG_CFG_MASK;
 	dev->max_sectors = 0;
 	dev->cdb_len = 0;
 	dev->n_sectors = 0;
@@ -1381,11 +1382,18 @@ err_out_nosup:
 static int ata_bus_probe(struct ata_port *ap)
 {
 	unsigned int classes[ATA_MAX_DEVICES];
-	int i, rc, found = 0;
+	int tries[ATA_MAX_DEVICES];
+	int i, rc, down_xfermask;
 	struct ata_device *dev;
 
 	ata_port_probe(ap);
 
+	for (i = 0; i < ATA_MAX_DEVICES; i++)
+		tries[i] = ATA_PROBE_MAX_TRIES;
+
+ retry:
+	down_xfermask = 0;
+
 	/* reset and determine device classes */
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
 		classes[i] = ATA_DEV_UNKNOWN;
@@ -1399,7 +1407,7 @@ static int ata_bus_probe(struct ata_port *ap)
 	} else {
 		ap->ops->phy_reset(ap);
 
-		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
+		if (!(ap->flags & ATA_FLAG_DISABLED))
 			for (i = 0; i < ATA_MAX_DEVICES; i++)
 				classes[i] = ap->device[i].class;
 
@@ -1415,21 +1423,23 @@
 		dev = &ap->device[i];
 		dev->class = classes[i];
 
+		if (!tries[i]) {
+			ata_down_xfermask_limit(ap, dev, 1);
+			ata_dev_disable(ap, dev);
+		}
+
 		if (!ata_dev_enabled(dev))
 			continue;
 
-		WARN_ON(dev->id != NULL);
-		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
-			dev->class = ATA_DEV_NONE;
-			continue;
-		}
+		kfree(dev->id);
+		dev->id = NULL;
+		rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
+		if (rc)
+			goto fail;
 
-		if (ata_dev_configure(ap, dev, 1)) {
-			ata_dev_disable(ap, dev);
-			continue;
-		}
-
-		found = 1;
+		rc = ata_dev_configure(ap, dev, 1);
+		if (rc)
+			goto fail;
 	}
 
 	/* configure transfer mode */
@@ -1438,12 +1448,18 @@
 		 * return error code and failing device on failure as
 		 * ata_set_mode() does.
		 */
-		if (found)
-			ap->ops->set_mode(ap);
+		for (i = 0; i < ATA_MAX_DEVICES; i++)
+			if (ata_dev_enabled(&ap->device[i])) {
+				ap->ops->set_mode(ap);
+				break;
+			}
 		rc = 0;
 	} else {
-		while (ata_set_mode(ap, &dev))
-			ata_dev_disable(ap, dev);
+		rc = ata_set_mode(ap, &dev);
+		if (rc) {
+			down_xfermask = 1;
+			goto fail;
+		}
 	}
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
@@ -1454,6 +1470,24 @@
 	ata_port_disable(ap);
 	ap->ops->port_disable(ap);
 	return -ENODEV;
+
+ fail:
+	switch (rc) {
+	case -EINVAL:
+	case -ENODEV:
+		tries[dev->devno] = 0;
+		break;
+	case -EIO:
+		ata_down_sata_spd_limit(ap);
+		/* fall through */
+	default:
+		tries[dev->devno]--;
+		if (down_xfermask &&
+		    ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1))
+			tries[dev->devno] = 0;
+	}
+
+	goto retry;
 }
 
 /**
@@ -1469,7 +1503,7 @@
 
 void ata_port_probe(struct ata_port *ap)
 {
-	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
+	ap->flags &= ~ATA_FLAG_DISABLED;
 }
 
 /**
@@ -1543,7 +1577,7 @@ void __sata_phy_reset(struct ata_port *ap)
 	else
 		ata_port_disable(ap);
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
@@ -1568,7 +1602,7 @@
 void sata_phy_reset(struct ata_port *ap)
 {
 	__sata_phy_reset(ap);
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 	ata_bus_reset(ap);
 }
@@ -1607,7 +1641,121 @@
 void ata_port_disable(struct ata_port *ap)
 {
 	ap->device[0].class = ATA_DEV_NONE;
 	ap->device[1].class = ATA_DEV_NONE;
-	ap->flags |= ATA_FLAG_PORT_DISABLED;
+	ap->flags |= ATA_FLAG_DISABLED;
+}
+
+/**
+ *	ata_down_sata_spd_limit - adjust SATA spd limit downward
+ *	@ap: Port to adjust SATA spd limit for
+ *
+ *	Adjust SATA spd limit of @ap downward.  Note that this
+ *	function only adjusts the limit.  The change must be applied
+ *	using ata_set_sata_spd().
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure
+ */
+int ata_down_sata_spd_limit(struct ata_port *ap)
+{
+	u32 spd, mask;
+	int highbit;
+
+	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+		return -EOPNOTSUPP;
+
+	mask = ap->sata_spd_limit;
+	if (mask <= 1)
+		return -EINVAL;
+	highbit = fls(mask) - 1;
+	mask &= ~(1 << highbit);
+
+	spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf;
+	if (spd <= 1)
+		return -EINVAL;
+	spd--;
+	mask &= (1 << spd) - 1;
+	if (!mask)
+		return -EINVAL;
+
+	ap->sata_spd_limit = mask;
+
+	printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n",
+	       ap->id, sata_spd_string(fls(mask)));
+
+	return 0;
+}
+
+static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
+{
+	u32 spd, limit;
+
+	if (ap->sata_spd_limit == UINT_MAX)
+		limit = 0;
+	else
+		limit = fls(ap->sata_spd_limit);
+
+	spd = (*scontrol >> 4) & 0xf;
+	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
+
+	return spd != limit;
+}
+
+/**
+ *	ata_set_sata_spd_needed - is SATA spd configuration needed
+ *	@ap: Port in question
+ *
+ *	Test whether the spd limit in SControl matches
+ *	@ap->sata_spd_limit.  This function is used to determine
+ *	whether hardreset is necessary to apply SATA spd
+ *	configuration.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	1 if SATA spd configuration is needed, 0 otherwise.
+ */
+int ata_set_sata_spd_needed(struct ata_port *ap)
+{
+	u32 scontrol;
+
+	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+		return 0;
+
+	scontrol = scr_read(ap, SCR_CONTROL);
+
+	return __ata_set_sata_spd_needed(ap, &scontrol);
+}
+
+/**
+ *	ata_set_sata_spd - set SATA spd according to spd limit
+ *	@ap: Port to set SATA spd for
+ *
+ *	Set SATA spd of @ap according to sata_spd_limit.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 if spd doesn't need to be changed, 1 if spd has been
+ *	changed.  -EOPNOTSUPP if SCR registers are inaccessible.
+ */
+static int ata_set_sata_spd(struct ata_port *ap)
+{
+	u32 scontrol;
+
+	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+		return -EOPNOTSUPP;
+
+	scontrol = scr_read(ap, SCR_CONTROL);
+	if (!__ata_set_sata_spd_needed(ap, &scontrol))
+		return 0;
+
+	scr_write(ap, SCR_CONTROL, scontrol);
+	return 1;
 }
 
 /*
@@ -1758,11 +1906,62 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 	return 0;
 }
 
+/**
+ *	ata_down_xfermask_limit - adjust dev xfer masks downward
+ *	@ap: Port associated with device @dev
+ *	@dev: Device to adjust xfer masks
+ *	@force_pio0: Force PIO0
+ *
+ *	Adjust xfer masks of @dev downward.  Note that this function
+ *	does not apply the change.  Invoking ata_set_mode() afterwards
+ *	will apply the limit.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ *	RETURNS:
+ *	0 on success, negative errno on failure
+ */
+int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
+			    int force_pio0)
+{
+	unsigned long xfer_mask;
+	int highbit;
+
+	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
+				      dev->udma_mask);
+
+	if (!xfer_mask)
+		goto fail;
+	/* don't gear down to MWDMA from UDMA, go directly to PIO */
+	if (xfer_mask & ATA_MASK_UDMA)
+		xfer_mask &= ~ATA_MASK_MWDMA;
+
+	highbit = fls(xfer_mask) - 1;
+	xfer_mask &= ~(1 << highbit);
+	if (force_pio0)
+		xfer_mask &= 1 << ATA_SHIFT_PIO;
+	if (!xfer_mask)
+		goto fail;
+
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+			    &dev->udma_mask);
+
+	printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n",
+	       ap->id, dev->devno, ata_mode_string(xfer_mask));
+
+	return 0;
+
+ fail:
+	return -EINVAL;
+}
+
 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
 {
 	unsigned int err_mask;
 	int rc;
 
+	dev->flags &= ~ATA_DFLAG_PIO;
 	if (dev->xfer_shift == ATA_SHIFT_PIO)
 		dev->flags |= ATA_DFLAG_PIO;
 
@@ -1775,12 +1974,8 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
 	}
 
 	rc = ata_dev_revalidate(ap, dev, 0);
-	if (rc) {
-		printk(KERN_ERR
-		       "ata%u: failed to revalidate after set xfermode\n",
-		       ap->id);
+	if (rc)
 		return rc;
-	}
 
 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
 		dev->xfer_shift, (int)dev->xfer_mode);
@@ -1806,7 +2001,7 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
-static int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 {
 	struct ata_device *dev;
 	int i, rc = 0, used_dma = 0, found = 0;
@@ -2069,7 +2264,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
- *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
+ *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */
 
 void ata_bus_reset(struct ata_port *ap)
@@ -2179,7 +2374,14 @@ static int sata_phy_resume(struct ata_port *ap)
 void ata_std_probeinit(struct ata_port *ap)
 {
 	if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
+		u32 spd;
+
 		sata_phy_resume(ap);
+
+		spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4;
+		if (spd)
+			ap->sata_spd_limit &= (1 << spd) - 1;
+
 		if (sata_dev_present(ap))
 			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 	}
@@ -2267,18 +2469,30 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
 
 	DPRINTK("ENTER\n");
 
-	/* Issue phy wake/reset */
+	if (ata_set_sata_spd_needed(ap)) {
+		/* SATA spec says nothing about how to reconfigure
+		 * spd.  To be on the safe side, turn off phy during
+		 * reconfiguration.  This works for at least ICH7 AHCI
+		 * and Sil3124.
+		 */
+		scontrol = scr_read(ap, SCR_CONTROL);
+		scontrol = (scontrol & 0x0f0) | 0x302;
+		scr_write_flush(ap, SCR_CONTROL, scontrol);
+
+		ata_set_sata_spd(ap);
+	}
+
+	/* issue phy wake/reset */
 	scontrol = scr_read(ap, SCR_CONTROL);
 	scontrol = (scontrol & 0x0f0) | 0x301;
 	scr_write_flush(ap, SCR_CONTROL, scontrol);
 
-	/*
-	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
+	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
 	 * 10.4.2 says at least 1 ms.
 	 */
 	msleep(1);
 
-	/* Bring phy back */
+	/* bring phy back */
 	sata_phy_resume(ap);
 
 	/* TODO: phy layer with polling, timeouts, etc. */
@@ -2385,9 +2599,9 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
 				 ata_std_postreset, classes);
 }
 
-static int ata_do_reset(struct ata_port *ap,
-			ata_reset_fn_t reset, ata_postreset_fn_t postreset,
-			int verbose, unsigned int *classes)
+int ata_do_reset(struct ata_port *ap,
+		 ata_reset_fn_t reset, ata_postreset_fn_t postreset,
+		 int verbose, unsigned int *classes)
 {
 	int i, rc;
 
@@ -2458,21 +2672,42 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
 	if (probeinit)
 		probeinit(ap);
 
-	if (softreset) {
+	if (softreset && !ata_set_sata_spd_needed(ap)) {
 		rc = ata_do_reset(ap, softreset, postreset, 0, classes);
 		if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
 			goto done;
+		printk(KERN_INFO "ata%u: softreset failed, will try "
+		       "hardreset in 5 secs\n", ap->id);
+		ssleep(5);
 	}
 
 	if (!hardreset)
 		goto done;
 
-	rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
-	if (rc || classes[0] != ATA_DEV_UNKNOWN)
-		goto done;
+	while (1) {
+		rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
+		if (rc == 0) {
+			if (classes[0] != ATA_DEV_UNKNOWN)
+				goto done;
+			break;
+		}
+
+		if (ata_down_sata_spd_limit(ap))
+			goto done;
+
+		printk(KERN_INFO "ata%u: hardreset failed, will retry "
+		       "in 5 secs\n", ap->id);
+		ssleep(5);
+	}
+
+	if (softreset) {
+		printk(KERN_INFO "ata%u: hardreset succeeded without "
+		       "classification, will retry softreset in 5 secs\n",
+		       ap->id);
+		ssleep(5);
 
-	if (softreset)
 		rc = ata_do_reset(ap, softreset, postreset, 0, classes);
+	}
 
  done:
 	if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
@@ -2560,15 +2795,14 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
 		       int post_reset)
 {
-	unsigned int class;
-	u16 *id;
+	unsigned int class = dev->class;
+	u16 *id = NULL;
 	int rc;
 
-	if (!ata_dev_enabled(dev))
-		return -ENODEV;
-
-	class = dev->class;
-	id = NULL;
+	if (!ata_dev_enabled(dev)) {
+		rc = -ENODEV;
+		goto fail;
+	}
 
 	/* allocate & read ID data */
 	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
@@ -2585,7 +2819,9 @@ int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
 	dev->id = id;
 
 	/* configure device according to the new ID */
-	return ata_dev_configure(ap, dev, 0);
+	rc = ata_dev_configure(ap, dev, 0);
+	if (rc == 0)
+		return 0;
 
  fail:
 	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
@@ -2687,23 +2923,34 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
 	unsigned long xfer_mask;
 	int i;
 
-	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
-				      ap->udma_mask);
+	xfer_mask = ata_pack_xfermask(ap->pio_mask,
+				      ap->mwdma_mask, ap->udma_mask);
+
+	/* Apply cable rule here.  Don't apply it early because when
+	 * we handle hot plug the cable type can itself change.
+	 */
+	if (ap->cbl == ATA_CBL_PATA40)
+		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 
 	/* FIXME: Use port-wide xfermask for now */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *d = &ap->device[i];
-		if (!ata_dev_enabled(d))
+
+		if (ata_dev_absent(d))
 			continue;
-		xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
-					       d->udma_mask);
+
+		if (ata_dev_disabled(d)) {
+			/* to avoid violating device selection timing */
+			xfer_mask &= ata_pack_xfermask(d->pio_mask,
+						       UINT_MAX, UINT_MAX);
+			continue;
+		}
+
+		xfer_mask &= ata_pack_xfermask(d->pio_mask,
+					       d->mwdma_mask, d->udma_mask);
 		xfer_mask &= ata_id_xfermask(d->id);
 		if (ata_dma_blacklisted(d))
 			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
-		/* Apply cable rule here.  Don't apply it early because when
-		   we handle hot plug the cable type can itself change */
-		if (ap->cbl == ATA_CBL_PATA40)
-			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 	}
 
 	if (ata_dma_blacklisted(dev))
@@ -2714,11 +2961,12 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
 		if (hs->simplex_claimed)
 			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 	}
+
 	if (ap->ops->mode_filter)
 		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
 
-	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
-			    &dev->udma_mask);
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
+			    &dev->mwdma_mask, &dev->udma_mask);
 }
 
 /**
@@ -2752,7 +3000,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
 	tf.protocol = ATA_PROT_NODATA;
 	tf.nsect = dev->xfer_mode;
 
-	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -2792,7 +3040,7 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
 	tf.nsect = sectors;
 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
-	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -3838,8 +4086,8 @@ fsm_start:
 
 static void ata_pio_task(void *_data)
 {
-	struct ata_port *ap = _data;
-	struct ata_queued_cmd *qc;
+	struct ata_queued_cmd *qc = _data;
+	struct ata_port *ap = qc->ap;
 	u8 status;
 	int poll_next;
 
@@ -4392,7 +4640,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -4424,7 +4672,7 @@ static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
 	tf.flags |= ATA_TFLAG_DEVICE;
 	tf.protocol = ATA_PROT_NODATA;
 
-	err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+	err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
 	if (err)
 		printk(KERN_ERR "%s: ata command failed: %d\n",
 		       __FUNCTION__, err);
@@ -4613,7 +4861,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	host->unique_id = ata_unique_id++;
 	host->max_cmd_len = 12;
 
-	ap->flags = ATA_FLAG_PORT_DISABLED;
+	ap->flags = ATA_FLAG_DISABLED;
 	ap->id = host->unique_id;
 	ap->host = host;
 	ap->ctl = ATA_DEVCTL_OBS;
@@ -4628,6 +4876,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	ap->flags |= ent->host_flags;
 	ap->ops = ent->port_ops;
 	ap->cbl = ATA_CBL_NONE;
+	ap->sata_spd_limit = UINT_MAX;
 	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
 
@@ -5083,7 +5332,6 @@ EXPORT_SYMBOL_GPL(ata_sg_init);
 EXPORT_SYMBOL_GPL(ata_sg_init_one);
 EXPORT_SYMBOL_GPL(__ata_qc_complete);
 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
-EXPORT_SYMBOL_GPL(ata_eng_timeout);
 EXPORT_SYMBOL_GPL(ata_tf_load);
 EXPORT_SYMBOL_GPL(ata_tf_read);
 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
@@ -5123,15 +5371,12 @@ EXPORT_SYMBOL_GPL(ata_busy_sleep);
 EXPORT_SYMBOL_GPL(ata_port_queue_task);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
-EXPORT_SYMBOL_GPL(ata_scsi_error);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_release);
 EXPORT_SYMBOL_GPL(ata_host_intr);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
-EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
 
 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
 EXPORT_SYMBOL_GPL(ata_timing_compute);
@@ -5153,3 +5398,8 @@ EXPORT_SYMBOL_GPL(ata_device_suspend);
 EXPORT_SYMBOL_GPL(ata_device_resume);
 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
+
+EXPORT_SYMBOL_GPL(ata_scsi_error);
+EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
+EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
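The reworked ata_bus_probe() in the libata-core.c hunks above turns probing into a retry loop driven by the fail: label: unrecoverable errors give up immediately, -EIO lowers the SATA link speed, and everything else burns one of ATA_PROBE_MAX_TRIES attempts, optionally tightening the transfer-mode mask. The following standalone C sketch illustrates that policy only; the types, helper names and the main() harness are simplified stand-ins for this note, not the kernel's structures.

/* Simplified sketch of the ata_bus_probe() failure policy. */
#include <stdio.h>

#define ATA_PROBE_MAX_TRIES 3	/* mirrors the constant added to <linux/libata.h> */

enum probe_err { PROBE_EINVAL, PROBE_ENODEV, PROBE_EIO, PROBE_OTHER };

/* hypothetical stand-ins for ata_down_sata_spd_limit()/ata_down_xfermask_limit() */
static void down_sata_spd_limit(void) { printf("  lowering SATA link speed\n"); }
static void down_xfermask_limit(int force_pio0)
{
	printf("  lowering xfer mode%s\n", force_pio0 ? " (force PIO0)" : "");
}

static void handle_probe_failure(enum probe_err rc, int *tries, int down_xfermask)
{
	switch (rc) {
	case PROBE_EINVAL:
	case PROBE_ENODEV:
		/* unrecoverable: stop trying this device at once */
		*tries = 0;
		break;
	case PROBE_EIO:
		down_sata_spd_limit();
		/* fall through */
	default:
		(*tries)--;
		/* on the last remaining try, also force PIO0 */
		if (down_xfermask)
			down_xfermask_limit(*tries == 1);
	}
}

int main(void)
{
	int tries = ATA_PROBE_MAX_TRIES;

	/* pretend ata_set_mode() keeps failing with -EIO */
	while (tries > 0) {
		printf("probe attempt, %d tries left\n", tries);
		handle_probe_failure(PROBE_EIO, &tries, 1);
	}
	/* the kernel path additionally disables the device once tries hits zero */
	printf("device disabled after exhausting retries\n");
	return 0;
}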
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
new file mode 100644
index 000000000000..e73f5612aea8
--- /dev/null
+++ b/drivers/scsi/libata-eh.c
@@ -0,0 +1,264 @@
+/*
+ *  libata-eh.c - libata error handling
+ *
+ *  Maintained by:  Jeff Garzik
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2006 Tejun Heo
+ *
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License as
+ *  published by the Free Software Foundation; either version 2, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ *  USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <linux/libata.h>
+
+#include "libata.h"
+
+/**
+ *	ata_scsi_timed_out - SCSI layer time out callback
+ *	@cmd: timed out SCSI command
+ *
+ *	Handles SCSI layer timeout.  We race with normal completion of
+ *	the qc for @cmd.  If the qc is already gone, we lose and let
+ *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
+ *	timed out and EH should be invoked.  Prevent ata_qc_complete()
+ *	from finishing it by setting EH_SCHEDULED and return
+ *	EH_NOT_HANDLED.
+ *
+ *	LOCKING:
+ *	Called from timer context
+ *
+ *	RETURNS:
+ *	EH_HANDLED or EH_NOT_HANDLED
+ */
+enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+	unsigned long flags;
+	struct ata_queued_cmd *qc;
+	enum scsi_eh_timer_return ret = EH_HANDLED;
+
+	DPRINTK("ENTER\n");
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc) {
+		WARN_ON(qc->scsicmd != cmd);
+		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
+		qc->err_mask |= AC_ERR_TIMEOUT;
+		ret = EH_NOT_HANDLED;
+	}
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	DPRINTK("EXIT, ret=%d\n", ret);
+	return ret;
+}
+
+/**
+ *	ata_scsi_error - SCSI layer error handler callback
+ *	@host: SCSI host on which error occurred
+ *
+ *	Handles SCSI-layer-thrown error events.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ *
+ *	RETURNS:
+ *	Zero.
+ */
+int ata_scsi_error(struct Scsi_Host *host)
+{
+	struct ata_port *ap = (struct ata_port *)&host->hostdata[0];
+
+	DPRINTK("ENTER\n");
+
+	/* synchronize with IRQ handler and port task */
+	spin_unlock_wait(&ap->host_set->lock);
+	ata_port_flush_task(ap);
+
+	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
+
+	ap->ops->eng_timeout(ap);
+
+	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
+
+	scsi_eh_flush_done_q(&ap->eh_done_q);
+
+	DPRINTK("EXIT\n");
+	return 0;
+}
+
+/**
+ *	ata_qc_timeout - Handle timeout of queued command
+ *	@qc: Command that timed out
+ *
+ *	Some part of the kernel (currently, only the SCSI layer)
+ *	has noticed that the active command on port @ap has not
+ *	completed after a specified length of time.  Handle this
+ *	condition by disabling DMA (if necessary) and completing
+ *	transactions, with error if necessary.
+ *
+ *	This also handles the case of the "lost interrupt", where
+ *	for some reason (possibly hardware bug, possibly driver bug)
+ *	an interrupt was not delivered to the driver, even though the
+ *	transaction completed successfully.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ */
+static void ata_qc_timeout(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_host_set *host_set = ap->host_set;
+	u8 host_stat = 0, drv_stat;
+	unsigned long flags;
+
+	DPRINTK("ENTER\n");
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(qc);
+
+		/* fall through */
+
+	default:
+		ata_altstatus(ap);
+		drv_stat = ata_chk_status(ap);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
+		       ap->id, qc->tf.command, drv_stat, host_stat);
+
+		/* complete taskfile transaction */
+		qc->err_mask |= ac_err_mask(drv_stat);
+		break;
+	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	ata_eh_qc_complete(qc);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_eng_timeout - Handle timeout of queued command
+ *	@ap: Port on which timed-out command is active
+ *
+ *	Some part of the kernel (currently, only the SCSI layer)
+ *	has noticed that the active command on port @ap has not
+ *	completed after a specified length of time.  Handle this
+ *	condition by disabling DMA (if necessary) and completing
+ *	transactions, with error if necessary.
+ *
+ *	This also handles the case of the "lost interrupt", where
+ *	for some reason (possibly hardware bug, possibly driver bug)
+ *	an interrupt was not delivered to the driver, even though the
+ *	transaction completed successfully.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ */
+void ata_eng_timeout(struct ata_port *ap)
+{
+	DPRINTK("ENTER\n");
+
+	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
+
+	DPRINTK("EXIT\n");
+}
+
+static void ata_eh_scsidone(struct scsi_cmnd *scmd)
+{
+	/* nada */
+}
+
+static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	qc->scsidone = ata_eh_scsidone;
+	__ata_qc_complete(qc);
+	WARN_ON(ata_tag_valid(qc->tag));
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+}
+
+/**
+ *	ata_eh_qc_complete - Complete an active ATA command from EH
+ *	@qc: Command to complete
+ *
+ *	Indicate to the mid and upper layers that an ATA command has
+ *	completed.  To be used from EH.
+ */
+void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	scmd->retries = scmd->allowed;
+	__ata_eh_qc_complete(qc);
+}
+
+/**
+ *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
+ *	@qc: Command to retry
+ *
+ *	Indicate to the mid and upper layers that an ATA command
+ *	should be retried.  To be used from EH.
+ *
+ *	SCSI midlayer limits the number of retries to scmd->allowed.
+ *	scmd->retries is decremented for commands which get retried
+ *	due to unrelated failures (qc->err_mask is zero).
+ */
+void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	if (!qc->err_mask && scmd->retries)
+		scmd->retries--;
+	__ata_eh_qc_complete(qc);
+}
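The new ata_eh_qc_retry() above only lets the SCSI midlayer "charge" a retry to the command when the qc itself actually failed: for unrelated failures (qc->err_mask is zero) it hands one retry back, while ata_eh_qc_complete() uses up the whole budget so the error is reported. The small C sketch below illustrates just that accounting; struct cmd is a simplified stand-in for struct scsi_cmnd, not the kernel type.

/* Sketch of the retry bookkeeping in ata_eh_qc_complete()/ata_eh_qc_retry(). */
#include <stdio.h>

struct cmd {
	int retries;	/* retries consumed so far, as seen by the midlayer */
	int allowed;	/* maximum retries the midlayer will attempt */
};

/* error completion: exhaust the budget so the midlayer stops retrying */
static void eh_qc_complete(struct cmd *scmd)
{
	scmd->retries = scmd->allowed;
}

/* retry: if the command itself did not fail (err_mask == 0), refund one retry */
static void eh_qc_retry(struct cmd *scmd, unsigned int err_mask)
{
	if (!err_mask && scmd->retries)
		scmd->retries--;
}

int main(void)
{
	struct cmd scmd = { .retries = 2, .allowed = 5 };

	eh_qc_retry(&scmd, 0);		/* unrelated failure: retry is "free" */
	printf("after free retry: retries=%d\n", scmd.retries);		/* 1 */

	eh_qc_retry(&scmd, 0x4);	/* device error: retry counts against the budget */
	printf("after counted retry: retries=%d\n", scmd.retries);	/* still 1 */

	eh_qc_complete(&scmd);		/* error completion: no further retries */
	printf("after complete: retries=%d allowed=%d\n", scmd.retries, scmd.allowed);
	return 0;
}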
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index c1a4b29a9ae1..745fc263feeb 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -53,7 +53,6 @@ typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
 static struct ata_device *
 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 
 #define RW_RECOVERY_MPAGE 0x1
 #define RW_RECOVERY_MPAGE_LEN 12
@@ -545,17 +544,12 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
 	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
 				   &sb[1], &sb[2], &sb[3]);
 		sb[1] &= 0x0f;
@@ -620,17 +614,12 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
-	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
 	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
 				   &sb[2], &sb[12], &sb[13]);
 		sb[2] &= 0x0f;
@@ -723,141 +712,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 	return 0;	/* scsi layer doesn't check return value, sigh */
 }
 
-/**
- *	ata_scsi_timed_out - SCSI layer time out callback
- *	@cmd: timed out SCSI command
- *
- *	Handles SCSI layer timeout.  We race with normal completion of
- *	the qc for @cmd.  If the qc is already gone, we lose and let
- *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
- *	timed out and EH should be invoked.  Prevent ata_qc_complete()
- *	from finishing it by setting EH_SCHEDULED and return
- *	EH_NOT_HANDLED.
- *
- *	LOCKING:
- *	Called from timer context
- *
- *	RETURNS:
- *	EH_HANDLED or EH_NOT_HANDLED
- */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *host = cmd->device->host;
-	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
-	unsigned long flags;
-	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret = EH_HANDLED;
-
-	DPRINTK("ENTER\n");
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (qc) {
-		WARN_ON(qc->scsicmd != cmd);
-		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
-	}
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT, ret=%d\n", ret);
-	return ret;
-}
-
-/**
- *	ata_scsi_error - SCSI layer error handler callback
- *	@host: SCSI host on which error occurred
- *
- *	Handles SCSI-layer-thrown error events.
- *
- *	LOCKING:
- *	Inherited from SCSI layer (none, can sleep)
- *
- *	RETURNS:
- *	Zero.
- */
-
-int ata_scsi_error(struct Scsi_Host *host)
-{
-	struct ata_port *ap;
-	unsigned long flags;
-
-	DPRINTK("ENTER\n");
-
-	ap = (struct ata_port *) &host->hostdata[0];
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	WARN_ON(ap->flags & ATA_FLAG_IN_EH);
-	ap->flags |= ATA_FLAG_IN_EH;
-	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	ata_port_flush_task(ap);
-
-	ap->ops->eng_timeout(ap);
-
-	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
-	scsi_eh_flush_done_q(&ap->eh_done_q);
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_IN_EH;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT\n");
-	return 0;
-}
-
-static void ata_eh_scsidone(struct scsi_cmnd *scmd)
-{
-	/* nada */
-}
-
-static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc->scsidone = ata_eh_scsidone;
-	__ata_qc_complete(qc);
-	WARN_ON(ata_tag_valid(qc->tag));
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
-}
-
-/**
- *	ata_eh_qc_complete - Complete an active ATA command from EH
- *	@qc: Command to complete
- *
- *	Indicate to the mid and upper layers that an ATA command has
- *	completed.  To be used from EH.
- */
-void ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	scmd->retries = scmd->allowed;
-	__ata_eh_qc_complete(qc);
-}
-
-/**
- *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
- *	@qc: Command to retry
- *
- *	Indicate to the mid and upper layers that an ATA command
- *	should be retried.  To be used from EH.
- *
- *	SCSI midlayer limits the number of retries to scmd->allowed.
- *	This function might need to adjust scmd->retries for commands
- *	which get retried due to unrelated NCQ failures.
- */
-void ata_eh_qc_retry(struct ata_queued_cmd *qc)
-{
-	__ata_eh_qc_complete(qc);
-}
-
 /**
 *	ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 *	@qc: Storage for translated ATA taskfile
@@ -1197,6 +1051,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd)
 	u64 block;
 	u32 n_block;
 
+	qc->flags |= ATA_QCFLAG_IO;
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 
 	if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
@@ -1343,11 +1198,14 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 	 */
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
 	    ((cdb[2] & 0x20) || need_sense)) {
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
 	} else {
 		if (!need_sense) {
 			cmd->result = SAM_STAT_GOOD;
 		} else {
+			qc->ap->ops->tf_read(qc->ap, &qc->tf);
+
 			/* TODO: decide which descriptor format to use
 			 * for 48b LBA devices and call that here
 			 * instead of the fixed desc, which is only
@@ -2139,13 +1997,15 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
 
 static void atapi_sense_complete(struct ata_queued_cmd *qc)
 {
-	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
+	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
 		/* FIXME: not quite right; we don't want the
 		 * translation of taskfile registers into
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
+	}
 
 	qc->scsidone(qc->scsicmd);
 	ata_qc_free(qc);
@@ -2213,17 +2073,15 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
 		cmd->result = SAM_STAT_CHECK_CONDITION;
 		atapi_request_sense(qc);
 		return;
-	}
-
-	else if (unlikely(err_mask))
+	} else if (unlikely(err_mask)) {
 		/* FIXME: not quite right; we don't want the
 		 * translation of taskfile registers into
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
-
-	else {
+	} else {
 		u8 *scsicmd = cmd->cmnd;
 
 		if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
@@ -2737,7 +2595,7 @@ void ata_scsi_scan_host(struct ata_port *ap)
 	struct ata_device *dev;
 	unsigned int i;
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 1c755b14521a..31efc2e60b69 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,7 +45,20 @@ extern int libata_fua;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 					      struct ata_device *dev);
 extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
+extern void ata_dev_disable(struct ata_port *ap, struct ata_device *dev);
 extern void ata_port_flush_task(struct ata_port *ap);
+extern unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+				  struct ata_taskfile *tf, const u8 *cdb,
+				  int dma_dir, void *buf, unsigned int buflen);
+extern int ata_down_sata_spd_limit(struct ata_port *ap);
+extern int ata_set_sata_spd_needed(struct ata_port *ap);
+extern int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
+				   int force_pio0);
+extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern int ata_do_reset(struct ata_port *ap,
+			ata_reset_fn_t reset,
+			ata_postreset_fn_t postreset,
+			int verbose, unsigned int *classes);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -60,7 +73,6 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern struct scsi_transport_template ata_scsi_transport_template;
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
-extern int ata_scsi_error(struct Scsi_Host *host);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 			       unsigned int buflen);
 
@@ -90,4 +102,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
 
+/* libata-eh.c */
+extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+
 #endif /* __LIBATA_H__ */
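Among the helpers newly exported in drivers/scsi/libata.h above, ata_down_xfermask_limit() simply knocks the highest transfer mode out of the packed pio/mwdma/udma mask, skipping from UDMA straight to PIO. The sketch below reproduces only that bit math as standalone C; the ATA_SHIFT_*/ATA_MASK_* values are assumed here for illustration and may not match the kernel header exactly.

/* Sketch of the mask manipulation in ata_down_xfermask_limit(). */
#include <stdio.h>

#define ATA_SHIFT_PIO	0
#define ATA_SHIFT_MWDMA	5
#define ATA_SHIFT_UDMA	8
#define ATA_MASK_MWDMA	(0x07UL << ATA_SHIFT_MWDMA)
#define ATA_MASK_UDMA	(0xffUL << ATA_SHIFT_UDMA)

/* position of the highest set bit, 1-based; 0 for an empty mask */
static int fls_ulong(unsigned long x)
{
	int r = 0;
	while (x) { r++; x >>= 1; }
	return r;
}

/* returns the reduced mask, or 0 when nothing is left (the -EINVAL case) */
static unsigned long down_xfermask_limit(unsigned long xfer_mask, int force_pio0)
{
	if (!xfer_mask)
		return 0;
	/* don't gear down to MWDMA from UDMA, go directly to PIO */
	if (xfer_mask & ATA_MASK_UDMA)
		xfer_mask &= ~ATA_MASK_MWDMA;

	xfer_mask &= ~(1UL << (fls_ulong(xfer_mask) - 1));	/* drop highest mode */
	if (force_pio0)
		xfer_mask &= 1UL << ATA_SHIFT_PIO;		/* keep only PIO0 */
	return xfer_mask;
}

int main(void)
{
	/* a device advertising PIO0-4, MWDMA0-2 and UDMA0-5 */
	unsigned long mask = 0x1f | (0x07UL << ATA_SHIFT_MWDMA) | (0x3fUL << ATA_SHIFT_UDMA);

	mask = down_xfermask_limit(mask, 0);
	printf("after one step: %#lx (UDMA5 gone, MWDMA dropped)\n", mask);

	mask = down_xfermask_limit(mask, 1);
	printf("forced to PIO0: %#lx\n", mask);
	return 0;
}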
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index ff4fec96271b..a4dcb4352206 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -456,7 +456,7 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
 			continue;
 		handled = 1;
 		adma_enter_reg_mode(ap);
-		if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		if (ap->flags & ATA_FLAG_DISABLED)
 			continue;
 		pp = ap->private_data;
 		if (!pp || pp->state != adma_state_pkt)
@@ -481,7 +481,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
 	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
-		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+		if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index dc54f294fac8..fd9f2173f062 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -1397,7 +1397,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			}
 		}
 
-		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
+		if (ap && (ap->flags & ATA_FLAG_DISABLED))
 			continue;
 
 		err_mask = ac_err_mask(ata_status);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 8a99c3827426..be38f328a479 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -280,7 +280,7 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index cf0eeba47eb6..322525d84907 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -535,7 +535,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 		ap = host_set->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index a547c1272a5e..f86858962fbe 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -395,7 +395,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 					sff1, sff0, port_no, sHST, sDST);
 			handled = 1;
-			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 				struct ata_queued_cmd *qc;
 				struct qs_port_priv *pp = ap->private_data;
 				if (!pp || pp->state != qs_state_pkt)
@@ -428,7 +428,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_mmio)
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 068c98a4111b..c34f6dabf418 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -770,7 +770,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 	for (i = 0; i < host_set->n_ports; i++)
 		if (status & (1 << i)) {
 			struct ata_port *ap = host_set->ports[i];
-			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 				sil24_host_intr(host_set->ports[i]);
 				handled++;
 			} else
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 2d6b091a9eaf..45c78c399bfd 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -834,7 +834,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
 		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 5af6d5f9f4bd..b7d6a31628c2 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -229,7 +229,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
 				handled++;
 			}
 
-			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 				struct ata_queued_cmd *qc;
 
 				qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b0171e9accc4..cc6cc08e8010 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -120,10 +120,12 @@ enum {
 	ATA_SHT_USE_CLUSTERING	= 1,
 
 	/* struct ata_device stuff */
-	ATA_DFLAG_LBA48		= (1 << 0), /* device supports LBA48 */
-	ATA_DFLAG_PIO		= (1 << 1), /* device currently in PIO mode */
-	ATA_DFLAG_LBA		= (1 << 2), /* device supports LBA */
-	ATA_DFLAG_CDB_INTR	= (1 << 3), /* device asserts INTRQ when ready for CDB */
+	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
+	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
+	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
+	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,
+
+	ATA_DFLAG_PIO		= (1 << 8), /* device currently in PIO mode */
 
 	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
 	ATA_DEV_ATA		= 1,	/* ATA device */
@@ -133,33 +135,35 @@ enum {
 	ATA_DEV_NONE		= 5,	/* no device */
 
 	/* struct ata_port flags */
-	ATA_FLAG_SLAVE_POSS	= (1 << 1), /* host supports slave dev */
+	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
 					    /* (doesn't imply presence) */
-	ATA_FLAG_PORT_DISABLED	= (1 << 2), /* port is disabled, ignore it */
-	ATA_FLAG_SATA		= (1 << 3),
-	ATA_FLAG_NO_LEGACY	= (1 << 4), /* no legacy mode check */
-	ATA_FLAG_SRST		= (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
-	ATA_FLAG_MMIO		= (1 << 6), /* use MMIO, not PIO */
-	ATA_FLAG_SATA_RESET	= (1 << 7), /* (obsolete) use COMRESET */
-	ATA_FLAG_PIO_DMA	= (1 << 8), /* PIO cmds via DMA */
-	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
-					     * doesn't handle PIO interrupts */
-	ATA_FLAG_DEBUGMSG	= (1 << 10),
-	ATA_FLAG_NO_ATAPI	= (1 << 11), /* No ATAPI support */
+	ATA_FLAG_SATA		= (1 << 1),
+	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
+	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
+	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
+	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
+	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
+	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
+	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
+	ATA_FLAG_IRQ_MASK	= (1 << 9), /* Mask IRQ in PIO xfers */
+	ATA_FLAG_PIO_POLLING	= (1 << 10), /* use polling PIO if LLD
+					      * doesn't handle PIO interrupts */
 
-	ATA_FLAG_SUSPENDED	= (1 << 12), /* port is suspended */
+	ATA_FLAG_DEBUGMSG	= (1 << 17),
+	ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */
 
-	ATA_FLAG_PIO_LBA48	= (1 << 13), /* Host DMA engine is LBA28 only */
-	ATA_FLAG_IRQ_MASK	= (1 << 14), /* Mask IRQ in PIO xfers */
+	ATA_FLAG_DISABLED	= (1 << 19), /* port is disabled, ignore it */
+	ATA_FLAG_SUSPENDED	= (1 << 20), /* port is suspended */
 
-	ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */
-	ATA_FLAG_IN_EH		= (1 << 16), /* EH in progress */
+	/* bits 24:31 of ap->flags are reserved for LLDD specific flags */
 
-	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi lyer */
-	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
-	ATA_QCFLAG_SINGLE	= (1 << 4), /* no s/g, just a single buffer */
+	/* struct ata_queued_cmd flags */
+	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi lyer */
+	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
+	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
 	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
-	ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
+	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
+	ATA_QCFLAG_EH_SCHEDULED = (1 << 4), /* EH scheduled */
 
 	/* host set flags */
 	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */
@@ -205,10 +209,13 @@
 	/* size of buffer to pad xfers ending on unaligned boundaries */
 	ATA_DMA_PAD_SZ		= 4,
 	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
-
-	/* Masks for port functions */
+
+	/* masks for port functions */
 	ATA_PORT_PRIMARY	= (1 << 0),
 	ATA_PORT_SECONDARY	= (1 << 1),
+
+	/* how hard are we gonna try to probe/recover devices */
+	ATA_PROBE_MAX_TRIES	= 3,
 };
 
 enum hsm_task_states {
@@ -394,6 +401,7 @@ struct ata_port {
 	unsigned int		mwdma_mask;
 	unsigned int		udma_mask;
 	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
+	unsigned int		sata_spd_limit; /* SATA PHY speed limit */
 
 	struct ata_device	device[ATA_MAX_DEVICES];
 
@@ -519,9 +527,6 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
-extern int ata_scsi_error(struct Scsi_Host *host);
-extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
-extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
 extern int ata_scsi_release(struct Scsi_Host *host);
 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
 extern int ata_scsi_device_resume(struct scsi_device *);
@@ -570,7 +575,6 @@ extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
 extern u8   ata_bmdma_status(struct ata_port *ap);
 extern void ata_bmdma_irq_clear(struct ata_port *ap);
 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
-extern void ata_eng_timeout(struct ata_port *ap);
 extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
 			      struct scsi_cmnd *cmd,
 			      void (*done)(struct scsi_cmnd *));
@@ -625,6 +629,14 @@ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
 extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
 #endif /* CONFIG_PCI */
 
+/*
+ * EH
+ */
+extern int ata_scsi_error(struct Scsi_Host *host);
+extern void ata_eng_timeout(struct ata_port *ap);
+extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
+extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
+
 
 static inline int
 ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
@@ -678,6 +690,11 @@ static inline unsigned int ata_class_disabled(unsigned int class)
 	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
 }
 
+static inline unsigned int ata_class_absent(unsigned int class)
+{
+	return !ata_class_enabled(class) && !ata_class_disabled(class);
+}
+
 static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
 {
 	return ata_class_enabled(dev->class);
@@ -688,6 +705,11 @@ static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
 	return ata_class_disabled(dev->class);
 }
 
+static inline unsigned int ata_dev_absent(const struct ata_device *dev)
+{
+	return ata_class_absent(dev->class);
+}
+
 static inline u8 ata_chk_status(struct ata_port *ap)
 {
 	return ap->ops->check_status(ap);
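The include/linux/libata.h hunks above split the device flags into configuration bits (everything below bit 8, covered by ATA_DFLAG_CFG_MASK) and runtime bits such as ATA_DFLAG_PIO, which is why ata_dev_configure() now does "dev->flags &= ~ATA_DFLAG_CFG_MASK" instead of zeroing the whole field. The minimal C sketch below, using the constants quoted from the hunk, shows what that mask operation preserves; it is an illustration only, not kernel code.

/* Sketch of the device-flag split: config bits are re-read on every
 * (re)configuration, runtime bits survive it. */
#include <stdio.h>

enum {
	ATA_DFLAG_LBA		= (1 << 0),	/* device supports LBA */
	ATA_DFLAG_LBA48		= (1 << 1),	/* device supports LBA48 */
	ATA_DFLAG_CDB_INTR	= (1 << 2),	/* device asserts INTRQ when ready for CDB */
	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,	/* all configuration bits */

	ATA_DFLAG_PIO		= (1 << 8),	/* runtime: currently in PIO mode */
};

int main(void)
{
	unsigned long flags = ATA_DFLAG_LBA | ATA_DFLAG_LBA48 | ATA_DFLAG_PIO;

	/* what ata_dev_configure() now does instead of "dev->flags = 0":
	 * drop only the configuration bits, keep the runtime state */
	flags &= ~ATA_DFLAG_CFG_MASK;

	printf("PIO still set:  %s\n", (flags & ATA_DFLAG_PIO) ? "yes" : "no");
	printf("LBA48 cleared:  %s\n", (flags & ATA_DFLAG_LBA48) ? "no" : "yes");
	return 0;
}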