Merge branch 'upstream'
Conflicts:
drivers/scsi/libata-core.c
drivers/scsi/pdc_adma.c
drivers/scsi/sata_mv.c
drivers/scsi/sata_nv.c
drivers/scsi/sata_promise.c
drivers/scsi/sata_qstor.c
drivers/scsi/sata_sx4.c
drivers/scsi/sata_vsc.c
include/linux/libata.h
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e513c31..503f189 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -165,7 +165,7 @@
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
-libata-objs := libata-core.o libata-scsi.o libata-bmdma.o
+libata-objs := libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 597e9e8..9de48dd 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -65,7 +65,6 @@
struct ata_device *dev,
u16 heads,
u16 sectors);
-static int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
struct ata_device *dev);
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
@@ -409,7 +408,7 @@
return spd_str[spd - 1];
}
-static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
+void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
if (ata_dev_enabled(dev)) {
printk(KERN_WARNING "ata%u: dev %u disabled\n",
@@ -961,6 +960,7 @@
* @ap: Port to which the command is sent
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
* @dma_dir: Data transfer direction of the command
* @buf: Data buffer of the command
* @buflen: Length of data buffer
@@ -975,10 +975,9 @@
* None. Should be called with kernel context, might sleep.
*/
-static unsigned
-ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
- struct ata_taskfile *tf,
- int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, void *buf, unsigned int buflen)
{
u8 command = tf->command;
struct ata_queued_cmd *qc;
@@ -992,6 +991,8 @@
BUG_ON(qc == NULL);
qc->tf = *tf;
+ if (cdb)
+ memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
ata_sg_init_one(qc, buf, buflen);
@@ -1042,7 +1043,7 @@
*
* Kill the following code as soon as those drivers are fixed.
*/
- if (ap->flags & ATA_FLAG_PORT_DISABLED) {
+ if (ap->flags & ATA_FLAG_DISABLED) {
err_mask |= AC_ERR_SYSTEM;
ata_port_probe(ap);
}
@@ -1141,7 +1142,7 @@
tf.protocol = ATA_PROT_PIO;
- err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
+ err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE,
id, sizeof(id[0]) * ATA_ID_WORDS);
if (err_mask) {
rc = -EIO;
@@ -1238,7 +1239,7 @@
id[84], id[85], id[86], id[87], id[88]);
/* initialize to-be-configured parameters */
- dev->flags = 0;
+ dev->flags &= ~ATA_DFLAG_CFG_MASK;
dev->max_sectors = 0;
dev->cdb_len = 0;
dev->n_sectors = 0;
@@ -1381,11 +1382,18 @@
static int ata_bus_probe(struct ata_port *ap)
{
unsigned int classes[ATA_MAX_DEVICES];
- int i, rc, found = 0;
+ int tries[ATA_MAX_DEVICES];
+ int i, rc, down_xfermask;
struct ata_device *dev;
ata_port_probe(ap);
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ tries[i] = ATA_PROBE_MAX_TRIES;
+
+ retry:
+ down_xfermask = 0;
+
/* reset and determine device classes */
for (i = 0; i < ATA_MAX_DEVICES; i++)
classes[i] = ATA_DEV_UNKNOWN;
@@ -1399,7 +1407,7 @@
} else {
ap->ops->phy_reset(ap);
- if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
+ if (!(ap->flags & ATA_FLAG_DISABLED))
for (i = 0; i < ATA_MAX_DEVICES; i++)
classes[i] = ap->device[i].class;
@@ -1415,21 +1423,23 @@
dev = &ap->device[i];
dev->class = classes[i];
+ if (!tries[i]) {
+ ata_down_xfermask_limit(ap, dev, 1);
+ ata_dev_disable(ap, dev);
+ }
+
if (!ata_dev_enabled(dev))
continue;
- WARN_ON(dev->id != NULL);
- if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
- dev->class = ATA_DEV_NONE;
- continue;
- }
+ kfree(dev->id);
+ dev->id = NULL;
+ rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
+ if (rc)
+ goto fail;
- if (ata_dev_configure(ap, dev, 1)) {
- ata_dev_disable(ap, dev);
- continue;
- }
-
- found = 1;
+ rc = ata_dev_configure(ap, dev, 1);
+ if (rc)
+ goto fail;
}
/* configure transfer mode */
@@ -1438,12 +1448,18 @@
* return error code and failing device on failure as
* ata_set_mode() does.
*/
- if (found)
- ap->ops->set_mode(ap);
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ if (ata_dev_enabled(&ap->device[i])) {
+ ap->ops->set_mode(ap);
+ break;
+ }
rc = 0;
} else {
- while (ata_set_mode(ap, &dev))
- ata_dev_disable(ap, dev);
+ rc = ata_set_mode(ap, &dev);
+ if (rc) {
+ down_xfermask = 1;
+ goto fail;
+ }
}
for (i = 0; i < ATA_MAX_DEVICES; i++)
@@ -1454,6 +1470,24 @@
ata_port_disable(ap);
ap->ops->port_disable(ap);
return -ENODEV;
+
+ fail:
+ switch (rc) {
+ case -EINVAL:
+ case -ENODEV:
+ tries[dev->devno] = 0;
+ break;
+ case -EIO:
+ ata_down_sata_spd_limit(ap);
+ /* fall through */
+ default:
+ tries[dev->devno]--;
+ if (down_xfermask &&
+ ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1))
+ tries[dev->devno] = 0;
+ }
+
+ goto retry;
}
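For reference, the retry policy in the fail: label above is compact but dense: -EINVAL and -ENODEV give up on the device immediately, -EIO additionally lowers the SATA link speed limit before retrying, and anything else just burns one of the per-device tries (with the transfer mode geared down on the way when down_xfermask is set). A minimal user-space sketch of that decision table, using local stand-ins rather than the kernel's names:

#include <stdio.h>
#include <errno.h>

#define PROBE_MAX_TRIES 3        /* stand-in for ATA_PROBE_MAX_TRIES */

/* Returns the remaining tries after a probe step failed with -err;
 * 0 means "disable the device".  *drop_spd mirrors the -EIO case. */
static int probe_retry_policy(int err, int tries, int *drop_spd)
{
        *drop_spd = 0;
        switch (err) {
        case EINVAL:             /* fundamental failure */
        case ENODEV:             /* nothing there */
                return 0;        /* give up at once */
        case EIO:
                *drop_spd = 1;   /* ata_down_sata_spd_limit() in the patch */
                /* fall through */
        default:
                return tries - 1;
        }
}

int main(void)
{
        int errs[] = { EIO, EIO, ENODEV };   /* two soft failures, then fatal */
        int tries = PROBE_MAX_TRIES, drop;

        for (unsigned int i = 0; i < sizeof(errs) / sizeof(errs[0]); i++) {
                tries = probe_retry_policy(errs[i], tries, &drop);
                printf("err=-%d -> tries=%d%s\n", errs[i], tries,
                       drop ? " (limit SATA link speed)" : "");
                if (!tries) {
                        printf("device would be disabled\n");
                        break;
                }
        }
        return 0;
}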
/**
@@ -1469,7 +1503,7 @@
void ata_port_probe(struct ata_port *ap)
{
- ap->flags &= ~ATA_FLAG_PORT_DISABLED;
+ ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
@@ -1543,7 +1577,7 @@
else
ata_port_disable(ap);
- if (ap->flags & ATA_FLAG_PORT_DISABLED)
+ if (ap->flags & ATA_FLAG_DISABLED)
return;
if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
@@ -1568,7 +1602,7 @@
void sata_phy_reset(struct ata_port *ap)
{
__sata_phy_reset(ap);
- if (ap->flags & ATA_FLAG_PORT_DISABLED)
+ if (ap->flags & ATA_FLAG_DISABLED)
return;
ata_bus_reset(ap);
}
@@ -1607,7 +1641,121 @@
{
ap->device[0].class = ATA_DEV_NONE;
ap->device[1].class = ATA_DEV_NONE;
- ap->flags |= ATA_FLAG_PORT_DISABLED;
+ ap->flags |= ATA_FLAG_DISABLED;
+}
+
+/**
+ * ata_down_sata_spd_limit - adjust SATA spd limit downward
+ * @ap: Port to adjust SATA spd limit for
+ *
+ * Adjust SATA spd limit of @ap downward. Note that this
+ * function only adjusts the limit. The change must be applied
+ * using ata_set_sata_spd().
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int ata_down_sata_spd_limit(struct ata_port *ap)
+{
+ u32 spd, mask;
+ int highbit;
+
+ if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+ return -EOPNOTSUPP;
+
+ mask = ap->sata_spd_limit;
+ if (mask <= 1)
+ return -EINVAL;
+ highbit = fls(mask) - 1;
+ mask &= ~(1 << highbit);
+
+ spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf;
+ if (spd <= 1)
+ return -EINVAL;
+ spd--;
+ mask &= (1 << spd) - 1;
+ if (!mask)
+ return -EINVAL;
+
+ ap->sata_spd_limit = mask;
+
+ printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n",
+ ap->id, sata_spd_string(fls(mask)));
+
+ return 0;
+}
+
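The bit arithmetic in ata_down_sata_spd_limit() reads more easily with concrete numbers: sata_spd_limit is a mask of allowed speeds (bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps), the highest allowed bit is cleared, and the result is further capped below the speed currently reported in SStatus. A small user-space sketch of the same steps, with a local fls() stand-in for the kernel helper:

#include <stdio.h>

/* stand-in for the kernel's fls(): 1-based index of the highest set bit */
static int fls_local(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

/* mask: allowed speeds; cur_spd: the SStatus SPD field (1-based).
 * Returns the reduced mask, or 0 when no lower speed is left. */
static unsigned int down_spd_limit(unsigned int mask, unsigned int cur_spd)
{
        int highbit;

        if (mask <= 1)
                return 0;
        highbit = fls_local(mask) - 1;
        mask &= ~(1u << highbit);          /* drop the fastest allowed speed */

        if (cur_spd <= 1)
                return 0;
        mask &= (1u << (cur_spd - 1)) - 1; /* cap below the current speed */
        return mask;
}

int main(void)
{
        /* Gen1+Gen2 allowed (0x3), link currently at Gen2 (SPD == 2):
         * only bit 0 survives, i.e. the limit becomes 1.5 Gbps. */
        printf("new limit mask = 0x%x\n", down_spd_limit(0x3, 2));
        return 0;
}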
+static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
+{
+ u32 spd, limit;
+
+ if (ap->sata_spd_limit == UINT_MAX)
+ limit = 0;
+ else
+ limit = fls(ap->sata_spd_limit);
+
+ spd = (*scontrol >> 4) & 0xf;
+ *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
+
+ return spd != limit;
+}
+
+/**
+ * ata_set_sata_spd_needed - is SATA spd configuration needed
+ * @ap: Port in question
+ *
+ * Test whether the spd limit in SControl matches
+ * @ap->sata_spd_limit. This function is used to determine
+ * whether hardreset is necessary to apply SATA spd
+ * configuration.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 1 if SATA spd configuration is needed, 0 otherwise.
+ */
+int ata_set_sata_spd_needed(struct ata_port *ap)
+{
+ u32 scontrol;
+
+ if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+ return 0;
+
+ scontrol = scr_read(ap, SCR_CONTROL);
+
+ return __ata_set_sata_spd_needed(ap, &scontrol);
+}
+
+/**
+ * ata_set_sata_spd - set SATA spd according to spd limit
+ * @ap: Port to set SATA spd for
+ *
+ * Set SATA spd of @ap according to sata_spd_limit.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 if spd doesn't need to be changed, 1 if spd has been
+ * changed. -EOPNOTSUPP if SCR registers are inaccessible.
+ */
+static int ata_set_sata_spd(struct ata_port *ap)
+{
+ u32 scontrol;
+
+ if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+ return -EOPNOTSUPP;
+
+ scontrol = scr_read(ap, SCR_CONTROL);
+ if (!__ata_set_sata_spd_needed(ap, &scontrol))
+ return 0;
+
+ scr_write(ap, SCR_CONTROL, scontrol);
+ return 1;
}
/*
@@ -1758,11 +1906,62 @@
return 0;
}
+/**
+ * ata_down_xfermask_limit - adjust dev xfer masks downward
+ * @ap: Port associated with device @dev
+ * @dev: Device to adjust xfer masks
+ * @force_pio0: Force PIO0
+ *
+ * Adjust xfer masks of @dev downward. Note that this function
+ * does not apply the change. Invoking ata_set_mode() afterwards
+ * will apply the limit.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
+ int force_pio0)
+{
+ unsigned long xfer_mask;
+ int highbit;
+
+ xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
+ dev->udma_mask);
+
+ if (!xfer_mask)
+ goto fail;
+ /* don't gear down to MWDMA from UDMA, go directly to PIO */
+ if (xfer_mask & ATA_MASK_UDMA)
+ xfer_mask &= ~ATA_MASK_MWDMA;
+
+ highbit = fls(xfer_mask) - 1;
+ xfer_mask &= ~(1 << highbit);
+ if (force_pio0)
+ xfer_mask &= 1 << ATA_SHIFT_PIO;
+ if (!xfer_mask)
+ goto fail;
+
+ ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+ &dev->udma_mask);
+
+ printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n",
+ ap->id, dev->devno, ata_mode_string(xfer_mask));
+
+ return 0;
+
+ fail:
+ return -EINVAL;
+}
+
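ata_down_xfermask_limit() applies the same "clear the highest bit" idea to the packed PIO/MWDMA/UDMA mask, with the extra rule that UDMA never gears down into MWDMA. The bit layout below is purely illustrative (it is not the kernel's ATA_SHIFT_* layout), but the stepping logic mirrors the function above:

#include <stdio.h>

/* illustrative packing only: PIO in bits 0-7, MWDMA in 8-15, UDMA in 16-23 */
#define SHIFT_PIO       0
#define SHIFT_MWDMA     8
#define SHIFT_UDMA      16
#define MASK_MWDMA      (0xffu << SHIFT_MWDMA)
#define MASK_UDMA       (0xffu << SHIFT_UDMA)

static int fls_local(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

/* Returns the reduced packed mask, or 0 when no slower mode is left. */
static unsigned int down_xfermask(unsigned int xfer_mask, int force_pio0)
{
        int highbit;

        if (!xfer_mask)
                return 0;
        /* don't gear down from UDMA into MWDMA, go directly to PIO */
        if (xfer_mask & MASK_UDMA)
                xfer_mask &= ~MASK_MWDMA;

        highbit = fls_local(xfer_mask) - 1;
        xfer_mask &= ~(1u << highbit);
        if (force_pio0)
                xfer_mask &= 1u << SHIFT_PIO;   /* PIO0 only */
        return xfer_mask;
}

int main(void)
{
        /* device supports UDMA0-5, MWDMA0-2 and PIO0-4 */
        unsigned int m = (0x3fu << SHIFT_UDMA) | (0x07u << SHIFT_MWDMA) | 0x1f;

        m = down_xfermask(m, 0);
        printf("after one step: 0x%06x\n", m);  /* UDMA5 gone, MWDMA cleared */
        m = down_xfermask(m, 1);
        printf("forced to PIO0: 0x%06x\n", m);  /* only PIO0 remains */
        return 0;
}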
static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
unsigned int err_mask;
int rc;
+ dev->flags &= ~ATA_DFLAG_PIO;
if (dev->xfer_shift == ATA_SHIFT_PIO)
dev->flags |= ATA_DFLAG_PIO;
@@ -1775,12 +1974,8 @@
}
rc = ata_dev_revalidate(ap, dev, 0);
- if (rc) {
- printk(KERN_ERR
- "ata%u: failed to revalidate after set xfermode\n",
- ap->id);
+ if (rc)
return rc;
- }
DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
dev->xfer_shift, (int)dev->xfer_mode);
@@ -1806,7 +2001,7 @@
* RETURNS:
* 0 on success, negative errno otherwise
*/
-static int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
struct ata_device *dev;
int i, rc = 0, used_dma = 0, found = 0;
@@ -2069,7 +2264,7 @@
* Obtains host_set lock.
*
* SIDE EFFECTS:
- * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
+ * Sets ATA_FLAG_DISABLED if bus reset fails.
*/
void ata_bus_reset(struct ata_port *ap)
@@ -2179,7 +2374,14 @@
void ata_std_probeinit(struct ata_port *ap)
{
if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
+ u32 spd;
+
sata_phy_resume(ap);
+
+ spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4;
+ if (spd)
+ ap->sata_spd_limit &= (1 << spd) - 1;
+
if (sata_dev_present(ap))
ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
@@ -2267,18 +2469,30 @@
DPRINTK("ENTER\n");
- /* Issue phy wake/reset */
+ if (ata_set_sata_spd_needed(ap)) {
+ /* SATA spec says nothing about how to reconfigure
+ * spd. To be on the safe side, turn off phy during
+ * reconfiguration. This works for at least ICH7 AHCI
+ * and Sil3124.
+ */
+ scontrol = scr_read(ap, SCR_CONTROL);
+ scontrol = (scontrol & 0x0f0) | 0x302;
+ scr_write_flush(ap, SCR_CONTROL, scontrol);
+
+ ata_set_sata_spd(ap);
+ }
+
+ /* issue phy wake/reset */
scontrol = scr_read(ap, SCR_CONTROL);
scontrol = (scontrol & 0x0f0) | 0x301;
scr_write_flush(ap, SCR_CONTROL, scontrol);
- /*
- * Couldn't find anything in SATA I/II specs, but AHCI-1.1
+ /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
* 10.4.2 says at least 1 ms.
*/
msleep(1);
- /* Bring phy back */
+ /* bring phy back */
sata_phy_resume(ap);
/* TODO: phy layer with polling, timeouts, etc. */
@@ -2385,9 +2599,9 @@
ata_std_postreset, classes);
}
-static int ata_do_reset(struct ata_port *ap,
- ata_reset_fn_t reset, ata_postreset_fn_t postreset,
- int verbose, unsigned int *classes)
+int ata_do_reset(struct ata_port *ap,
+ ata_reset_fn_t reset, ata_postreset_fn_t postreset,
+ int verbose, unsigned int *classes)
{
int i, rc;
@@ -2458,21 +2672,42 @@
if (probeinit)
probeinit(ap);
- if (softreset) {
+ if (softreset && !ata_set_sata_spd_needed(ap)) {
rc = ata_do_reset(ap, softreset, postreset, 0, classes);
if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
goto done;
+ printk(KERN_INFO "ata%u: softreset failed, will try "
+ "hardreset in 5 secs\n", ap->id);
+ ssleep(5);
}
if (!hardreset)
goto done;
- rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
- if (rc || classes[0] != ATA_DEV_UNKNOWN)
- goto done;
+ while (1) {
+ rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
+ if (rc == 0) {
+ if (classes[0] != ATA_DEV_UNKNOWN)
+ goto done;
+ break;
+ }
- if (softreset)
+ if (ata_down_sata_spd_limit(ap))
+ goto done;
+
+ printk(KERN_INFO "ata%u: hardreset failed, will retry "
+ "in 5 secs\n", ap->id);
+ ssleep(5);
+ }
+
+ if (softreset) {
+ printk(KERN_INFO "ata%u: hardreset succeeded without "
+ "classification, will retry softreset in 5 secs\n",
+ ap->id);
+ ssleep(5);
+
rc = ata_do_reset(ap, softreset, postreset, 0, classes);
+ }
done:
if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
@@ -2560,15 +2795,14 @@
int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
int post_reset)
{
- unsigned int class;
- u16 *id;
+ unsigned int class = dev->class;
+ u16 *id = NULL;
int rc;
- if (!ata_dev_enabled(dev))
- return -ENODEV;
-
- class = dev->class;
- id = NULL;
+ if (!ata_dev_enabled(dev)) {
+ rc = -ENODEV;
+ goto fail;
+ }
/* allocate & read ID data */
rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
@@ -2585,7 +2819,9 @@
dev->id = id;
/* configure device according to the new ID */
- return ata_dev_configure(ap, dev, 0);
+ rc = ata_dev_configure(ap, dev, 0);
+ if (rc == 0)
+ return 0;
fail:
printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
@@ -2687,23 +2923,34 @@
unsigned long xfer_mask;
int i;
- xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
- ap->udma_mask);
+ xfer_mask = ata_pack_xfermask(ap->pio_mask,
+ ap->mwdma_mask, ap->udma_mask);
+
+ /* Apply cable rule here. Don't apply it early because when
+ * we handle hot plug the cable type can itself change.
+ */
+ if (ap->cbl == ATA_CBL_PATA40)
+ xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
/* FIXME: Use port-wide xfermask for now */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *d = &ap->device[i];
- if (!ata_dev_enabled(d))
+
+ if (ata_dev_absent(d))
continue;
- xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
- d->udma_mask);
+
+ if (ata_dev_disabled(d)) {
+ /* to avoid violating device selection timing */
+ xfer_mask &= ata_pack_xfermask(d->pio_mask,
+ UINT_MAX, UINT_MAX);
+ continue;
+ }
+
+ xfer_mask &= ata_pack_xfermask(d->pio_mask,
+ d->mwdma_mask, d->udma_mask);
xfer_mask &= ata_id_xfermask(d->id);
if (ata_dma_blacklisted(d))
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- /* Apply cable rule here. Don't apply it early because when
- we handle hot plug the cable type can itself change */
- if (ap->cbl == ATA_CBL_PATA40)
- xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
}
if (ata_dma_blacklisted(dev))
@@ -2714,11 +2961,12 @@
if (hs->simplex_claimed)
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
}
+
if (ap->ops->mode_filter)
xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
- ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
- &dev->udma_mask);
+ ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
+ &dev->mwdma_mask, &dev->udma_mask);
}
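The cable rule that moved to the top of this hunk is worth unpacking: on a 40-wire PATA cable the 0xF8 mask clears UDMA3 and above, leaving only UDMA0-2 (UDMA/33 and slower). A tiny standalone illustration, again with an assumed (not the kernel's) UDMA bit position:

#include <stdio.h>

#define SHIFT_UDMA 16                    /* illustrative UDMA bit position */

int main(void)
{
        unsigned int xfer_mask = 0x7fu << SHIFT_UDMA;  /* device claims UDMA0-6 */

        /* 40-wire cable: knock out UDMA3 and up */
        xfer_mask &= ~(0xF8u << SHIFT_UDMA);

        printf("UDMA bits left: 0x%02x\n", (xfer_mask >> SHIFT_UDMA) & 0xff);
        return 0;
}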
/**
@@ -2752,7 +3000,7 @@
tf.protocol = ATA_PROT_NODATA;
tf.nsect = dev->xfer_mode;
- err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+ err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
@@ -2792,7 +3040,7 @@
tf.nsect = sectors;
tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
- err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+ err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
@@ -3838,8 +4086,8 @@
static void ata_pio_task(void *_data)
{
- struct ata_port *ap = _data;
- struct ata_queued_cmd *qc;
+ struct ata_queued_cmd *qc = _data;
+ struct ata_port *ap = qc->ap;
u8 status;
int poll_next;
@@ -4392,7 +4640,7 @@
ap = host_set->ports[i];
if (ap &&
- !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -4424,7 +4672,7 @@
tf.flags |= ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
- err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+ err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
if (err)
printk(KERN_ERR "%s: ata command failed: %d\n",
__FUNCTION__, err);
@@ -4613,7 +4861,7 @@
host->unique_id = ata_unique_id++;
host->max_cmd_len = 12;
- ap->flags = ATA_FLAG_PORT_DISABLED;
+ ap->flags = ATA_FLAG_DISABLED;
ap->id = host->unique_id;
ap->host = host;
ap->ctl = ATA_DEVCTL_OBS;
@@ -4628,6 +4876,7 @@
ap->flags |= ent->host_flags;
ap->ops = ent->port_ops;
ap->cbl = ATA_CBL_NONE;
+ ap->sata_spd_limit = UINT_MAX;
ap->active_tag = ATA_TAG_POISON;
ap->last_ctl = 0xFF;
@@ -5083,7 +5332,6 @@
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
-EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
@@ -5123,15 +5371,12 @@
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
-EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
-EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
@@ -5153,3 +5398,8 @@
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
+
+EXPORT_SYMBOL_GPL(ata_scsi_error);
+EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
+EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
new file mode 100644
index 0000000..e73f561
--- /dev/null
+++ b/drivers/scsi/libata-eh.c
@@ -0,0 +1,264 @@
+/*
+ * libata-eh.c - libata error handling
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2006 Tejun Heo <htejun@gmail.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/ and
+ * http://www.sata-io.org/
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <linux/libata.h>
+
+#include "libata.h"
+
+/**
+ * ata_scsi_timed_out - SCSI layer time out callback
+ * @cmd: timed out SCSI command
+ *
+ * Handles SCSI layer timeout. We race with normal completion of
+ * the qc for @cmd. If the qc is already gone, we lose and let
+ * the scsi command finish (EH_HANDLED). Otherwise, the qc has
+ * timed out and EH should be invoked. Prevent ata_qc_complete()
+ * from finishing it by setting EH_SCHEDULED and return
+ * EH_NOT_HANDLED.
+ *
+ * LOCKING:
+ * Called from timer context
+ *
+ * RETURNS:
+ * EH_HANDLED or EH_NOT_HANDLED
+ */
+enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+ unsigned long flags;
+ struct ata_queued_cmd *qc;
+ enum scsi_eh_timer_return ret = EH_HANDLED;
+
+ DPRINTK("ENTER\n");
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (qc) {
+ WARN_ON(qc->scsicmd != cmd);
+ qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ ret = EH_NOT_HANDLED;
+ }
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ DPRINTK("EXIT, ret=%d\n", ret);
+ return ret;
+}
+
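The race described in the comment above reduces to a flag that both paths test under the host_set lock: the timeout path marks the qc with EH_SCHEDULED before normal completion can finish it, so exactly one side completes the command. A single-threaded model of that rule (names are local stand-ins and the locking is elided):

#include <stdio.h>

#define QCFLAG_EH_SCHEDULED     (1u << 0)   /* stand-in for ATA_QCFLAG_EH_SCHEDULED */
#define ERR_TIMEOUT             (1u << 1)   /* stand-in for AC_ERR_TIMEOUT */

enum eh_ret { RET_HANDLED, RET_NOT_HANDLED };

struct fake_qc {
        unsigned int flags;
        unsigned int err_mask;
};

/* timeout path: if the qc still exists, claim it for EH */
static enum eh_ret timed_out(struct fake_qc *qc)
{
        if (!qc)
                return RET_HANDLED;          /* completion already won the race */
        qc->flags |= QCFLAG_EH_SCHEDULED;
        qc->err_mask |= ERR_TIMEOUT;
        return RET_NOT_HANDLED;              /* hand the command to SCSI EH */
}

/* completion path: back off if EH has already claimed the qc */
static int try_complete(struct fake_qc *qc)
{
        if (qc->flags & QCFLAG_EH_SCHEDULED)
                return 0;                    /* EH owns it now */
        /* normal completion would run here */
        return 1;
}

int main(void)
{
        struct fake_qc qc = { 0, 0 };

        if (timed_out(&qc) == RET_NOT_HANDLED)
                printf("EH scheduled, err_mask=0x%x\n", qc.err_mask);
        printf("late completion %s\n",
               try_complete(&qc) ? "ran" : "was suppressed");
        return 0;
}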
+/**
+ * ata_scsi_error - SCSI layer error handler callback
+ * @host: SCSI host on which error occurred
+ *
+ * Handles SCSI-layer-thrown error events.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ *
+ * RETURNS:
+ * Zero.
+ */
+int ata_scsi_error(struct Scsi_Host *host)
+{
+ struct ata_port *ap = (struct ata_port *)&host->hostdata[0];
+
+ DPRINTK("ENTER\n");
+
+ /* synchronize with IRQ handler and port task */
+ spin_unlock_wait(&ap->host_set->lock);
+ ata_port_flush_task(ap);
+
+ WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
+
+ ap->ops->eng_timeout(ap);
+
+ WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
+
+ scsi_eh_flush_done_q(&ap->eh_done_q);
+
+ DPRINTK("EXIT\n");
+ return 0;
+}
+
+/**
+ * ata_qc_timeout - Handle timeout of queued command
+ * @qc: Command that timed out
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on port @ap has not
+ * completed after a specified length of time. Handle this
+ * condition by disabling DMA (if necessary) and completing
+ * transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
+static void ata_qc_timeout(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_host_set *host_set = ap->host_set;
+ u8 host_stat = 0, drv_stat;
+ unsigned long flags;
+
+ DPRINTK("ENTER\n");
+
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ spin_lock_irqsave(&host_set->lock, flags);
+
+ switch (qc->tf.protocol) {
+
+ case ATA_PROT_DMA:
+ case ATA_PROT_ATAPI_DMA:
+ host_stat = ap->ops->bmdma_status(ap);
+
+ /* before we do anything else, clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+
+ /* fall through */
+
+ default:
+ ata_altstatus(ap);
+ drv_stat = ata_chk_status(ap);
+
+ /* ack bmdma irq events */
+ ap->ops->irq_clear(ap);
+
+ printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
+ ap->id, qc->tf.command, drv_stat, host_stat);
+
+ /* complete taskfile transaction */
+ qc->err_mask |= ac_err_mask(drv_stat);
+ break;
+ }
+
+ spin_unlock_irqrestore(&host_set->lock, flags);
+
+ ata_eh_qc_complete(qc);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_eng_timeout - Handle timeout of queued command
+ * @ap: Port on which timed-out command is active
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on port @ap has not
+ * completed after a specified length of time. Handle this
+ * condition by disabling DMA (if necessary) and completing
+ * transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
+void ata_eng_timeout(struct ata_port *ap)
+{
+ DPRINTK("ENTER\n");
+
+ ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
+
+ DPRINTK("EXIT\n");
+}
+
+static void ata_eh_scsidone(struct scsi_cmnd *scmd)
+{
+ /* nada */
+}
+
+static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ qc->scsidone = ata_eh_scsidone;
+ __ata_qc_complete(qc);
+ WARN_ON(ata_tag_valid(qc->tag));
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+}
+
+/**
+ * ata_eh_qc_complete - Complete an active ATA command from EH
+ * @qc: Command to complete
+ *
+ * Indicate to the mid and upper layers that an ATA command has
+ * completed. To be used from EH.
+ */
+void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ scmd->retries = scmd->allowed;
+ __ata_eh_qc_complete(qc);
+}
+
+/**
+ * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
+ * @qc: Command to retry
+ *
+ * Indicate to the mid and upper layers that an ATA command
+ * should be retried. To be used from EH.
+ *
+ * SCSI midlayer limits the number of retries to scmd->allowed.
+ * scmd->retries is decremented for commands which get retried
+ * due to unrelated failures (qc->err_mask is zero).
+ */
+void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ if (!qc->err_mask && scmd->retries)
+ scmd->retries--;
+ __ata_eh_qc_complete(qc);
+}
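The two EH completion helpers above encode a small retry-accounting rule: ata_eh_qc_complete() sets retries = allowed so the midlayer will not retry the command, while ata_eh_qc_retry() refunds one retry when the failure was not the command's own (qc->err_mask == 0). A toy model with a stand-in scsi_cmnd:

#include <stdio.h>

struct fake_scmd {
        int retries;    /* retries consumed so far, as the midlayer counts them */
        int allowed;    /* retry budget */
};

/* finish for good: exhaust the budget so the midlayer won't retry */
static void eh_qc_complete(struct fake_scmd *scmd)
{
        scmd->retries = scmd->allowed;
}

/* ask for a retry: refund one credit if the qc itself didn't fail */
static void eh_qc_retry(struct fake_scmd *scmd, unsigned int err_mask)
{
        if (!err_mask && scmd->retries)
                scmd->retries--;
}

int main(void)
{
        struct fake_scmd a = { .retries = 2, .allowed = 5 };
        struct fake_scmd b = { .retries = 2, .allowed = 5 };

        eh_qc_complete(&a);
        printf("complete: retries=%d allowed=%d (no further retries)\n",
               a.retries, a.allowed);

        eh_qc_retry(&b, 0);     /* innocent command caught up in EH */
        printf("retry:    retries=%d allowed=%d (retry doesn't count)\n",
               b.retries, b.allowed);
        return 0;
}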
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index c1a4b29..745fc26 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -53,7 +53,6 @@
typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
static struct ata_device *
ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
#define RW_RECOVERY_MPAGE 0x1
#define RW_RECOVERY_MPAGE_LEN 12
@@ -546,16 +545,11 @@
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
/*
- * Read the controller registers.
- */
- WARN_ON(qc->ap->ops->tf_read == NULL);
- qc->ap->ops->tf_read(qc->ap, tf);
-
- /*
* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
- if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ if (qc->err_mask ||
+ tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
&sb[1], &sb[2], &sb[3]);
sb[1] &= 0x0f;
@@ -621,16 +615,11 @@
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
/*
- * Read the controller registers.
- */
- WARN_ON(qc->ap->ops->tf_read == NULL);
- qc->ap->ops->tf_read(qc->ap, tf);
-
- /*
* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
- if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ if (qc->err_mask ||
+ tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
&sb[2], &sb[12], &sb[13]);
sb[2] &= 0x0f;
@@ -724,141 +713,6 @@
}
/**
- * ata_scsi_timed_out - SCSI layer time out callback
- * @cmd: timed out SCSI command
- *
- * Handles SCSI layer timeout. We race with normal completion of
- * the qc for @cmd. If the qc is already gone, we lose and let
- * the scsi command finish (EH_HANDLED). Otherwise, the qc has
- * timed out and EH should be invoked. Prevent ata_qc_complete()
- * from finishing it by setting EH_SCHEDULED and return
- * EH_NOT_HANDLED.
- *
- * LOCKING:
- * Called from timer context
- *
- * RETURNS:
- * EH_HANDLED or EH_NOT_HANDLED
- */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *host = cmd->device->host;
- struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
- unsigned long flags;
- struct ata_queued_cmd *qc;
- enum scsi_eh_timer_return ret = EH_HANDLED;
-
- DPRINTK("ENTER\n");
-
- spin_lock_irqsave(&ap->host_set->lock, flags);
- qc = ata_qc_from_tag(ap, ap->active_tag);
- if (qc) {
- WARN_ON(qc->scsicmd != cmd);
- qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
- qc->err_mask |= AC_ERR_TIMEOUT;
- ret = EH_NOT_HANDLED;
- }
- spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
- DPRINTK("EXIT, ret=%d\n", ret);
- return ret;
-}
-
-/**
- * ata_scsi_error - SCSI layer error handler callback
- * @host: SCSI host on which error occurred
- *
- * Handles SCSI-layer-thrown error events.
- *
- * LOCKING:
- * Inherited from SCSI layer (none, can sleep)
- *
- * RETURNS:
- * Zero.
- */
-
-int ata_scsi_error(struct Scsi_Host *host)
-{
- struct ata_port *ap;
- unsigned long flags;
-
- DPRINTK("ENTER\n");
-
- ap = (struct ata_port *) &host->hostdata[0];
-
- spin_lock_irqsave(&ap->host_set->lock, flags);
- WARN_ON(ap->flags & ATA_FLAG_IN_EH);
- ap->flags |= ATA_FLAG_IN_EH;
- WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
- spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
- ata_port_flush_task(ap);
-
- ap->ops->eng_timeout(ap);
-
- WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
- scsi_eh_flush_done_q(&ap->eh_done_q);
-
- spin_lock_irqsave(&ap->host_set->lock, flags);
- ap->flags &= ~ATA_FLAG_IN_EH;
- spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
- DPRINTK("EXIT\n");
- return 0;
-}
-
-static void ata_eh_scsidone(struct scsi_cmnd *scmd)
-{
- /* nada */
-}
-
-static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scsi_cmnd *scmd = qc->scsicmd;
- unsigned long flags;
-
- spin_lock_irqsave(&ap->host_set->lock, flags);
- qc->scsidone = ata_eh_scsidone;
- __ata_qc_complete(qc);
- WARN_ON(ata_tag_valid(qc->tag));
- spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
- scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
-}
-
-/**
- * ata_eh_qc_complete - Complete an active ATA command from EH
- * @qc: Command to complete
- *
- * Indicate to the mid and upper layers that an ATA command has
- * completed. To be used from EH.
- */
-void ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
- struct scsi_cmnd *scmd = qc->scsicmd;
- scmd->retries = scmd->allowed;
- __ata_eh_qc_complete(qc);
-}
-
-/**
- * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
- * @qc: Command to retry
- *
- * Indicate to the mid and upper layers that an ATA command
- * should be retried. To be used from EH.
- *
- * SCSI midlayer limits the number of retries to scmd->allowed.
- * This function might need to adjust scmd->retries for commands
- * which get retried due to unrelated NCQ failures.
- */
-void ata_eh_qc_retry(struct ata_queued_cmd *qc)
-{
- __ata_eh_qc_complete(qc);
-}
-
-/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
* @scsicmd: SCSI command to translate
@@ -1197,6 +1051,7 @@
u64 block;
u32 n_block;
+ qc->flags |= ATA_QCFLAG_IO;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
@@ -1343,11 +1198,14 @@
*/
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense)) {
+ qc->ap->ops->tf_read(qc->ap, &qc->tf);
ata_gen_ata_desc_sense(qc);
} else {
if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
+ qc->ap->ops->tf_read(qc->ap, &qc->tf);
+
/* TODO: decide which descriptor format to use
* for 48b LBA devices and call that here
* instead of the fixed desc, which is only
@@ -2139,13 +1997,15 @@
static void atapi_sense_complete(struct ata_queued_cmd *qc)
{
- if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
+ if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
/* FIXME: not quite right; we don't want the
* translation of taskfile registers into
* sense descriptors, since that's only
* correct for ATA, not ATAPI
*/
+ qc->ap->ops->tf_read(qc->ap, &qc->tf);
ata_gen_ata_desc_sense(qc);
+ }
qc->scsidone(qc->scsicmd);
ata_qc_free(qc);
@@ -2213,17 +2073,15 @@
cmd->result = SAM_STAT_CHECK_CONDITION;
atapi_request_sense(qc);
return;
- }
-
- else if (unlikely(err_mask))
+ } else if (unlikely(err_mask)) {
/* FIXME: not quite right; we don't want the
* translation of taskfile registers into
* sense descriptors, since that's only
* correct for ATA, not ATAPI
*/
+ qc->ap->ops->tf_read(qc->ap, &qc->tf);
ata_gen_ata_desc_sense(qc);
-
- else {
+ } else {
u8 *scsicmd = cmd->cmnd;
if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
@@ -2737,7 +2595,7 @@
struct ata_device *dev;
unsigned int i;
- if (ap->flags & ATA_FLAG_PORT_DISABLED)
+ if (ap->flags & ATA_FLAG_DISABLED)
return;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 1c755b1..31efc2e 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,7 +45,20 @@
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
struct ata_device *dev);
extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
+extern void ata_dev_disable(struct ata_port *ap, struct ata_device *dev);
extern void ata_port_flush_task(struct ata_port *ap);
+extern unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, void *buf, unsigned int buflen);
+extern int ata_down_sata_spd_limit(struct ata_port *ap);
+extern int ata_set_sata_spd_needed(struct ata_port *ap);
+extern int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
+ int force_pio0);
+extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern int ata_do_reset(struct ata_port *ap,
+ ata_reset_fn_t reset,
+ ata_postreset_fn_t postreset,
+ int verbose, unsigned int *classes);
extern void ata_qc_free(struct ata_queued_cmd *qc);
extern void ata_qc_issue(struct ata_queued_cmd *qc);
extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -60,7 +73,6 @@
extern struct scsi_transport_template ata_scsi_transport_template;
extern void ata_scsi_scan_host(struct ata_port *ap);
-extern int ata_scsi_error(struct Scsi_Host *host);
extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
@@ -90,4 +102,7 @@
unsigned int (*actor) (struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen));
+/* libata-eh.c */
+extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+
#endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index ff4fec9..a4dcb43 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -456,7 +456,7 @@
continue;
handled = 1;
adma_enter_reg_mode(ap);
- if (ap->flags & ATA_FLAG_PORT_DISABLED)
+ if (ap->flags & ATA_FLAG_DISABLED)
continue;
pp = ap->private_data;
if (!pp || pp->state != adma_state_pkt)
@@ -481,7 +481,7 @@
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
struct ata_queued_cmd *qc;
struct adma_port_priv *pp = ap->private_data;
if (!pp || pp->state != adma_state_mmio)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index dc54f29..fd9f217 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -1397,7 +1397,7 @@
}
}
- if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
+ if (ap && (ap->flags & ATA_FLAG_DISABLED))
continue;
err_mask = ac_err_mask(ata_status);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 8a99c38..be38f32 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -280,7 +280,7 @@
ap = host_set->ports[i];
if (ap &&
- !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index cf0eeba..322525d 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -535,7 +535,7 @@
ap = host_set->ports[i];
tmp = mask & (1 << (i + 1));
if (tmp && ap &&
- !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index a547c12..f868589 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -395,7 +395,7 @@
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
handled = 1;
- if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_pkt)
@@ -428,7 +428,7 @@
struct ata_port *ap;
ap = host_set->ports[port_no];
if (ap &&
- !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_mmio)
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 068c98a..c34f6da 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -770,7 +770,7 @@
for (i = 0; i < host_set->n_ports; i++)
if (status & (1 << i)) {
struct ata_port *ap = host_set->ports[i];
- if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
sil24_host_intr(host_set->ports[i]);
handled++;
} else
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 2d6b091..45c78c3 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -834,7 +834,7 @@
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
if (tmp && ap &&
- !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 5af6d5f..b7d6a31 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -229,7 +229,7 @@
handled++;
}
- if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);