@@ -50,7 +50,6 @@
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
-#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
@@ -387,9 +386,13 @@ static const char *ata_mode_string(unsigned int xfer_mask)
"PIO2",
"PIO3",
"PIO4",
+ "PIO5",
+ "PIO6",
"MWDMA0",
"MWDMA1",
"MWDMA2",
+ "MWDMA3",
+ "MWDMA4",
"UDMA/16",
"UDMA/25",
"UDMA/33",
@@ -613,8 +616,11 @@ ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
if (r_err)
*r_err = err;

- /* see if device passed diags */
- if (err == 1)
+ /* see if device passed diags: if master then continue and warn later */
+ if (err == 0 && device == 0)
+ /* diagnostic fail : do nothing _YET_ */
+ ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
+ else if (err == 1)
/* do nothing */ ;
else if ((device == 0) && (err == 0x81))
/* do nothing */ ;
@@ -876,6 +882,23 @@ static unsigned int ata_id_xfermask(const u16 *id)

mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

+ if (ata_id_is_cfa(id)) {
+ /*
+ * Process compact flash extended modes
+ */
+ int pio = id[163] & 0x7;
+ int dma = (id[163] >> 3) & 7;
+
+ if (pio)
+ pio_mask |= (1 << 5);
+ if (pio > 1)
+ pio_mask |= (1 << 6);
+ if (dma)
+ mwdma_mask |= (1 << 3);
+ if (dma > 1)
+ mwdma_mask |= (1 << 4);
+ }
+
udma_mask = 0;
if (id[ATA_ID_FIELD_VALID] & (1 << 2))
udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
@@ -1320,7 +1343,7 @@ static void ata_dev_config_ncq(struct ata_device *dev,
}

if (ap->flags & ATA_FLAG_NCQ) {
- hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
+ hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
dev->flags |= ATA_DFLAG_NCQ;
}

@@ -1334,12 +1357,13 @@ static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
int i;

- if (ap->host) {
- ap->host->max_cmd_len = 0;
+ if (ap->scsi_host) {
+ unsigned int len = 0;
+
for (i = 0; i < ATA_MAX_DEVICES; i++)
- ap->host->max_cmd_len = max_t(unsigned int,
- ap->host->max_cmd_len,
- ap->device[i].cdb_len);
+ len = max(len, ap->device[i].cdb_len);
+
+ ap->scsi_host->max_cmd_len = len;
}
}

@@ -1362,6 +1386,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
struct ata_port *ap = dev->ap;
const u16 *id = dev->id;
unsigned int xfer_mask;
+ char revbuf[7]; /* XYZ-99\0 */
int rc;

if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
@@ -1405,6 +1430,15 @@ int ata_dev_configure(struct ata_device *dev, int print_info)

/* ATA-specific feature tests */
if (dev->class == ATA_DEV_ATA) {
+ if (ata_id_is_cfa(id)) {
+ if (id[162] & 1) /* CPRM may make this media unusable */
+ ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
+ ap->id, dev->devno);
+ snprintf(revbuf, 7, "CFA");
+ }
+ else
+ snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
+
dev->n_sectors = ata_id_n_sectors(id);

if (ata_id_has_lba(id)) {
@@ -1423,9 +1457,9 @@ int ata_dev_configure(struct ata_device *dev, int print_info)

/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
- ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+ ata_dev_printk(dev, KERN_INFO, "%s, "
"max %s, %Lu sectors: %s %s\n",
- ata_id_major_version(id),
+ revbuf,
ata_mode_string(xfer_mask),
(unsigned long long)dev->n_sectors,
lba_desc, ncq_desc);
@@ -1446,9 +1480,9 @@ int ata_dev_configure(struct ata_device *dev, int print_info)

/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
- ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
+ ata_dev_printk(dev, KERN_INFO, "%s, "
"max %s, %Lu sectors: CHS %u/%u/%u\n",
- ata_id_major_version(id),
+ revbuf,
ata_mode_string(xfer_mask),
(unsigned long long)dev->n_sectors,
dev->cylinders, dev->heads,
@@ -1492,6 +1526,18 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
cdb_intr_string);
}

+ if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+ /* Let the user know. We don't want to disallow opens for
+ rescue purposes, or in case the vendor is just a blithering
+ idiot */
+ if (print_info) {
+ ata_dev_printk(dev, KERN_WARNING,
+"Drive reports diagnostics failure. This may indicate a drive\n");
+ ata_dev_printk(dev, KERN_WARNING,
+"fault or invalid emulation. Contact drive vendor for information.\n");
+ }
+ }
+
ata_set_port_max_cmd_len(ap);

/* limit bridge transfers to udma5, 200 sectors */
@@ -1533,7 +1579,7 @@ err_out_nosup:
* Zero on success, negative errno otherwise.
*/

-static int ata_bus_probe(struct ata_port *ap)
+int ata_bus_probe(struct ata_port *ap)
{
unsigned int classes[ATA_MAX_DEVICES];
int tries[ATA_MAX_DEVICES];
@@ -1637,7 +1683,7 @@ static int ata_bus_probe(struct ata_port *ap)
* Modify @ap data structure such that the system
* thinks that the entire port is enabled.
*
- * LOCKING: host_set lock, or some other form of
+ * LOCKING: host lock, or some other form of
* serialization.
*/

@@ -1775,7 +1821,7 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
* never attempt to probe or communicate with devices
* on this port.
*
- * LOCKING: host_set lock, or some other form of
+ * LOCKING: host lock, or some other form of
* serialization.
*/

@@ -1906,10 +1952,11 @@ int sata_set_spd(struct ata_port *ap)
* drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
*/
/*
- * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
* These were taken from ATA/ATAPI-6 standard, rev 0a, except
- * for PIO 5, which is a nonstandard extension and UDMA6, which
- * is currently supported only by Maxtor drives.
+ * for UDMA6, which is currently supported only by Maxtor drives.
+ *
+ * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
*/

static const struct ata_timing ata_timing[] = {
@@ -1919,6 +1966,8 @@ static const struct ata_timing ata_timing[] = {
{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

+ { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
+ { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
@@ -1933,7 +1982,8 @@ static const struct ata_timing ata_timing[] = {
{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

-/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
+ { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
+ { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
@@ -2229,8 +2279,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
/* Record simplex status. If we selected DMA then the other
* host channels are not permitted to do so.
*/
- if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
- ap->host_set->simplex_claimed = 1;
+ if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
+ ap->host->simplex_claimed = 1;

/* step5: chip specific finalisation */
if (ap->ops->post_set_mode)
@@ -2252,7 +2302,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
* other threads.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/

static inline void ata_tf_to_host(struct ata_port *ap,
@@ -2416,7 +2466,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
*
* LOCKING:
* PCI/etc. bus probe sem.
- * Obtains host_set lock.
+ * Obtains host lock.
*
* SIDE EFFECTS:
* Sets ATA_FLAG_DISABLED if bus reset fails.
@@ -3045,20 +3095,16 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
* known limits including host controller limits, device
* blacklist, etc...
*
- * FIXME: The current implementation limits all transfer modes to
- * the fastest of the lowested device on the port. This is not
- * required on most controllers.
- *
* LOCKING:
* None.
*/
static void ata_dev_xfermask(struct ata_device *dev)
{
struct ata_port *ap = dev->ap;
- struct ata_host_set *hs = ap->host_set;
+ struct ata_host *host = ap->host;
unsigned long xfer_mask;
- int i;

+ /* controller modes available */
xfer_mask = ata_pack_xfermask(ap->pio_mask,
ap->mwdma_mask, ap->udma_mask);
@@ -3068,34 +3114,31 @@ static void ata_dev_xfermask(struct ata_device *dev)
if (ap->cbl == ATA_CBL_PATA40)
xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

- /* FIXME: Use port-wide xfermask for now */
- for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *d = &ap->device[i];
+ xfer_mask &= ata_pack_xfermask(dev->pio_mask,
+ dev->mwdma_mask, dev->udma_mask);
+ xfer_mask &= ata_id_xfermask(dev->id);

- if (ata_dev_absent(d))
- continue;
-
- if (ata_dev_disabled(d)) {
- /* to avoid violating device selection timing */
- xfer_mask &= ata_pack_xfermask(d->pio_mask,
- UINT_MAX, UINT_MAX);
- continue;
- }
-
- xfer_mask &= ata_pack_xfermask(d->pio_mask,
- d->mwdma_mask, d->udma_mask);
- xfer_mask &= ata_id_xfermask(d->id);
- if (ata_dma_blacklisted(d))
- xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+ /*
+ * CFA Advanced TrueIDE timings are not allowed on a shared
+ * cable
+ */
+ if (ata_dev_pair(dev)) {
+ /* No PIO5 or PIO6 */
+ xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
+ /* No MWDMA3 or MWDMA 4 */
+ xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
}

- if (ata_dma_blacklisted(dev))
+ if (ata_dma_blacklisted(dev)) {
+ xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_dev_printk(dev, KERN_WARNING,
"device is on DMA blacklist, disabling DMA\n");
+ }

- if (hs->flags & ATA_HOST_SIMPLEX) {
- if (hs->simplex_claimed)
- xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+ if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
+ xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+ ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
+ "other device, disabling DMA\n");
}

if (ap->ops->mode_filter)
@@ -3185,7 +3228,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
* Unmap all mapped DMA memory associated with this command.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/

static void ata_sg_clean(struct ata_queued_cmd *qc)
@@ -3245,7 +3288,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
* associated with the current disk command.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*
*/
static void ata_fill_sg(struct ata_queued_cmd *qc)
@@ -3297,7 +3340,7 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
* supplied PACKET command.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*
* RETURNS: 0 when ATAPI DMA can be used
* nonzero otherwise
@@ -3319,7 +3362,7 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
* Prepare ATA taskfile for submission.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/
void ata_qc_prep(struct ata_queued_cmd *qc)
{
@@ -3341,7 +3384,7 @@ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
* to point to a single memory buffer, @buf of byte length @buflen.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
@@ -3372,7 +3415,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
* elements.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/

void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
@@ -3391,7 +3434,7 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
* DMA-map the memory buffer associated with queued_cmd @qc.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, negative on error.
@@ -3460,7 +3503,7 @@ skip_map:
* DMA-map the scatter-gather table associated with queued_cmd @qc.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, negative on error.
@@ -3969,7 +4012,7 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *q
* Finish @qc which is running on standard HSM.
*
* LOCKING:
- * If @in_wq is zero, spin_lock_irqsave(host_set lock).
+ * If @in_wq is zero, spin_lock_irqsave(host lock).
* Otherwise, none on entry and grabs host lock.
*/
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
@@ -3981,8 +4024,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
if (in_wq) {
spin_lock_irqsave(ap->lock, flags);

- /* EH might have kicked in while host_set lock
- * is released.
+ /* EH might have kicked in while host lock is
+ * released.
*/
qc = ata_qc_from_tag(ap, qc->tag);
if (qc) {
@@ -4347,7 +4390,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
* in case something prevents using it.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/
void ata_qc_free(struct ata_queued_cmd *qc)
{
@@ -4400,7 +4443,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
* command has completed, with either an ok or not-ok status.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/
void ata_qc_complete(struct ata_queued_cmd *qc)
{
@@ -4463,7 +4506,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* and commands are completed accordingly.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of completed commands on success, -errno otherwise.
@@ -4534,7 +4577,7 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
* writing the taskfile to hardware, starting the command.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*/
void ata_qc_issue(struct ata_queued_cmd *qc)
{
@@ -4595,7 +4638,7 @@ err:
* May be used as the qc_issue() entry in ata_port_operations.
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
@@ -4724,7 +4767,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
* handled via polling with interrupts disabled (nIEN bit).
*
* LOCKING:
- * spin_lock_irqsave(host_set lock)
+ * spin_lock_irqsave(host lock)
*
* RETURNS:
* One if interrupt was handled, zero if not (shared irq).
@@ -4811,14 +4854,14 @@ idle_irq:
/**
* ata_interrupt - Default ATA host interrupt handler
* @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host_set information structure
+ * @dev_instance: pointer to our ata_host information structure
* @regs: unused
*
* Default interrupt handler for PCI IDE devices. Calls
* ata_host_intr() for each port that is not disabled.
*
* LOCKING:
- * Obtains host_set lock during operation.
+ * Obtains host lock during operation.
*
* RETURNS:
* IRQ_NONE or IRQ_HANDLED.
@@ -4826,18 +4869,18 @@ idle_irq:

irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
- struct ata_host_set *host_set = dev_instance;
+ struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;

/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
- spin_lock_irqsave(&host_set->lock, flags);
+ spin_lock_irqsave(&host->lock, flags);

- for (i = 0; i < host_set->n_ports; i++) {
+ for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;

- ap = host_set->ports[i];
+ ap = host->ports[i];
if (ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
@@ -4849,7 +4892,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
}
}

- spin_unlock_irqrestore(&host_set->lock, flags);
+ spin_unlock_irqrestore(&host->lock, flags);

return IRQ_RETVAL(handled);
}
@@ -5014,15 +5057,15 @@ int ata_flush_cache(struct ata_device *dev)
return 0;
}

-static int ata_host_set_request_pm(struct ata_host_set *host_set,
- pm_message_t mesg, unsigned int action,
- unsigned int ehi_flags, int wait)
+static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
+ unsigned int action, unsigned int ehi_flags,
+ int wait)
{
unsigned long flags;
int i, rc;

- for (i = 0; i < host_set->n_ports; i++) {
- struct ata_port *ap = host_set->ports[i];
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];

/* Previous resume operation might still be in
* progress. Wait for PM_PENDING to clear.
@@ -5062,11 +5105,11 @@ static int ata_host_set_request_pm(struct ata_host_set *host_set,
}

/**
- * ata_host_set_suspend - suspend host_set
- * @host_set: host_set to suspend
+ * ata_host_suspend - suspend host
+ * @host: host to suspend
* @mesg: PM message
*
- * Suspend @host_set. Actual operation is performed by EH. This
+ * Suspend @host. Actual operation is performed by EH. This
* function requests EH to perform PM operations and waits for EH
* to finish.
*
@@ -5076,11 +5119,11 @@ static int ata_host_set_request_pm(struct ata_host_set *host_set,
* RETURNS:
* 0 on success, -errno on failure.
*/
-int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
int i, j, rc;

- rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
+ rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
if (rc)
goto fail;

@@ -5088,8 +5131,8 @@ int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
* This happens if hotplug occurs between completion of device
* suspension and here.
*/
- for (i = 0; i < host_set->n_ports; i++) {
- struct ata_port *ap = host_set->ports[i];
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];

for (j = 0; j < ATA_MAX_DEVICES; j++) {
struct ata_device *dev = &ap->device[j];
@@ -5104,30 +5147,30 @@ int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
}
}

- host_set->dev->power.power_state = mesg;
+ host->dev->power.power_state = mesg;
return 0;

fail:
- ata_host_set_resume(host_set);
+ ata_host_resume(host);
return rc;
}

/**
- * ata_host_set_resume - resume host_set
- * @host_set: host_set to resume
+ * ata_host_resume - resume host
+ * @host: host to resume
*
- * Resume @host_set. Actual operation is performed by EH. This
+ * Resume @host. Actual operation is performed by EH. This
* function requests EH to perform PM operations and returns.
* Note that all resume operations are performed parallely.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_host_set_resume(struct ata_host_set *host_set)
+void ata_host_resume(struct ata_host *host)
{
- ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
- ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
- host_set->dev->power.power_state = PMSG_ON;
+ ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
+ host->dev->power.power_state = PMSG_ON;
}

/**
@@ -5184,10 +5227,10 @@ void ata_port_stop (struct ata_port *ap)
ata_pad_free(ap, dev);
}

-void ata_host_stop (struct ata_host_set *host_set)
+void ata_host_stop (struct ata_host *host)
{
- if (host_set->mmio_base)
- iounmap(host_set->mmio_base);
+ if (host->mmio_base)
+ iounmap(host->mmio_base);
}

/**
@@ -5209,7 +5252,7 @@ void ata_dev_init(struct ata_device *dev)

/* High bits of dev->flags are used to record warm plug
* requests which occur asynchronously. Synchronize using
- * host_set lock.
+ * host lock.
*/
spin_lock_irqsave(ap->lock, flags);
dev->flags &= ~ATA_DFLAG_INIT_MASK;
@@ -5223,46 +5266,42 @@ void ata_dev_init(struct ata_device *dev)
}

/**
- * ata_host_init - Initialize an ata_port structure
+ * ata_port_init - Initialize an ata_port structure
* @ap: Structure to initialize
- * @host: associated SCSI mid-layer structure
- * @host_set: Collection of hosts to which @ap belongs
+ * @host: Collection of hosts to which @ap belongs
* @ent: Probe information provided by low-level driver
* @port_no: Port number associated with this ata_port
*
- * Initialize a new ata_port structure, and its associated
- * scsi_host.
+ * Initialize a new ata_port structure.
*
* LOCKING:
* Inherited from caller.
*/
-static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
- struct ata_host_set *host_set,
- const struct ata_probe_ent *ent, unsigned int port_no)
+void ata_port_init(struct ata_port *ap, struct ata_host *host,
+ const struct ata_probe_ent *ent, unsigned int port_no)
{
unsigned int i;

- host->max_id = 16;
- host->max_lun = 1;
- host->max_channel = 1;
- host->unique_id = ata_unique_id++;
- host->max_cmd_len = 12;
-
- ap->lock = &host_set->lock;
+ ap->lock = &host->lock;
ap->flags = ATA_FLAG_DISABLED;
- ap->id = host->unique_id;
- ap->host = host;
+ ap->id = ata_unique_id++;
ap->ctl = ATA_DEVCTL_OBS;
- ap->host_set = host_set;
+ ap->host = host;
ap->dev = ent->dev;
ap->port_no = port_no;
- ap->hard_port_no =
- ent->legacy_mode ? ent->hard_port_no : port_no;
- ap->pio_mask = ent->pio_mask;
- ap->mwdma_mask = ent->mwdma_mask;
- ap->udma_mask = ent->udma_mask;
- ap->flags |= ent->host_flags;
- ap->ops = ent->port_ops;
+ if (port_no == 1 && ent->pinfo2) {
+ ap->pio_mask = ent->pinfo2->pio_mask;
+ ap->mwdma_mask = ent->pinfo2->mwdma_mask;
+ ap->udma_mask = ent->pinfo2->udma_mask;
+ ap->flags |= ent->pinfo2->flags;
+ ap->ops = ent->pinfo2->port_ops;
+ } else {
+ ap->pio_mask = ent->pio_mask;
+ ap->mwdma_mask = ent->mwdma_mask;
+ ap->udma_mask = ent->udma_mask;
+ ap->flags |= ent->port_flags;
+ ap->ops = ent->port_ops;
+ }
ap->hw_sata_spd_limit = UINT_MAX;
ap->active_tag = ATA_TAG_POISON;
ap->last_ctl = 0xFF;
@@ -5303,9 +5342,30 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
}

/**
- * ata_host_add - Attach low-level ATA driver to system
+ * ata_port_init_shost - Initialize SCSI host associated with ATA port
+ * @ap: ATA port to initialize SCSI host for
+ * @shost: SCSI host associated with @ap
+ *
+ * Initialize SCSI host @shost associated with ATA port @ap.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
+{
+ ap->scsi_host = shost;
+
+ shost->unique_id = ap->id;
+ shost->max_id = 16;
+ shost->max_lun = 1;
+ shost->max_channel = 1;
+ shost->max_cmd_len = 12;
+}
+
+/**
+ * ata_port_add - Attach low-level ATA driver to system
* @ent: Information provided by low-level driver
- * @host_set: Collections of ports to which we add
+ * @host: Collections of ports to which we add
* @port_no: Port number associated with this host
*
* Attach low-level ATA driver to system.
@@ -5316,43 +5376,55 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
* RETURNS:
* New ata_port on success, for NULL on error.
*/
-
-static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
- struct ata_host_set *host_set,
+static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
+ struct ata_host *host,
unsigned int port_no)
{
- struct Scsi_Host *host;
+ struct Scsi_Host *shost;
struct ata_port *ap;
- int rc;

DPRINTK("ENTER\n");

if (!ent->port_ops->error_handler &&
- !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
+ !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
printk(KERN_ERR "ata%u: no reset mechanism available\n",
port_no);
return NULL;
}

- host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
- if (!host)
+ shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
+ if (!shost)
return NULL;

- host->transportt = &ata_scsi_transport_template;
+ shost->transportt = &ata_scsi_transport_template;

- ap = ata_shost_to_port(host);
+ ap = ata_shost_to_port(shost);

- ata_host_init(ap, host, host_set, ent, port_no);
-
- rc = ap->ops->port_start(ap);
- if (rc)
- goto err_out;
+ ata_port_init(ap, host, ent, port_no);
+ ata_port_init_shost(ap, shost);

return ap;
+}

-err_out:
- scsi_host_put(host);
- return NULL;
+/**
+ * ata_sas_host_init - Initialize a host struct
+ * @host: host to initialize
+ * @dev: device host is attached to
+ * @flags: host flags
+ * @ops: port_ops
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ */
+
+void ata_host_init(struct ata_host *host, struct device *dev,
+ unsigned long flags, const struct ata_port_operations *ops)
+{
+ spin_lock_init(&host->lock);
+ host->dev = dev;
+ host->flags = flags;
+ host->ops = ops;
}

/**
@@ -5375,78 +5447,106 @@ err_out:
*/
int ata_device_add(const struct ata_probe_ent *ent)
{
- unsigned int count = 0, i;
+ unsigned int i;
struct device *dev = ent->dev;
- struct ata_host_set *host_set;
+ struct ata_host *host;
int rc;

DPRINTK("ENTER\n");
/* alloc a container for our list of ATA ports (buses) */
- host_set = kzalloc(sizeof(struct ata_host_set) +
- (ent->n_ports * sizeof(void *)), GFP_KERNEL);
- if (!host_set)
+ host = kzalloc(sizeof(struct ata_host) +
+ (ent->n_ports * sizeof(void *)), GFP_KERNEL);
+ if (!host)
return 0;
- spin_lock_init(&host_set->lock);

- host_set->dev = dev;
- host_set->n_ports = ent->n_ports;
- host_set->irq = ent->irq;
- host_set->mmio_base = ent->mmio_base;
- host_set->private_data = ent->private_data;
- host_set->ops = ent->port_ops;
- host_set->flags = ent->host_set_flags;
+ ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
+ host->n_ports = ent->n_ports;
+ host->irq = ent->irq;
+ host->irq2 = ent->irq2;
+ host->mmio_base = ent->mmio_base;
+ host->private_data = ent->private_data;

/* register each port bound to this device */
- for (i = 0; i < ent->n_ports; i++) {
+ for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
unsigned long xfer_mode_mask;
+ int irq_line = ent->irq;

- ap = ata_host_add(ent, host_set, i);
+ ap = ata_port_add(ent, host, i);
if (!ap)
goto err_out;

- host_set->ports[i] = ap;
+ host->ports[i] = ap;
+
+ /* dummy? */
+ if (ent->dummy_port_mask & (1 << i)) {
+ ata_port_printk(ap, KERN_INFO, "DUMMY\n");
+ ap->ops = &ata_dummy_port_ops;
+ continue;
+ }
+
+ /* start port */
+ rc = ap->ops->port_start(ap);
+ if (rc) {
+ host->ports[i] = NULL;
+ scsi_host_put(ap->scsi_host);
+ goto err_out;
+ }
+
+ /* Report the secondary IRQ for second channel legacy */
+ if (i == 1 && ent->irq2)
+ irq_line = ent->irq2;
+
xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
(ap->pio_mask << ATA_SHIFT_PIO);

/* print per-port info to dmesg */
ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
- "ctl 0x%lX bmdma 0x%lX irq %lu\n",
+ "ctl 0x%lX bmdma 0x%lX irq %d\n",
ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
ata_mode_string(xfer_mode_mask),
ap->ioaddr.cmd_addr,
ap->ioaddr.ctl_addr,
ap->ioaddr.bmdma_addr,
- ent->irq);
+ irq_line);

ata_chk_status(ap);
- host_set->ops->irq_clear(ap);
+ host->ops->irq_clear(ap);
ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
- count++;
}

- if (!count)
- goto err_free_ret;
-
- /* obtain irq, that is shared between channels */
+ /* obtain irq, that may be shared between channels */
rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
- DRV_NAME, host_set);
+ DRV_NAME, host);
if (rc) {
dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
ent->irq, rc);
goto err_out;
}

+ /* do we have a second IRQ for the other channel, eg legacy mode */
+ if (ent->irq2) {
+ /* We will get weird core code crashes later if this is true
+ so trap it now */
+ BUG_ON(ent->irq == ent->irq2);
+
+ rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
+ DRV_NAME, host);
+ if (rc) {
+ dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
+ ent->irq2, rc);
+ goto err_out_free_irq;
+ }
+ }
+
/* perform each probe synchronously */
DPRINTK("probe begin\n");
- for (i = 0; i < count; i++) {
- struct ata_port *ap;
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
u32 scontrol;
int rc;

- ap = host_set->ports[i];
-
/* init sata_spd_limit to the current value */
if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
int spd = (scontrol >> 4) & 0xf;
@@ -5454,7 +5554,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
}
ap->sata_spd_limit = ap->hw_sata_spd_limit;

- rc = scsi_add_host(ap->host, dev);
+ rc = scsi_add_host(ap->scsi_host, dev);
if (rc) {
ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
/* FIXME: do something useful here */
@@ -5502,27 +5602,29 @@ int ata_device_add(const struct ata_probe_ent *ent)

/* probes are done, now scan each port's disk(s) */
DPRINTK("host probe begin\n");
- for (i = 0; i < count; i++) {
- struct ata_port *ap = host_set->ports[i];
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];

ata_scsi_scan_host(ap);
}

- dev_set_drvdata(dev, host_set);
+ dev_set_drvdata(dev, host);

VPRINTK("EXIT, returning %u\n", ent->n_ports);
return ent->n_ports; /* success */

+err_out_free_irq:
+ free_irq(ent->irq, host);
err_out:
- for (i = 0; i < count; i++) {
- struct ata_port *ap = host_set->ports[i];
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
if (ap) {
ap->ops->port_stop(ap);
- scsi_host_put(ap->host);
+ scsi_host_put(ap->scsi_host);
}
}

-err_free_ret:
- kfree(host_set);
+
+ kfree(host);
VPRINTK("EXIT, returning 0\n");
return 0;
}
@@ -5582,12 +5684,12 @@ void ata_port_detach(struct ata_port *ap)

skip_eh:
/* remove the associated SCSI host */
- scsi_remove_host(ap->host);
+ scsi_remove_host(ap->scsi_host);
}

/**
- * ata_host_set_remove - PCI layer callback for device removal
- * @host_set: ATA host set that was removed
+ * ata_host_remove - PCI layer callback for device removal
+ * @host: ATA host set that was removed
*
* Unregister all objects associated with this host set. Free those
* objects.
@@ -5596,36 +5698,39 @@ void ata_port_detach(struct ata_port *ap)
* Inherited from calling layer (may sleep).
*/

-void ata_host_set_remove(struct ata_host_set *host_set)
+void ata_host_remove(struct ata_host *host)
{
unsigned int i;

- for (i = 0; i < host_set->n_ports; i++)
- ata_port_detach(host_set->ports[i]);
+ for (i = 0; i < host->n_ports; i++)
+ ata_port_detach(host->ports[i]);

- free_irq(host_set->irq, host_set);
+ free_irq(host->irq, host);
+ if (host->irq2)
+ free_irq(host->irq2, host);

- for (i = 0; i < host_set->n_ports; i++) {
- struct ata_port *ap = host_set->ports[i];
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];

- ata_scsi_release(ap->host);
+ ata_scsi_release(ap->scsi_host);

if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
struct ata_ioports *ioaddr = &ap->ioaddr;

- if (ioaddr->cmd_addr == 0x1f0)
- release_region(0x1f0, 8);
- else if (ioaddr->cmd_addr == 0x170)
- release_region(0x170, 8);
+ /* FIXME: Add -ac IDE pci mods to remove these special cases */
+ if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
+ release_region(ATA_PRIMARY_CMD, 8);
+ else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
+ release_region(ATA_SECONDARY_CMD, 8);
}

- scsi_host_put(ap->host);
+ scsi_host_put(ap->scsi_host);
}

- if (host_set->ops->host_stop)
- host_set->ops->host_stop(host_set);
+ if (host->ops->host_stop)
+ host->ops->host_stop(host);

- kfree(host_set);
+ kfree(host);
}

/**
@@ -5642,9 +5747,9 @@ void ata_host_set_remove(struct ata_host_set *host_set)
* One.
*/

-int ata_scsi_release(struct Scsi_Host *host)
+int ata_scsi_release(struct Scsi_Host *shost)
{
- struct ata_port *ap = ata_shost_to_port(host);
+ struct ata_port *ap = ata_shost_to_port(shost);

DPRINTK("ENTER\n");

@@ -5655,6 +5760,31 @@ int ata_scsi_release(struct Scsi_Host *host)
return 1;
}

+struct ata_probe_ent *
+ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
+{
+ struct ata_probe_ent *probe_ent;
+
+ probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
+ if (!probe_ent) {
+ printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
+ kobject_name(&(dev->kobj)));
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&probe_ent->node);
+ probe_ent->dev = dev;
+
+ probe_ent->sht = port->sht;
+ probe_ent->port_flags = port->flags;
+ probe_ent->pio_mask = port->pio_mask;
+ probe_ent->mwdma_mask = port->mwdma_mask;
+ probe_ent->udma_mask = port->udma_mask;
+ probe_ent->port_ops = port->port_ops;
+
+ return probe_ent;
+}
+
/**
* ata_std_ports - initialize ioaddr with standard port offsets.
* @ioaddr: IO address structure to be initialized
@@ -5684,11 +5814,11 @@ void ata_std_ports(struct ata_ioports *ioaddr)

#ifdef CONFIG_PCI

-void ata_pci_host_stop (struct ata_host_set *host_set)
+void ata_pci_host_stop (struct ata_host *host)
{
- struct pci_dev *pdev = to_pci_dev(host_set->dev);
+ struct pci_dev *pdev = to_pci_dev(host->dev);

- pci_iounmap(pdev, host_set->mmio_base);
+ pci_iounmap(pdev, host->mmio_base);
}

/**
@@ -5708,12 +5838,9 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
void ata_pci_remove_one (struct pci_dev *pdev)
{
struct device *dev = pci_dev_to_dev(pdev);
- struct ata_host_set *host_set = dev_get_drvdata(dev);
- struct ata_host_set *host_set2 = host_set->next;
+ struct ata_host *host = dev_get_drvdata(dev);

- ata_host_set_remove(host_set);
- if (host_set2)
- ata_host_set_remove(host_set2);
+ ata_host_remove(host);

pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -5754,11 +5881,11 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
return (tmp == bits->val) ? 1 : 0;
}

-void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
+void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
pci_save_state(pdev);

- if (state.event == PM_EVENT_SUSPEND) {
+ if (mesg.event == PM_EVENT_SUSPEND) {
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
}
@@ -5772,37 +5899,26 @@ void ata_pci_device_do_resume(struct pci_dev *pdev)
pci_set_master(pdev);
}

-int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
+int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
- struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc = 0;

- rc = ata_host_set_suspend(host_set, state);
+ rc = ata_host_suspend(host, mesg);
if (rc)
return rc;

- if (host_set->next) {
- rc = ata_host_set_suspend(host_set->next, state);
- if (rc) {
- ata_host_set_resume(host_set);
- return rc;
- }
- }
-
- ata_pci_device_do_suspend(pdev, state);
+ ata_pci_device_do_suspend(pdev, mesg);

return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
- struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);

ata_pci_device_do_resume(pdev);
- ata_host_set_resume(host_set);
- if (host_set->next)
- ata_host_set_resume(host_set->next);
-
+ ata_host_resume(host);
return 0;
}
#endif /* CONFIG_PCI */
@@ -5901,6 +6017,39 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
return tmp;
}

+/*
+ * Dummy port_ops
+ */
+static void ata_dummy_noret(struct ata_port *ap) { }
+static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
+static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
+
+static u8 ata_dummy_check_status(struct ata_port *ap)
+{
+ return ATA_DRDY;
+}
+
+static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
+{
+ return AC_ERR_SYSTEM;
+}
+
+const struct ata_port_operations ata_dummy_port_ops = {
+ .port_disable = ata_port_disable,
+ .check_status = ata_dummy_check_status,
+ .check_altstatus = ata_dummy_check_status,
+ .dev_select = ata_noop_dev_select,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = ata_dummy_qc_issue,
+ .freeze = ata_dummy_noret,
+ .thaw = ata_dummy_noret,
+ .error_handler = ata_dummy_noret,
+ .post_internal_cmd = ata_dummy_qc_noret,
+ .irq_clear = ata_dummy_noret,
+ .port_start = ata_dummy_ret0,
+ .port_stop = ata_dummy_noret,
+};
+
/*
* libata is essentially a library of internal helper functions for
* low-level ATA host controller drivers. As such, the API/ABI is
@@ -5911,11 +6060,13 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
+EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
-EXPORT_SYMBOL_GPL(ata_host_set_remove);
+EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
@@ -5982,8 +6133,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
-EXPORT_SYMBOL_GPL(ata_host_set_suspend);
-EXPORT_SYMBOL_GPL(ata_host_set_resume);
+EXPORT_SYMBOL_GPL(ata_host_suspend);
+EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
|