@@ -119,6 +119,10 @@ int libata_noacpi = 0;
 module_param_named(noacpi, libata_noacpi, int, 0444);
 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

+int libata_allow_tpm = 0;
+module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+
 MODULE_AUTHOR("Jeff Garzik");
 MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
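
Both knobs are read-only after load (permission 0444), so permitting TPM commands is a boot-time, all-or-nothing decision ("modprobe libata allow_tpm=1"). A hypothetical sketch of the kind of gate a command filter could apply — the helper name and the use of the ATA TRUSTED* opcode range 0x5b-0x5f are illustrative assumptions, not part of this patch:

	extern int libata_allow_tpm;

	/* hypothetical filter: refuse TRUSTED SEND/RECEIVE opcodes
	 * unless the administrator loaded libata with allow_tpm=1 */
	static int demo_filter_tpm(unsigned char ata_op)
	{
		if (!libata_allow_tpm && ata_op >= 0x5b && ata_op <= 0x5f)
			return -EPERM;
		return 0;
	}
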
@@ -450,9 +454,9 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 * RETURNS:
 * Packed xfer_mask.
 */
-static unsigned int ata_pack_xfermask(unsigned int pio_mask,
-				      unsigned int mwdma_mask,
-				      unsigned int udma_mask)
+unsigned long ata_pack_xfermask(unsigned long pio_mask,
+				unsigned long mwdma_mask,
+				unsigned long udma_mask)
 {
 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
@@ -469,10 +473,8 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
-static void ata_unpack_xfermask(unsigned int xfer_mask,
-				unsigned int *pio_mask,
-				unsigned int *mwdma_mask,
-				unsigned int *udma_mask)
+void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
+			 unsigned long *mwdma_mask, unsigned long *udma_mask)
 {
 	if (pio_mask)
 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
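
The pack/unpack pair is pure bit arithmetic, so the round trip is easy to verify in isolation. A standalone sketch with illustrative shift values — the kernel's real ATA_SHIFT_*/ATA_MASK_* layout lives in include/linux/ata.h and differs from these demo numbers:

	#include <assert.h>

	/* demo layout: PIO in bits 0-7, MWDMA in 8-15, UDMA in 16-23 */
	enum { DEMO_SHIFT_PIO = 0, DEMO_SHIFT_MWDMA = 8, DEMO_SHIFT_UDMA = 16 };

	static unsigned long demo_pack(unsigned long pio, unsigned long mwdma,
				       unsigned long udma)
	{
		return (pio << DEMO_SHIFT_PIO) | (mwdma << DEMO_SHIFT_MWDMA) |
		       (udma << DEMO_SHIFT_UDMA);
	}

	int main(void)
	{
		unsigned long xfer_mask = demo_pack(0x1f, 0x07, 0x3f);

		/* unpacking is the mirror image: shift down, then mask */
		assert(((xfer_mask >> DEMO_SHIFT_MWDMA) & 0xff) == 0x07);
		assert(((xfer_mask >> DEMO_SHIFT_UDMA) & 0xff) == 0x3f);
		return 0;
	}

The switch from unsigned int to unsigned long matches the widened xfer_mask type this patch uses throughout.
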
@@ -486,9 +488,9 @@ static const struct ata_xfer_ent {
 	int shift, bits;
 	u8 base;
 } ata_xfer_tbl[] = {
-	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
-	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
-	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
+	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
 	{ -1, },
 };
@@ -503,9 +505,9 @@ static const struct ata_xfer_ent {
 * None.
 *
 * RETURNS:
- * Matching XFER_* value, 0 if no match found.
+ * Matching XFER_* value, 0xff if no match found.
 */
-static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
+u8 ata_xfer_mask2mode(unsigned long xfer_mask)
 {
 	int highbit = fls(xfer_mask) - 1;
 	const struct ata_xfer_ent *ent;
@@ -513,7 +515,7 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
 			return ent->base + highbit - ent->shift;
-	return 0;
+	return 0xff;
 }

 /**
@@ -528,13 +530,14 @@ static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
-static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
+unsigned long ata_xfer_mode2mask(u8 xfer_mode)
 {
 	const struct ata_xfer_ent *ent;

 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
-			return 1 << (ent->shift + xfer_mode - ent->base);
+			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
+				& ~((1 << ent->shift) - 1);
 	return 0;
}
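
Two semantic changes hide in this hunk. ata_xfer_mask2mode() now reports a miss as 0xff rather than 0, so a lookup failure can no longer be mistaken for a valid XFER_* value; callers later in this patch compare against 0xff accordingly. And ata_xfer_mode2mask() now returns the bit for the given mode plus the bits for every slower mode of the same type. A standalone check of the new expression, using an assumed shift of 16 for the UDMA group:

	#include <assert.h>

	static unsigned long demo_mode2mask(unsigned int shift, unsigned int n)
	{
		/* mode n of the group at 'shift': that mode and all below it */
		return ((2UL << (shift + n)) - 1) & ~((1UL << shift) - 1);
	}

	int main(void)
	{
		assert(demo_mode2mask(16, 2) == 0x70000);  /* UDMA0-UDMA2 */
		assert((1UL << (16 + 2)) == 0x40000);      /* old code: UDMA2 only */
		return 0;
	}
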
@@ -550,7 +553,7 @@ static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
-static int ata_xfer_mode2shift(unsigned int xfer_mode)
+int ata_xfer_mode2shift(unsigned long xfer_mode)
 {
 	const struct ata_xfer_ent *ent;
@@ -574,7 +577,7 @@ static int ata_xfer_mode2shift(unsigned int xfer_mode)
 * Constant C string representing highest speed listed in
 * @mode_mask, or the constant C string "<n/a>".
 */
-static const char *ata_mode_string(unsigned int xfer_mask)
+const char *ata_mode_string(unsigned long xfer_mask)
 {
 	static const char * const xfer_mode_str[] = {
 		"PIO0",
@@ -947,8 +950,8 @@ unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
 	if (r_err)
 		*r_err = err;

-	/* see if device passed diags: if master then continue and warn later */
-	if (err == 0 && dev->devno == 0)
+	/* see if device passed diags: continue and warn later */
+	if (err == 0)
 		/* diagnostic fail : do nothing _YET_ */
 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
 	else if (err == 1)
@@ -1285,48 +1288,6 @@ static int ata_hpa_resize(struct ata_device *dev)
 	return 0;
 }

-/**
- * ata_id_to_dma_mode - Identify DMA mode from id block
- * @dev: device to identify
- * @unknown: mode to assume if we cannot tell
- *
- * Set up the timing values for the device based upon the identify
- * reported values for the DMA mode. This function is used by drivers
- * which rely upon firmware configured modes, but wish to report the
- * mode correctly when possible.
- *
- * In addition we emit similarly formatted messages to the default
- * ata_dev_set_mode handler, in order to provide consistency of
- * presentation.
- */
-
-void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
-{
-	unsigned int mask;
-	u8 mode;
-
-	/* Pack the DMA modes */
-	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
-	if (dev->id[53] & 0x04)
-		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
-
-	/* Select the mode in use */
-	mode = ata_xfer_mask2mode(mask);
-
-	if (mode != 0) {
-		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
-			       ata_mode_string(mask));
-	} else {
-		/* SWDMA perhaps ? */
-		mode = unknown;
-		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
-	}
-
-	/* Configure the device reporting */
-	dev->xfer_mode = mode;
-	dev->xfer_shift = ata_xfer_mode2shift(mode);
-}
-
 /**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
@@ -1464,9 +1425,9 @@ static inline void ata_dump_id(const u16 *id)
 * RETURNS:
 * Computed xfermask
 */
-static unsigned int ata_id_xfermask(const u16 *id)
+unsigned long ata_id_xfermask(const u16 *id)
 {
-	unsigned int pio_mask, mwdma_mask, udma_mask;
+	unsigned long pio_mask, mwdma_mask, udma_mask;

 	/* Usual case. Word 53 indicates word 64 is valid */
 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
@@ -1519,7 +1480,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
 }

 /**
- * ata_port_queue_task - Queue port_task
+ * ata_pio_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
@@ -1531,16 +1492,15 @@ static unsigned int ata_id_xfermask(const u16 *id)
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
- * port_task and EH. ata_port_queue_task() may be ignored for EH
+ * port_task and EH. ata_pio_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
-void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
-			 unsigned long delay)
+static void ata_pio_queue_task(struct ata_port *ap, void *data,
+			       unsigned long delay)
 {
-	PREPARE_DELAYED_WORK(&ap->port_task, fn);
 	ap->port_task_data = data;

 	/* may fail if ata_port_flush_task() in progress */
@@ -2090,7 +2050,7 @@ int ata_dev_configure(struct ata_device *dev)
 	struct ata_eh_context *ehc = &dev->link->eh_context;
 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
 	const u16 *id = dev->id;
-	unsigned int xfer_mask;
+	unsigned long xfer_mask;
 	char revbuf[7];		/* XYZ-99\0  */
 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
 	char modelbuf[ATA_ID_PROD_LEN+1];
@@ -2161,8 +2121,14 @@ int ata_dev_configure(struct ata_device *dev)
 			       "supports DRM functions and may "
 			       "not be fully accessible.\n");
 		snprintf(revbuf, 7, "CFA");
-	} else
+	} else {
 		snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
+		/* Warn the user if the device has TPM extensions */
+		if (ata_id_has_tpm(id))
+			ata_dev_printk(dev, KERN_WARNING,
+				       "supports DRM functions and may "
+				       "not be fully accessible.\n");
+	}

 	dev->n_sectors = ata_id_n_sectors(id);

@@ -2295,19 +2261,8 @@ int ata_dev_configure(struct ata_device *dev)
 		dev->flags |= ATA_DFLAG_DIPM;
 	}

-	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
-		/* Let the user know. We don't want to disallow opens for
-		   rescue purposes, or in case the vendor is just a blithering
-		   idiot */
-		if (print_info) {
-			ata_dev_printk(dev, KERN_WARNING,
-"Drive reports diagnostics failure. This may indicate a drive\n");
-			ata_dev_printk(dev, KERN_WARNING,
-"fault or invalid emulation. Contact drive vendor for information.\n");
-		}
-	}
-
-	/* limit bridge transfers to udma5, 200 sectors */
+	/* Limit PATA drive on SATA cable bridge transfers to udma5,
+	   200 sectors */
 	if (ata_dev_knobble(dev)) {
 		if (ata_msg_drv(ap) && print_info)
 			ata_dev_printk(dev, KERN_INFO,
@@ -2336,6 +2291,21 @@ int ata_dev_configure(struct ata_device *dev)
 	if (ap->ops->dev_config)
 		ap->ops->dev_config(dev);

+	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+		/* Let the user know. We don't want to disallow opens for
+		   rescue purposes, or in case the vendor is just a blithering
+		   idiot. Do this after the dev_config call as some controllers
+		   with buggy firmware may want to avoid reporting false device
+		   bugs */
+
+		if (print_info) {
+			ata_dev_printk(dev, KERN_WARNING,
+"Drive reports diagnostics failure. This may indicate a drive\n");
+			ata_dev_printk(dev, KERN_WARNING,
+"fault or invalid emulation. Contact drive vendor for information.\n");
+		}
+	}
+
 	if (ata_msg_probe(ap))
 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
 			       __FUNCTION__, ata_chk_status(ap));
@@ -2386,6 +2356,18 @@ int ata_cable_unknown(struct ata_port *ap)
 	return ATA_CBL_PATA_UNK;
 }

+/**
+ * ata_cable_ignore - return ignored PATA cable.
+ * @ap: port
+ *
+ * Helper method for drivers which don't use cable type to limit
+ * transfer mode.
+ */
+int ata_cable_ignore(struct ata_port *ap)
+{
+	return ATA_CBL_PATA_IGN;
+}
+
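
A sketch of the intended use — the ops table below is a hypothetical driver fragment, not from this patch:

	/* timings on this controller don't depend on the cable,
	 * so skip detection entirely */
	static const struct ata_port_operations demo_port_ops = {
		.cable_detect	= ata_cable_ignore,
		/* ... the driver's remaining hooks ... */
	};
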
 /**
 * ata_cable_sata - return SATA cable type
 * @ap: port
@@ -2781,38 +2763,33 @@ int sata_set_spd(struct ata_link *link)
 */

 static const struct ata_timing ata_timing[] = {
+/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
+	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
+	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
+	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
+	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
+	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
+	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
+	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },

-	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
-	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
-	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
-	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
+	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
+	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
+	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },

-	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
+	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
+	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
+	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
 	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
-	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
-	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
-	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
+	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },

 /*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
-
-	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
-	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
-	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
-
-	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
-	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
-	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
-
-	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
-	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
-	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
-	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
-
-	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
-	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
-	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
-
-/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
+	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
+	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
+	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
+	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
+	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
+	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
+	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },

 	{ 0xFF }
 };
@@ -2845,14 +2822,16 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
 	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
 }

-static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
+const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
 {
-	const struct ata_timing *t;
+	const struct ata_timing *t = ata_timing;
+
+	while (xfer_mode > t->mode)
+		t++;

-	for (t = ata_timing; t->mode != speed; t++)
-		if (t->mode == 0xFF)
-			return NULL;
-	return t;
+	if (xfer_mode == t->mode)
+		return t;
+	return NULL;
}
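
The rewritten lookup terminates only because the table above is now sorted by ascending XFER_* value and still ends with the 0xFF entry, which doubles as a sentinel: the scan stops at the first mode greater than or equal to the request, so a miss lands on a higher mode (or the sentinel) and falls through to return NULL. The same pattern in a standalone sketch with made-up values:

	struct demo_timing { unsigned char mode; unsigned short cycle; };

	static const struct demo_timing demo_tbl[] = {
		{ 0x08, 600 }, { 0x09, 383 }, { 0x0a, 240 },	/* ascending */
		{ 0xFF, 0 },					/* sentinel */
	};

	static const struct demo_timing *demo_find(unsigned char mode)
	{
		const struct demo_timing *t = demo_tbl;

		while (mode > t->mode)	/* sentinel bounds the walk */
			t++;
		return mode == t->mode ? t : 0;
	}
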
 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
@@ -2926,6 +2905,57 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 	return 0;
 }

+/**
+ * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
+ * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
+ * @cycle: cycle duration in ns
+ *
+ * Return matching xfer mode for @cycle. The returned mode is of
+ * the transfer type specified by @xfer_shift. If @cycle is too
+ * slow for @xfer_shift, 0xff is returned. If @cycle is faster
+ * than the fastest known mode, the fastest mode is returned.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Matching xfer_mode, 0xff if no match found.
+ */
+u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
+{
+	u8 base_mode = 0xff, last_mode = 0xff;
+	const struct ata_xfer_ent *ent;
+	const struct ata_timing *t;
+
+	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+		if (ent->shift == xfer_shift)
+			base_mode = ent->base;
+
+	for (t = ata_timing_find_mode(base_mode);
+	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
+		unsigned short this_cycle;
+
+		switch (xfer_shift) {
+		case ATA_SHIFT_PIO:
+		case ATA_SHIFT_MWDMA:
+			this_cycle = t->cycle;
+			break;
+		case ATA_SHIFT_UDMA:
+			this_cycle = t->udma;
+			break;
+		default:
+			return 0xff;
+		}
+
+		if (cycle > this_cycle)
+			break;
+
+		last_mode = t->mode;
+	}
+
+	return last_mode;
+}
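
A worked example against the reordered table above: asking for the fastest UDMA mode whose cycle duration is still at least 25 ns walks UDMA0 (120) through UDMA4 (30), stops at UDMA5 (20 < 25), and yields XFER_UDMA_4. A caller sketch:

	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25); /* XFER_UDMA_4 */

	if (mode == 0xff)
		return -EINVAL;	/* @cycle too slow even for the base mode */
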
+
 /**
 * ata_down_xfermask_limit - adjust dev xfer masks downward
 * @dev: Device to adjust xfer masks
@@ -2944,8 +2974,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
 {
 	char buf[32];
-	unsigned int orig_mask, xfer_mask;
-	unsigned int pio_mask, mwdma_mask, udma_mask;
+	unsigned long orig_mask, xfer_mask;
+	unsigned long pio_mask, mwdma_mask, udma_mask;
 	int quiet, highbit;

 	quiet = !!(sel & ATA_DNXFER_QUIET);
@@ -3039,7 +3069,7 @@ static int ata_dev_set_mode(struct ata_device *dev)

 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
-	if (dev->xfer_shift == ATA_SHIFT_MWDMA && 
+	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
 	    dev->dma_mode == XFER_MW_DMA_0 &&
 	    (dev->id[63] >> 8) & 1)
 		err_mask &= ~AC_ERR_DEV;
@@ -3089,7 +3119,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)

 	/* step 1: calculate xfer_mask */
 	ata_link_for_each_dev(dev, link) {
-		unsigned int pio_mask, dma_mask;
+		unsigned long pio_mask, dma_mask;
 		unsigned int mode_mask;

 		if (!ata_dev_enabled(dev))
@@ -3115,7 +3145,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

 		found = 1;
-		if (dev->dma_mode)
+		if (dev->dma_mode != 0xff)
 			used_dma = 1;
 	}
 	if (!found)
@@ -3126,7 +3156,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 		if (!ata_dev_enabled(dev))
 			continue;

-		if (!dev->pio_mode) {
+		if (dev->pio_mode == 0xff) {
 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
 			rc = -EINVAL;
 			goto out;
@@ -3140,7 +3170,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)

 	/* step 3: set host DMA timings */
 	ata_link_for_each_dev(dev, link) {
-		if (!ata_dev_enabled(dev) || !dev->dma_mode)
+		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
 			continue;

 		dev->xfer_mode = dev->dma_mode;
@@ -3172,31 +3202,6 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
 	return rc;
 }

-/**
- * ata_set_mode - Program timings and issue SET FEATURES - XFER
- * @link: link on which timings will be programmed
- * @r_failed_dev: out parameter for failed device
- *
- * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
- * ata_set_mode() fails, pointer to the failing device is
- * returned in @r_failed_dev.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * 0 on success, negative errno otherwise
- */
-int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
-{
-	struct ata_port *ap = link->ap;
-
-	/* has private set_mode? */
-	if (ap->ops->set_mode)
-		return ap->ops->set_mode(link, r_failed_dev);
-	return ata_do_set_mode(link, r_failed_dev);
-}
-
 /**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
@@ -4363,7 +4368,14 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
 	tf.feature = SETFEATURES_XFER;
 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
 	tf.protocol = ATA_PROT_NODATA;
-	tf.nsect = dev->xfer_mode;
+	/* If we are using IORDY we must send the mode setting command */
+	if (ata_pio_need_iordy(dev))
+		tf.nsect = dev->xfer_mode;
+	/* If the device has IORDY and the controller does not - turn it off */
+	else if (ata_id_has_iordy(dev->id))
+		tf.nsect = 0x01;
+	else /* In the ancient relic department - skip all of this */
+		return 0;

 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
@@ -4462,17 +4474,13 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
 void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *sg = qc->sg;
 	int dir = qc->dma_dir;
 	void *pad_buf = NULL;

-	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
 	WARN_ON(sg == NULL);

-	if (qc->flags & ATA_QCFLAG_SINGLE)
-		WARN_ON(qc->n_elem > 1);
-
-	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+	VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);

 	/* if we padded the buffer out to 32-bit bound, and data
 	 * xfer direction is from-device, we must copy from the
@@ -4481,31 +4489,20 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

-	if (qc->flags & ATA_QCFLAG_SG) {
-		if (qc->n_elem)
-			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
-		/* restore last sg */
-		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
-		if (pad_buf) {
-			struct scatterlist *psg = &qc->pad_sgent;
-			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
-			kunmap_atomic(addr, KM_IRQ0);
-		}
-	} else {
-		if (qc->n_elem)
-			dma_unmap_single(ap->dev,
-				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
-				dir);
-		/* restore sg */
-		sg->length += qc->pad_len;
-		if (pad_buf)
-			memcpy(qc->buf_virt + sg->length - qc->pad_len,
-			       pad_buf, qc->pad_len);
+	if (qc->mapped_n_elem)
+		dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
+	/* restore last sg */
+	if (qc->last_sg)
+		*qc->last_sg = qc->saved_last_sg;
+	if (pad_buf) {
+		struct scatterlist *psg = &qc->extra_sg[1];
+		void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
+		memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+		kunmap_atomic(addr, KM_IRQ0);
 	}

 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	qc->__sg = NULL;
+	qc->sg = NULL;
 }

 /**
@@ -4523,13 +4520,10 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg;
-	unsigned int idx;
+	unsigned int si, pi;

-	WARN_ON(qc->__sg == NULL);
-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-
-	idx = 0;
-	ata_for_each_sg(sg, qc) {
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		u32 addr, offset;
 		u32 sg_len, len;

@@ -4546,18 +4540,17 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
 			if ((offset + sg_len) > 0x10000)
 				len = 0x10000 - offset;

-			ap->prd[idx].addr = cpu_to_le32(addr);
-			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

-			idx++;
+			pi++;
 			sg_len -= len;
 			addr += len;
 		}
 	}

-	if (idx)
-		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }

 /**
@@ -4577,13 +4570,10 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg;
-	unsigned int idx;
-
-	WARN_ON(qc->__sg == NULL);
-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+	unsigned int si, pi;

-	idx = 0;
-	ata_for_each_sg(sg, qc) {
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		u32 addr, offset;
 		u32 sg_len, len, blen;

@@ -4601,25 +4591,24 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
 				len = 0x10000 - offset;

 			blen = len & 0xffff;
-			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[pi].addr = cpu_to_le32(addr);
 			if (blen == 0) {
 				/* Some PATA chipsets like the CS5530 can't
 				   cope with 0x0000 meaning 64K as the spec says */
-				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
+				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
 				blen = 0x8000;
-				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
+				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
 			}
-			ap->prd[idx].flags_len = cpu_to_le32(blen);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+			ap->prd[pi].flags_len = cpu_to_le32(blen);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

-			idx++;
+			pi++;
 			sg_len -= len;
 			addr += len;
 		}
 	}

-	if (idx)
-		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }

 /**
@@ -4669,8 +4658,8 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
 */
 static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
 {
-	if (qc->tf.protocol != ATA_PROT_ATAPI &&
-	    qc->tf.protocol != ATA_PROT_ATAPI_DMA)
+	if (qc->tf.protocol != ATAPI_PROT_PIO &&
+	    qc->tf.protocol != ATAPI_PROT_DMA)
 		return 0;

 	if (qc->tf.flags & ATA_TFLAG_WRITE)
@@ -4755,33 +4744,6 @@ void ata_dumb_qc_prep(struct ata_queued_cmd *qc)

 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

-/**
- * ata_sg_init_one - Associate command with memory buffer
- * @qc: Command to be associated
- * @buf: Memory buffer
- * @buflen: Length of memory buffer, in bytes.
- *
- * Initialize the data-related elements of queued_cmd @qc
- * to point to a single memory buffer, @buf of byte length @buflen.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-
-void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
-{
-	qc->flags |= ATA_QCFLAG_SINGLE;
-
-	qc->__sg = &qc->sgent;
-	qc->n_elem = 1;
-	qc->orig_n_elem = 1;
-	qc->buf_virt = buf;
-	qc->nbytes = buflen;
-	qc->cursg = qc->__sg;
-
-	sg_init_one(&qc->sgent, buf, buflen);
-}
-
 /**
 * ata_sg_init - Associate command with scatter-gather table.
 * @qc: Command to be associated
@@ -4795,84 +4757,103 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
-
 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem)
 {
-	qc->flags |= ATA_QCFLAG_SG;
-	qc->__sg = sg;
+	qc->sg = sg;
 	qc->n_elem = n_elem;
-	qc->orig_n_elem = n_elem;
-	qc->cursg = qc->__sg;
+	qc->cursg = qc->sg;
 }

-/**
- * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
- * @qc: Command with memory buffer to be mapped.
- *
- * DMA-map the memory buffer associated with queued_cmd @qc.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * Zero on success, negative on error.
- */
-
-static int ata_sg_setup_one(struct ata_queued_cmd *qc)
+static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
+				       unsigned int *n_elem_extra,
+				       unsigned int *nbytes_extra)
 {
 	struct ata_port *ap = qc->ap;
-	int dir = qc->dma_dir;
-	struct scatterlist *sg = qc->__sg;
-	dma_addr_t dma_address;
-	int trim_sg = 0;
+	unsigned int n_elem = qc->n_elem;
+	struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
+
+	*n_elem_extra = 0;
+	*nbytes_extra = 0;
+
+	/* needs padding? */
+	qc->pad_len = qc->nbytes & 3;
+
+	if (likely(!qc->pad_len))
+		return n_elem;
+
+	/* locate last sg and save it */
+	lsg = sg_last(qc->sg, n_elem);
+	qc->last_sg = lsg;
+	qc->saved_last_sg = *lsg;
+
+	sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));

-	/* we must lengthen transfers to end on a 32-bit boundary */
-	qc->pad_len = sg->length & 3;
 	if (qc->pad_len) {
+		struct scatterlist *psg = &qc->extra_sg[1];
 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-		struct scatterlist *psg = &qc->pad_sgent;
+		unsigned int offset;

 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

-		if (qc->tf.flags & ATA_TFLAG_WRITE)
-			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
-			       qc->pad_len);
+		/* psg->page/offset are used to copy to-be-written
+		 * data in this function or read data in ata_sg_clean.
+		 */
+		offset = lsg->offset + lsg->length - qc->pad_len;
+		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
+			    qc->pad_len, offset_in_page(offset));
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+			kunmap_atomic(addr, KM_IRQ0);
+		}

 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
-		/* trim sg */
-		sg->length -= qc->pad_len;
-		if (sg->length == 0)
-			trim_sg = 1;

-		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
-			sg->length, qc->pad_len);
-	}
+		/* Trim the last sg entry and chain the original and
+		 * padding sg lists.
+		 *
+		 * Because chaining consumes one sg entry, one extra
+		 * sg entry is allocated and the last sg entry is
+		 * copied to it if the length isn't zero after padded
+		 * amount is removed.
+		 *
+		 * If the last sg entry is completely replaced by
+		 * padding sg entry, the first sg entry is skipped
+		 * while chaining.
+		 */
+		lsg->length -= qc->pad_len;
+		if (lsg->length) {
+			copy_lsg = &qc->extra_sg[0];
+			tsg = &qc->extra_sg[0];
+		} else {
+			n_elem--;
+			tsg = &qc->extra_sg[1];
+		}

-	if (trim_sg) {
-		qc->n_elem--;
-		goto skip_map;
-	}
+		esg = &qc->extra_sg[1];

-	dma_address = dma_map_single(ap->dev, qc->buf_virt,
-				     sg->length, dir);
-	if (dma_mapping_error(dma_address)) {
-		/* restore sg */
-		sg->length += qc->pad_len;
-		return -1;
+		(*n_elem_extra)++;
+		(*nbytes_extra) += 4 - qc->pad_len;
 	}

-	sg_dma_address(sg) = dma_address;
-	sg_dma_len(sg) = sg->length;
+	if (copy_lsg)
+		sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);

-skip_map:
-	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
-		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+	sg_chain(lsg, 1, tsg);
+	sg_mark_end(esg);

-	return 0;
+	/* sglist can't start with chaining sg entry, fast forward */
+	if (qc->sg == lsg) {
+		qc->sg = tsg;
+		qc->cursg = tsg;
+	}
+
+	return n_elem;
}
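
The two list shapes the helper can produce, sketched as a comment (extra_sg[0]/extra_sg[1] as in the code above; sg_chain() rewrites the original last element into a link entry):

	/*
	 * trimmed last entry still holds data (lsg->length != 0):
	 *   sg[0] .. sg[n-2], lsg (chain link)
	 *	-> extra_sg[0] (copy of the trimmed lsg)
	 *	-> extra_sg[1] (4-byte pad entry, marked as list end)
	 *
	 * padding swallowed the last entry (lsg->length == 0):
	 *   sg[0] .. sg[n-2], lsg (chain link)
	 *	-> extra_sg[1] (pad entry only), and n_elem drops by one
	 */

Either way the caller gets back the count of entries to DMA-map, while *n_elem_extra and *nbytes_extra report the chained pad entry and the 4 - pad_len bytes it adds to round the transfer up to a 32-bit boundary.
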
 /**
@@ -4888,75 +4869,30 @@ skip_map:
 * Zero on success, negative on error.
 *
 */
-
 static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->__sg;
-	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
-	int n_elem, pre_n_elem, dir, trim_sg = 0;
+	unsigned int n_elem, n_elem_extra, nbytes_extra;

 	VPRINTK("ENTER, ata%u\n", ap->print_id);
-	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

-	/* we must lengthen transfers to end on a 32-bit boundary */
-	qc->pad_len = lsg->length & 3;
-	if (qc->pad_len) {
-		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-		struct scatterlist *psg = &qc->pad_sgent;
-		unsigned int offset;
-
-		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
+	n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);

-		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
-
-		/*
-		 * psg->page/offset are used to copy to-be-written
-		 * data in this function or read data in ata_sg_clean.
-		 */
-		offset = lsg->offset + lsg->length - qc->pad_len;
-		sg_init_table(psg, 1);
-		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
-			    qc->pad_len, offset_in_page(offset));
-
-		if (qc->tf.flags & ATA_TFLAG_WRITE) {
-			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
-			kunmap_atomic(addr, KM_IRQ0);
+	if (n_elem) {
+		n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
+		if (n_elem < 1) {
+			/* restore last sg */
+			if (qc->last_sg)
+				*qc->last_sg = qc->saved_last_sg;
+			return -1;
 		}
-
-		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
-		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
-		/* trim last sg */
-		lsg->length -= qc->pad_len;
-		if (lsg->length == 0)
-			trim_sg = 1;
-
-		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
-			qc->n_elem - 1, lsg->length, qc->pad_len);
-	}
-
-	pre_n_elem = qc->n_elem;
-	if (trim_sg && pre_n_elem)
-		pre_n_elem--;
-
-	if (!pre_n_elem) {
-		n_elem = 0;
-		goto skip_map;
-	}
-
-	dir = qc->dma_dir;
-	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
-	if (n_elem < 1) {
-		/* restore last sg */
-		lsg->length += qc->pad_len;
-		return -1;
+		DPRINTK("%d sg elements mapped\n", n_elem);
 	}

-	DPRINTK("%d sg elements mapped\n", n_elem);
-
-skip_map:
-	qc->n_elem = n_elem;
+	qc->n_elem = qc->mapped_n_elem = n_elem;
+	qc->n_elem += n_elem_extra;
+	qc->nbytes += nbytes_extra;
+	qc->flags |= ATA_QCFLAG_DMAMAP;

 	return 0;
 }
@@ -4985,7 +4921,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)

 /**
 * ata_data_xfer - Transfer data by PIO
- * @adev: device to target
+ * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @write_data: read/write
@@ -4994,37 +4930,44 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 *
 * LOCKING:
 * Inherited from caller.
+ *
+ * RETURNS:
+ * Bytes consumed.
 */
-void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
-		   unsigned int buflen, int write_data)
+unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
+			   unsigned int buflen, int rw)
 {
-	struct ata_port *ap = adev->link->ap;
+	struct ata_port *ap = dev->link->ap;
+	void __iomem *data_addr = ap->ioaddr.data_addr;
 	unsigned int words = buflen >> 1;

 	/* Transfer multiple of 2 bytes */
-	if (write_data)
-		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
+	if (rw == READ)
+		ioread16_rep(data_addr, buf, words);
 	else
-		ioread16_rep(ap->ioaddr.data_addr, buf, words);
+		iowrite16_rep(data_addr, buf, words);

 	/* Transfer trailing 1 byte, if any. */
 	if (unlikely(buflen & 0x01)) {
-		u16 align_buf[1] = { 0 };
+		__le16 align_buf[1] = { 0 };
 		unsigned char *trailing_buf = buf + buflen - 1;

-		if (write_data) {
-			memcpy(align_buf, trailing_buf, 1);
-			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
-		} else {
-			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
+		if (rw == READ) {
+			align_buf[0] = cpu_to_le16(ioread16(data_addr));
 			memcpy(trailing_buf, align_buf, 1);
+		} else {
+			memcpy(align_buf, trailing_buf, 1);
+			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
 		}
+		words++;
 	}
+
+	return words << 1;
}
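
The new return value counts bytes actually clocked through the data register, so an odd-length request is rounded up to a whole 16-bit word. A standalone mirror of the arithmetic:

	#include <assert.h>

	static unsigned int demo_consumed(unsigned int buflen)
	{
		unsigned int words = buflen >> 1;

		if (buflen & 0x01)	/* the trailing byte costs a full word */
			words++;
		return words << 1;
	}

	int main(void)
	{
		assert(demo_consumed(512) == 512);
		assert(demo_consumed(7) == 8);	/* odd length rounds up */
		return 0;
	}
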
 /**
 * ata_data_xfer_noirq - Transfer data by PIO
- * @adev: device to target
+ * @dev: device to target
 * @buf: data buffer
 * @buflen: buffer length
 * @write_data: read/write
@@ -5034,14 +4977,21 @@ void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
 *
 * LOCKING:
 * Inherited from caller.
+ *
+ * RETURNS:
+ * Bytes consumed.
 */
-void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
-			 unsigned int buflen, int write_data)
+unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
+				 unsigned int buflen, int rw)
 {
 	unsigned long flags;
+	unsigned int consumed;
+
 	local_irq_save(flags);
-	ata_data_xfer(adev, buf, buflen, write_data);
+	consumed = ata_data_xfer(dev, buf, buflen, rw);
 	local_irq_restore(flags);
+
+	return consumed;
 }

@@ -5152,13 +5102,13 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 	ata_altstatus(ap); /* flush */

 	switch (qc->tf.protocol) {
-	case ATA_PROT_ATAPI:
+	case ATAPI_PROT_PIO:
 		ap->hsm_task_state = HSM_ST;
 		break;
-	case ATA_PROT_ATAPI_NODATA:
+	case ATAPI_PROT_NODATA:
 		ap->hsm_task_state = HSM_ST_LAST;
 		break;
-	case ATA_PROT_ATAPI_DMA:
+	case ATAPI_PROT_DMA:
 		ap->hsm_task_state = HSM_ST_LAST;
 		/* initiate bmdma */
 		ap->ops->bmdma_start(qc);
@@ -5300,12 +5250,15 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	bytes = (bc_hi << 8) | bc_lo;

 	/* shall be cleared to zero, indicating xfer of data */
-	if (ireason & (1 << 0))
+	if (unlikely(ireason & (1 << 0)))
 		goto err_out;

 	/* make sure transfer direction matches expected */
 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
-	if (do_write != i_write)
+	if (unlikely(do_write != i_write))
+		goto err_out;
+
+	if (unlikely(!bytes))
 		goto err_out;

 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
@@ -5341,7 +5294,7 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *q
 		    (qc->tf.flags & ATA_TFLAG_WRITE))
 			return 1;

-		if (is_atapi_taskfile(&qc->tf) &&
+		if (ata_is_atapi(qc->tf.protocol) &&
 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			return 1;
}
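
ata_is_atapi() classifies the protocol value directly instead of re-deriving it from the whole taskfile, and the same family (ata_is_ncq(), ata_is_data(), ata_is_pio(), ata_is_dma(), ata_is_nodata()) carries the rest of this patch. A rough standalone sketch of flag-based protocol classification — the flag names and values here are illustrative, not the header's:

	enum {
		DEMO_PROT_PIO	= 1 << 0,	/* data by programmed I/O */
		DEMO_PROT_DMA	= 1 << 1,	/* data by DMA */
		DEMO_PROT_NCQ	= 1 << 2,	/* queued */
		DEMO_PROT_ATAPI	= 1 << 3,	/* packet command */
	};

	static inline int demo_is_atapi(unsigned int prot_flags)
	{
		return prot_flags & DEMO_PROT_ATAPI;
	}

	static inline int demo_is_data(unsigned int prot_flags)
	{
		return prot_flags & (DEMO_PROT_PIO | DEMO_PROT_DMA);
	}
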
@@ -5506,7 +5459,7 @@ fsm_start:

 	case HSM_ST:
 		/* complete command or read/write the data register */
-		if (qc->tf.protocol == ATA_PROT_ATAPI) {
+		if (qc->tf.protocol == ATAPI_PROT_PIO) {
 			/* ATAPI PIO protocol */
 			if ((status & ATA_DRQ) == 0) {
 				/* No more data to transfer or device error.
@@ -5664,7 +5617,7 @@ fsm_start:
 			msleep(2);
 			status = ata_busy_wait(ap, ATA_BUSY, 10);
 			if (status & ATA_BUSY) {
-				ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
+				ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
 				return;
 			}
 		}
@@ -5805,6 +5758,22 @@ static void fill_result_tf(struct ata_queued_cmd *qc)
 	ap->ops->tf_read(ap, &qc->result_tf);
 }

+static void ata_verify_xfer(struct ata_queued_cmd *qc)
+{
+	struct ata_device *dev = qc->dev;
+
+	if (ata_tag_internal(qc->tag))
+		return;
+
+	if (ata_is_nodata(qc->tf.protocol))
+		return;
+
+	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
+		return;
+
+	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
+}
+
 /**
 * ata_qc_complete - Complete an active ATA command
 * @qc: Command to complete
@@ -5876,6 +5845,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 			break;
 		}

+		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+			ata_verify_xfer(qc);
+
 		__ata_qc_complete(qc);
 	} else {
 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
@@ -5938,30 +5910,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
 	return nr_done;
 }

-static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-
-	switch (qc->tf.protocol) {
-	case ATA_PROT_NCQ:
-	case ATA_PROT_DMA:
-	case ATA_PROT_ATAPI_DMA:
-		return 1;
-
-	case ATA_PROT_ATAPI:
-	case ATA_PROT_PIO:
-		if (ap->flags & ATA_FLAG_PIO_DMA)
-			return 1;
-
-		/* fall through */
-
-	default:
-		return 0;
-	}
-
-	/* never reached */
-}
-
 /**
 * ata_qc_issue - issue taskfile to device
 * @qc: command to issue to device
@@ -5978,6 +5926,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_link *link = qc->dev->link;
+	u8 prot = qc->tf.protocol;

 	/* Make sure only one non-NCQ command is outstanding. The
 	 * check is skipped for old EH because it reuses active qc to
@@ -5985,7 +5934,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	 */
 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

-	if (qc->tf.protocol == ATA_PROT_NCQ) {
+	if (ata_is_ncq(prot)) {
 		WARN_ON(link->sactive & (1 << qc->tag));

 		if (!link->sactive)
@@ -6001,17 +5950,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	qc->flags |= ATA_QCFLAG_ACTIVE;
 	ap->qc_active |= 1 << qc->tag;

-	if (ata_should_dma_map(qc)) {
-		if (qc->flags & ATA_QCFLAG_SG) {
-			if (ata_sg_setup(qc))
-				goto sg_err;
-		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
-			if (ata_sg_setup_one(qc))
-				goto sg_err;
-		}
-	} else {
-		qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	}
+	/* We guarantee to LLDs that they will have at least one
+	 * non-zero sg if the command is a data command.
+	 */
+	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+
+	/* ata_sg_setup() may update nbytes */
+	qc->raw_nbytes = qc->nbytes;
+
+	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
+				 (ap->flags & ATA_FLAG_PIO_DMA)))
+		if (ata_sg_setup(qc))
+			goto sg_err;

 	/* if device is sleeping, schedule softreset and abort the link */
 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -6029,7 +5979,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	return;

 sg_err:
-	qc->flags &= ~ATA_QCFLAG_DMAMAP;
 	qc->err_mask |= AC_ERR_SYSTEM;
 err:
 	ata_qc_complete(qc);
@@ -6064,11 +6013,11 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 	switch (qc->tf.protocol) {
 	case ATA_PROT_PIO:
 	case ATA_PROT_NODATA:
-	case ATA_PROT_ATAPI:
-	case ATA_PROT_ATAPI_NODATA:
+	case ATAPI_PROT_PIO:
+	case ATAPI_PROT_NODATA:
 		qc->tf.flags |= ATA_TFLAG_POLLING;
 		break;
-	case ATA_PROT_ATAPI_DMA:
+	case ATAPI_PROT_DMA:
 		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
 			/* see ata_dma_blacklisted() */
 			BUG();
@@ -6091,7 +6040,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 		ap->hsm_task_state = HSM_ST_LAST;

 		if (qc->tf.flags & ATA_TFLAG_POLLING)
-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+			ata_pio_queue_task(ap, qc, 0);

 		break;

@@ -6113,7 +6062,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
 			/* PIO data out protocol */
 			ap->hsm_task_state = HSM_ST_FIRST;
-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+			ata_pio_queue_task(ap, qc, 0);

 			/* always send first data block using
 			 * the ata_pio_task() codepath.
@@ -6123,7 +6072,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 			ap->hsm_task_state = HSM_ST;

 			if (qc->tf.flags & ATA_TFLAG_POLLING)
-				ata_port_queue_task(ap, ata_pio_task, qc, 0);
+				ata_pio_queue_task(ap, qc, 0);

 			/* if polling, ata_pio_task() handles the rest.
 			 * otherwise, interrupt handler takes over from here.
@@ -6132,8 +6081,8 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)

 		break;

-	case ATA_PROT_ATAPI:
-	case ATA_PROT_ATAPI_NODATA:
+	case ATAPI_PROT_PIO:
+	case ATAPI_PROT_NODATA:
 		if (qc->tf.flags & ATA_TFLAG_POLLING)
 			ata_qc_set_polling(qc);

@@ -6144,10 +6093,10 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 		/* send cdb by polling if no cdb interrupt */
 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
 		    (qc->tf.flags & ATA_TFLAG_POLLING))
-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+			ata_pio_queue_task(ap, qc, 0);
 		break;

-	case ATA_PROT_ATAPI_DMA:
+	case ATAPI_PROT_DMA:
 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
@@ -6156,7 +6105,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)

 		/* send cdb by polling if no cdb interrupt */
 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-			ata_port_queue_task(ap, ata_pio_task, qc, 0);
+			ata_pio_queue_task(ap, qc, 0);
 		break;

 	default:
@@ -6200,15 +6149,15 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
 		 */

 		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
-		 * The flag was turned on only for atapi devices.
-		 * No need to check is_atapi_taskfile(&qc->tf) again.
+		 * The flag was turned on only for atapi devices. No
+		 * need to check ata_is_atapi(qc->tf.protocol) again.
 		 */
 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
 			goto idle_irq;
 		break;
 	case HSM_ST_LAST:
 		if (qc->tf.protocol == ATA_PROT_DMA ||
-		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+		    qc->tf.protocol == ATAPI_PROT_DMA) {
 			/* check status of DMA engine */
 			host_stat = ap->ops->bmdma_status(ap);
 			VPRINTK("ata%u: host_stat 0x%X\n",
@@ -6250,7 +6199,7 @@ inline unsigned int ata_host_intr(struct ata_port *ap,
 	ata_hsm_move(ap, qc, status, 0);

 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
-				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
+				       qc->tf.protocol == ATAPI_PROT_DMA))
 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

 	return 1;	/* irq handled */
@@ -6772,7 +6721,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif

-	INIT_DELAYED_WORK(&ap->port_task, NULL);
+	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
 	INIT_LIST_HEAD(&ap->eh_done_q);
@@ -7589,7 +7538,6 @@ EXPORT_SYMBOL_GPL(ata_host_register);
 EXPORT_SYMBOL_GPL(ata_host_activate);
 EXPORT_SYMBOL_GPL(ata_host_detach);
 EXPORT_SYMBOL_GPL(ata_sg_init);
-EXPORT_SYMBOL_GPL(ata_sg_init_one);
 EXPORT_SYMBOL_GPL(ata_hsm_move);
 EXPORT_SYMBOL_GPL(ata_qc_complete);
 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
@@ -7601,6 +7549,13 @@ EXPORT_SYMBOL_GPL(ata_std_dev_select);
 EXPORT_SYMBOL_GPL(sata_print_link_status);
 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
+EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
+EXPORT_SYMBOL_GPL(ata_mode_string);
+EXPORT_SYMBOL_GPL(ata_id_xfermask);
 EXPORT_SYMBOL_GPL(ata_check_status);
 EXPORT_SYMBOL_GPL(ata_altstatus);
 EXPORT_SYMBOL_GPL(ata_exec_command);
@@ -7643,7 +7598,6 @@ EXPORT_SYMBOL_GPL(ata_wait_register);
 EXPORT_SYMBOL_GPL(ata_busy_sleep);
 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
 EXPORT_SYMBOL_GPL(ata_wait_ready);
-EXPORT_SYMBOL_GPL(ata_port_queue_task);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
@@ -7662,18 +7616,20 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
 #endif /* CONFIG_PM */
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
-EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);

 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
+EXPORT_SYMBOL_GPL(ata_timing_find_mode);
 EXPORT_SYMBOL_GPL(ata_timing_compute);
 EXPORT_SYMBOL_GPL(ata_timing_merge);
+EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
+EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 #ifdef CONFIG_PM
@@ -7715,4 +7671,5 @@ EXPORT_SYMBOL_GPL(ata_dev_try_classify);
 EXPORT_SYMBOL_GPL(ata_cable_40wire);
 EXPORT_SYMBOL_GPL(ata_cable_80wire);
 EXPORT_SYMBOL_GPL(ata_cable_unknown);
+EXPORT_SYMBOL_GPL(ata_cable_ignore);
 EXPORT_SYMBOL_GPL(ata_cable_sata);