@@ -361,7 +361,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	unsigned int count;
 	unsigned long flags;
 
-	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
+	if (host->dma_on) {
 		pr_err("PIO IRQ in DMA mode!\n");
 		return;
 	} else if (!data) {
@@ -433,7 +433,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 	 */
 
 	if (data->flags & MMC_DATA_READ) {
-		if (host->chan_rx && !host->force_pio)
+		if (host->dma_on)
 			tmio_mmc_check_bounce_buffer(host);
 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
 			host->mrq);
@@ -470,7 +470,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
 	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
 	    stat & TMIO_STAT_TXUNDERRUN)
 		data->error = -EILSEQ;
-	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
+	if (host->dma_on && (data->flags & MMC_DATA_WRITE)) {
 		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
 		bool done = false;
 
@@ -494,7 +494,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
 			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
 			tmio_mmc_dataend_dma(host);
 		}
-	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
+	} else if (host->dma_on && (data->flags & MMC_DATA_READ)) {
 		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
 		tmio_mmc_dataend_dma(host);
 	} else {
@@ -547,7 +547,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
 	 */
 	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
 		if (host->data->flags & MMC_DATA_READ) {
-			if (host->force_pio || !host->chan_rx) {
+			if (!host->dma_on) {
 				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
 			} else {
 				tmio_mmc_disable_mmc_irqs(host,
@@ -555,7 +555,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
 				tasklet_schedule(&host->dma_issue);
 			}
 		} else {
-			if (host->force_pio || !host->chan_tx) {
+			if (!host->dma_on) {
 				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 			} else {
 				tmio_mmc_disable_mmc_irqs(host,
@@ -685,7 +685,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 
 	tmio_mmc_init_sg(host, data);
 	host->data = data;
-	host->force_pio = false;
+	host->dma_on = false;
 
 	/* Set transfer length / blocksize */
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);