@@ -53,8 +53,6 @@ static void sdhci_finish_data(struct sdhci_host *);
 static void sdhci_finish_command(struct sdhci_host *);
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_pre_dma_transfer(struct sdhci_host *host,
-				  struct mmc_data *data);
 static int sdhci_do_get_cd(struct sdhci_host *host);
 
 #ifdef CONFIG_PM
@@ -428,6 +426,31 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
 	DBG("PIO transfer complete.\n");
 }
 
+static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+				  struct mmc_data *data, int cookie)
+{
+	int sg_count;
+
+	/*
+	 * If the data buffers are already mapped, return the previous
+	 * dma_map_sg() result.
+	 */
+	if (data->host_cookie == COOKIE_PRE_MAPPED)
+		return data->sg_count;
+
+	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+			      data->flags & MMC_DATA_WRITE ?
+			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+	if (sg_count == 0)
+		return -ENOSPC;
+
+	data->sg_count = sg_count;
+	data->host_cookie = cookie;
+
+	return sg_count;
+}
+
 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
 {
 	local_irq_save(*flags);
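Note: sdhci_pre_dma_transfer() is now shared between the synchronous path
(sdhci_prepare_data(), which passes COOKIE_MAPPED) and the asynchronous
pre-request path (sdhci_pre_req(), which passes COOKIE_PRE_MAPPED). A minimal
sketch of the cookie states this relies on, assuming sdhci.h defines them
roughly as:

	enum sdhci_cookie {
		COOKIE_UNMAPPED,	/* buffers not DMA-mapped */
		COOKIE_PRE_MAPPED,	/* mapped by sdhci_pre_req() */
		COOKIE_MAPPED,		/* mapped by sdhci_prepare_data() */
	};
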
@@ -462,41 +485,22 @@ static void sdhci_adma_mark_end(void *desc)
 	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
 }
 
-static int sdhci_adma_table_pre(struct sdhci_host *host,
-	struct mmc_data *data)
+static void sdhci_adma_table_pre(struct sdhci_host *host,
+	struct mmc_data *data, int sg_count)
 {
-	int direction;
-
-	void *desc;
-	void *align;
-	dma_addr_t addr;
-	dma_addr_t align_addr;
-	int len, offset;
-
 	struct scatterlist *sg;
-	int i;
-	char *buffer;
 	unsigned long flags;
+	dma_addr_t addr, align_addr;
+	void *desc, *align;
+	char *buffer;
+	int len, offset, i;
 
 	/*
 	 * The spec does not specify endianness of descriptor table.
 	 * We currently guess that it is LE.
 	 */
 
-	if (data->flags & MMC_DATA_READ)
-		direction = DMA_FROM_DEVICE;
-	else
-		direction = DMA_TO_DEVICE;
-
-	host->align_addr = dma_map_single(mmc_dev(host->mmc),
-		host->align_buffer, host->align_buffer_sz, direction);
-	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
-		goto fail;
-	BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
-
-	host->sg_count = sdhci_pre_dma_transfer(host, data);
-	if (host->sg_count < 0)
-		goto unmap_align;
+	host->sg_count = sg_count;
 
 	desc = host->adma_table;
 	align = host->align_buffer;
@@ -508,10 +512,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 		len = sg_dma_len(sg);
 
 		/*
-		 * The SDHCI specification states that ADMA
-		 * addresses must be 32-bit aligned. If they
-		 * aren't, then we use a bounce buffer for
-		 * the (up to three) bytes that screw up the
+		 * The SDHCI specification states that ADMA addresses must
+		 * be 32-bit aligned. If they aren't, then we use a bounce
+		 * buffer for the (up to three) bytes that screw up the
 		 * alignment.
 		 */
 		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
@@ -555,92 +558,56 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 	}
 
 	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
-		/*
-		 * Mark the last descriptor as the terminating descriptor
-		 */
+		/* Mark the last descriptor as the terminating descriptor */
 		if (desc != host->adma_table) {
 			desc -= host->desc_sz;
 			sdhci_adma_mark_end(desc);
 		}
 	} else {
-		/*
-		 * Add a terminating entry.
-		 */
-
-		/* nop, end, valid */
+		/* Add a terminating entry - nop, end, valid */
 		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
 	}
-
-	/*
-	 * Resync align buffer as we might have changed it.
-	 */
-	if (data->flags & MMC_DATA_WRITE) {
-		dma_sync_single_for_device(mmc_dev(host->mmc),
-			host->align_addr, host->align_buffer_sz, direction);
-	}
-
-	return 0;
-
-unmap_align:
-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
-		host->align_buffer_sz, direction);
-fail:
-	return -EINVAL;
 }
 
 static void sdhci_adma_table_post(struct sdhci_host *host,
 	struct mmc_data *data)
 {
-	int direction;
-
 	struct scatterlist *sg;
 	int i, size;
 	void *align;
 	char *buffer;
 	unsigned long flags;
-	bool has_unaligned;
-
-	if (data->flags & MMC_DATA_READ)
-		direction = DMA_FROM_DEVICE;
-	else
-		direction = DMA_TO_DEVICE;
 
-	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
-		host->align_buffer_sz, direction);
+	if (data->flags & MMC_DATA_READ) {
+		bool has_unaligned = false;
 
-	/* Do a quick scan of the SG list for any unaligned mappings */
-	has_unaligned = false;
-	for_each_sg(data->sg, sg, host->sg_count, i)
-		if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
-			has_unaligned = true;
-			break;
-		}
+		/* Do a quick scan of the SG list for any unaligned mappings */
+		for_each_sg(data->sg, sg, host->sg_count, i)
+			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+				has_unaligned = true;
+				break;
+			}
 
-	if (has_unaligned && data->flags & MMC_DATA_READ) {
-		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
-			data->sg_len, direction);
+		if (has_unaligned) {
+			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+					    data->sg_len, DMA_FROM_DEVICE);
 
-		align = host->align_buffer;
+			align = host->align_buffer;
 
-		for_each_sg(data->sg, sg, host->sg_count, i) {
-			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
-				size = SDHCI_ADMA2_ALIGN -
-					(sg_dma_address(sg) & SDHCI_ADMA2_MASK);
+			for_each_sg(data->sg, sg, host->sg_count, i) {
+				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+					size = SDHCI_ADMA2_ALIGN -
+					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
 
-				buffer = sdhci_kmap_atomic(sg, &flags);
-				memcpy(buffer, align, size);
-				sdhci_kunmap_atomic(buffer, &flags);
+					buffer = sdhci_kmap_atomic(sg, &flags);
+					memcpy(buffer, align, size);
+					sdhci_kunmap_atomic(buffer, &flags);
 
-				align += SDHCI_ADMA2_ALIGN;
+					align += SDHCI_ADMA2_ALIGN;
+				}
 			}
 		}
 	}
-
-	if (data->host_cookie == COOKIE_MAPPED) {
-		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-			data->sg_len, direction);
-		data->host_cookie = COOKIE_UNMAPPED;
-	}
 }
 
 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -666,9 +633,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 	if (!data)
 		target_timeout = cmd->busy_timeout * 1000;
 	else {
-		target_timeout = data->timeout_ns / 1000;
-		if (host->clock)
-			target_timeout += data->timeout_clks / host->clock;
+		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
+		if (host->clock && data->timeout_clks) {
+			unsigned long long val;
+
+			/*
+			 * data->timeout_clks is in units of clock cycles.
+			 * host->clock is in Hz. target_timeout is in us.
+			 * Hence, us = 1000000 * cycles / Hz. Round up.
+			 */
+			val = 1000000 * data->timeout_clks;
+			if (do_div(val, host->clock))
+				target_timeout++;
+			target_timeout += val;
+		}
 	}
 
 	/*
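Note: the old code truncated both terms, so sub-microsecond remainders were
silently dropped. A self-contained sketch of the new round-up arithmetic in
plain userspace C (using % and / where the kernel uses do_div(), which
divides in place and returns the remainder):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long timeout_ns = 100100;		/* example values */
		unsigned long long timeout_clks = 1000;
		unsigned int clock = 52000000;			/* Hz */
		unsigned int target_timeout;			/* us */
		unsigned long long val = 1000000ULL * timeout_clks;

		target_timeout = (timeout_ns + 999) / 1000;	/* DIV_ROUND_UP -> 101 */
		if (val % clock)				/* nonzero remainder: round up */
			target_timeout++;
		target_timeout += val / clock;			/* 19.23... truncates to 19 */

		printf("%u us\n", target_timeout);		/* prints "121 us" */
		return 0;
	}
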
@@ -729,7 +707,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 ctrl;
 	struct mmc_data *data = cmd->data;
-	int ret;
 
 	WARN_ON(host->data);
 
@@ -748,63 +725,48 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 	host->data_early = 0;
 	host->data->bytes_xfered = 0;
 
-	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
-		host->flags |= SDHCI_REQ_USE_DMA;
-
-	/*
-	 * FIXME: This doesn't account for merging when mapping the
-	 * scatterlist.
-	 */
-	if (host->flags & SDHCI_REQ_USE_DMA) {
-		int broken, i;
+	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 		struct scatterlist *sg;
+		unsigned int length_mask, offset_mask;
+		int i;
+
+		host->flags |= SDHCI_REQ_USE_DMA;
 
-		broken = 0;
+		/*
+		 * FIXME: This doesn't account for merging when mapping the
+		 * scatterlist.
+		 *
+		 * The assumption here being that alignment and lengths are
+		 * the same after DMA mapping to device address space.
+		 */
+		length_mask = 0;
+		offset_mask = 0;
 		if (host->flags & SDHCI_USE_ADMA) {
-			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
-				broken = 1;
+			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
+				length_mask = 3;
+				/*
+				 * As we use up to 3 byte chunks to work
+				 * around alignment problems, we need to
+				 * check the offset as well.
+				 */
+				offset_mask = 3;
+			}
 		} else {
 			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
-				broken = 1;
+				length_mask = 3;
+			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
+				offset_mask = 3;
 		}
 
-		if (unlikely(broken)) {
+		if (unlikely(length_mask | offset_mask)) {
 			for_each_sg(data->sg, sg, data->sg_len, i) {
-				if (sg->length & 0x3) {
+				if (sg->length & length_mask) {
 					DBG("Reverting to PIO because of transfer size (%d)\n",
-						sg->length);
+					    sg->length);
 					host->flags &= ~SDHCI_REQ_USE_DMA;
 					break;
 				}
-			}
-		}
-	}
-
-	/*
-	 * The assumption here being that alignment is the same after
-	 * translation to device address space.
-	 */
-	if (host->flags & SDHCI_REQ_USE_DMA) {
-		int broken, i;
-		struct scatterlist *sg;
-
-		broken = 0;
-		if (host->flags & SDHCI_USE_ADMA) {
-			/*
-			 * As we use 3 byte chunks to work around
-			 * alignment problems, we need to check this
-			 * quirk.
-			 */
-			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
-				broken = 1;
-		} else {
-			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
-				broken = 1;
-		}
-
-		if (unlikely(broken)) {
-			for_each_sg(data->sg, sg, data->sg_len, i) {
-				if (sg->offset & 0x3) {
+				if (sg->offset & offset_mask) {
 					DBG("Reverting to PIO because of bad alignment\n");
 					host->flags &= ~SDHCI_REQ_USE_DMA;
 					break;
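Note: the two separate "broken" scans over the scatterlist are folded into one
pass driven by bitmasks; length_mask = 3 and offset_mask = 3 flag any length
or offset that is not a multiple of 4, since x & 3 equals x % 4 for unsigned
x. A trivial standalone check of that identity:

	#include <assert.h>

	int main(void)
	{
		unsigned int x;

		for (x = 0; x < 1024; x++)
			assert((x & 3) == (x % 4));	/* mask test == remainder mod 4 */
		return 0;
	}
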
@@ -814,39 +776,27 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 	}
 
 	if (host->flags & SDHCI_REQ_USE_DMA) {
-		if (host->flags & SDHCI_USE_ADMA) {
-			ret = sdhci_adma_table_pre(host, data);
-			if (ret) {
-				/*
-				 * This only happens when someone fed
-				 * us an invalid request.
-				 */
-				WARN_ON(1);
-				host->flags &= ~SDHCI_REQ_USE_DMA;
-			} else {
-				sdhci_writel(host, host->adma_addr,
-					SDHCI_ADMA_ADDRESS);
-				if (host->flags & SDHCI_USE_64_BIT_DMA)
-					sdhci_writel(host,
-						(u64)host->adma_addr >> 32,
-						SDHCI_ADMA_ADDRESS_HI);
-			}
-		} else {
-			int sg_cnt;
+		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
 
-			sg_cnt = sdhci_pre_dma_transfer(host, data);
-			if (sg_cnt <= 0) {
-				/*
-				 * This only happens when someone fed
-				 * us an invalid request.
-				 */
-				WARN_ON(1);
-				host->flags &= ~SDHCI_REQ_USE_DMA;
-			} else {
-				WARN_ON(sg_cnt != 1);
-				sdhci_writel(host, sg_dma_address(data->sg),
-					SDHCI_DMA_ADDRESS);
-			}
+		if (sg_cnt <= 0) {
+			/*
+			 * This only happens when someone fed
+			 * us an invalid request.
+			 */
+			WARN_ON(1);
+			host->flags &= ~SDHCI_REQ_USE_DMA;
+		} else if (host->flags & SDHCI_USE_ADMA) {
+			sdhci_adma_table_pre(host, data, sg_cnt);
+
+			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
+			if (host->flags & SDHCI_USE_64_BIT_DMA)
+				sdhci_writel(host,
+					     (u64)host->adma_addr >> 32,
+					     SDHCI_ADMA_ADDRESS_HI);
+		} else {
+			WARN_ON(sg_cnt != 1);
+			sdhci_writel(host, sg_dma_address(data->sg),
+				     SDHCI_DMA_ADDRESS);
 		}
 	}
 
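Note: sdhci_prepare_data() is now the single place where the buffers get
DMA-mapped for both SDMA and ADMA: sdhci_pre_dma_transfer() runs first, and
only its result is handed to sdhci_adma_table_pre() or written to
SDHCI_DMA_ADDRESS. The SDMA path still expects exactly one mapped segment,
hence the WARN_ON(sg_cnt != 1).
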
@@ -946,19 +896,9 @@ static void sdhci_finish_data(struct sdhci_host *host)
 	data = host->data;
 	host->data = NULL;
 
-	if (host->flags & SDHCI_REQ_USE_DMA) {
-		if (host->flags & SDHCI_USE_ADMA)
-			sdhci_adma_table_post(host, data);
-		else {
-			if (data->host_cookie == COOKIE_MAPPED) {
-				dma_unmap_sg(mmc_dev(host->mmc),
-					data->sg, data->sg_len,
-					(data->flags & MMC_DATA_READ) ?
-					DMA_FROM_DEVICE : DMA_TO_DEVICE);
-				data->host_cookie = COOKIE_UNMAPPED;
-			}
-		}
-	}
+	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
+	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
+		sdhci_adma_table_post(host, data);
 
 	/*
 	 * The specification states that the block count register must
@@ -1003,6 +943,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
 
 	WARN_ON(host->cmd);
 
+	/* Initially, a command has no error */
+	cmd->error = 0;
+
 	/* Wait max 10 ms */
 	timeout = 10;
 
@@ -1097,8 +1040,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
 		}
 	}
 
-	host->cmd->error = 0;
-
 	/* Finished CMD23, now send actual command. */
 	if (host->cmd == host->mrq->sbc) {
 		host->cmd = NULL;
@@ -2114,39 +2055,12 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	struct sdhci_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
 
-	if (host->flags & SDHCI_REQ_USE_DMA) {
-		if (data->host_cookie == COOKIE_GIVEN ||
-		    data->host_cookie == COOKIE_MAPPED)
-			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-				     data->flags & MMC_DATA_WRITE ?
-				     DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		data->host_cookie = COOKIE_UNMAPPED;
-	}
-}
-
-static int sdhci_pre_dma_transfer(struct sdhci_host *host,
-				  struct mmc_data *data)
-{
-	int sg_count;
-
-	if (data->host_cookie == COOKIE_MAPPED) {
-		data->host_cookie = COOKIE_GIVEN;
-		return data->sg_count;
-	}
+	if (data->host_cookie != COOKIE_UNMAPPED)
+		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+			     data->flags & MMC_DATA_WRITE ?
+			     DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
-	WARN_ON(data->host_cookie == COOKIE_GIVEN);
-
-	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-			      data->flags & MMC_DATA_WRITE ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-	if (sg_count == 0)
-		return -ENOSPC;
-
-	data->sg_count = sg_count;
-	data->host_cookie = COOKIE_MAPPED;
-
-	return sg_count;
+	data->host_cookie = COOKIE_UNMAPPED;
 }
 
 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -2157,7 +2071,7 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
 	mrq->data->host_cookie = COOKIE_UNMAPPED;
 
 	if (host->flags & SDHCI_REQ_USE_DMA)
-		sdhci_pre_dma_transfer(host, mrq->data);
+		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
 }
 
 static void sdhci_card_event(struct mmc_host *mmc)
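Note: with the COOKIE_GIVEN state gone, sdhci_post_req() only has to ask
whether the buffers are still mapped. A rough, illustrative sketch of the
asynchronous flow the mmc core drives through these hooks (not literal call
signatures):

	/* core prepares request N+1 while request N is still in flight */
	pre_req(next_mrq);	/* sdhci_pre_dma_transfer(..., COOKIE_PRE_MAPPED) */
	request(mrq);		/* reuses the existing mapping via the cookie */
	/* after completion */
	post_req(mrq, err);	/* dma_unmap_sg(); cookie back to COOKIE_UNMAPPED */
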
@@ -2237,6 +2151,22 @@ static void sdhci_tasklet_finish(unsigned long param)
 
 	mrq = host->mrq;
 
+	/*
+	 * Always unmap the data buffers if they were mapped by
+	 * sdhci_prepare_data() whenever we finish with a request.
+	 * This avoids leaking DMA mappings on error.
+	 */
+	if (host->flags & SDHCI_REQ_USE_DMA) {
+		struct mmc_data *data = mrq->data;
+
+		if (data && data->host_cookie == COOKIE_MAPPED) {
+			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+				     (data->flags & MMC_DATA_READ) ?
+				     DMA_FROM_DEVICE : DMA_TO_DEVICE);
+			data->host_cookie = COOKIE_UNMAPPED;
+		}
+	}
+
 	/*
 	 * The controller needs a reset of internal state machines
 	 * upon error conditions.
@@ -2322,13 +2252,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
 		return;
 	}
 
-	if (intmask & SDHCI_INT_TIMEOUT)
-		host->cmd->error = -ETIMEDOUT;
-	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
-			SDHCI_INT_INDEX))
-		host->cmd->error = -EILSEQ;
+	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
+		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
+		if (intmask & SDHCI_INT_TIMEOUT)
+			host->cmd->error = -ETIMEDOUT;
+		else
+			host->cmd->error = -EILSEQ;
+
+		/*
+		 * If this command initiates a data phase and a response
+		 * CRC error is signalled, the card can start transferring
+		 * data - the card may have received the command without
+		 * error. We must not terminate the mmc_request early.
+		 *
+		 * If the card did not receive the command or returned an
+		 * error which prevented it sending data, the data phase
+		 * will time out.
+		 */
+		if (host->cmd->data &&
+		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+		    SDHCI_INT_CRC) {
+			host->cmd = NULL;
+			return;
+		}
 
-	if (host->cmd->error) {
 		tasklet_schedule(&host->finish_tasklet);
 		return;
 	}
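Note: previously a response CRC error on a command with a data phase
completed the whole mmc_request at once, even though the card may in fact
have accepted the command and started moving data. Clearing host->cmd and
returning leaves cmd->error set to -EILSEQ but lets the data phase finish or
time out on its own, so the failure is reported only once the request as a
whole completes.
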
@@ -2857,6 +2804,36 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
 
 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
 
+static int sdhci_set_dma_mask(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+	struct device *dev = mmc_dev(mmc);
+	int ret = -EINVAL;
+
+	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
+		host->flags &= ~SDHCI_USE_64_BIT_DMA;
+
+	/* Try 64-bit mask if hardware is capable of it */
+	if (host->flags & SDHCI_USE_64_BIT_DMA) {
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+		if (ret) {
+			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
+				mmc_hostname(mmc));
+			host->flags &= ~SDHCI_USE_64_BIT_DMA;
+		}
+	}
+
+	/* 32-bit mask as default & fallback */
+	if (ret) {
+		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+		if (ret)
+			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
+				mmc_hostname(mmc));
+	}
+
+	return ret;
+}
+
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
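Note: dma_set_mask_and_coherent() sets the streaming and coherent DMA masks
together; failure of the 64-bit attempt is not fatal because the code falls
back to the 32-bit mask, and only failure of both leaves ret nonzero for the
caller. A hypothetical call site, for illustration only (not part of this
patch):

	/* hypothetical probe-time usage */
	if (sdhci_set_dma_mask(host))
		pr_warn("%s: no usable DMA mask, PIO only\n",
			mmc_hostname(host->mmc));
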
@@ -2928,17 +2905,21 @@ int sdhci_add_host(struct sdhci_host *host)
 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
 	 * implement.
 	 */
-	if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
+	if (caps[0] & SDHCI_CAN_64BIT)
 		host->flags |= SDHCI_USE_64_BIT_DMA;
 
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
-		if (host->ops->enable_dma) {
-			if (host->ops->enable_dma(host)) {
-				pr_warn("%s: No suitable DMA available - falling back to PIO\n",
-					mmc_hostname(mmc));
-				host->flags &=
-					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
-			}
+		ret = sdhci_set_dma_mask(host);
+
+		if (!ret && host->ops->enable_dma)
+			ret = host->ops->enable_dma(host);
+
+		if (ret) {
+			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
+				mmc_hostname(mmc));
+			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+
+			ret = 0;
 		}
 	}
 
@@ -2947,6 +2928,9 @@ int sdhci_add_host(struct sdhci_host *host)
 		host->flags &= ~SDHCI_USE_SDMA;
 
 	if (host->flags & SDHCI_USE_ADMA) {
+		dma_addr_t dma;
+		void *buf;
+
 		/*
 		 * The DMA descriptor table size is calculated as the maximum
 		 * number of segments times 2, to allow for an alignment
@@ -2962,33 +2946,27 @@ int sdhci_add_host(struct sdhci_host *host)
 				SDHCI_ADMA2_32_DESC_SZ;
 			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
 		}
-		host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
-						      host->adma_table_sz,
-						      &host->adma_addr,
-						      GFP_KERNEL);
+
 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
-		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
-		if (!host->adma_table || !host->align_buffer) {
-			if (host->adma_table)
-				dma_free_coherent(mmc_dev(mmc),
-						  host->adma_table_sz,
-						  host->adma_table,
-						  host->adma_addr);
-			kfree(host->align_buffer);
+		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
+					 host->adma_table_sz, &dma, GFP_KERNEL);
+		if (!buf) {
 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
 				mmc_hostname(mmc));
 			host->flags &= ~SDHCI_USE_ADMA;
-			host->adma_table = NULL;
-			host->align_buffer = NULL;
-		} else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
+		} else if ((dma + host->align_buffer_sz) &
+			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
 			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
 				mmc_hostname(mmc));
 			host->flags &= ~SDHCI_USE_ADMA;
-			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
-					  host->adma_table, host->adma_addr);
-			kfree(host->align_buffer);
-			host->adma_table = NULL;
-			host->align_buffer = NULL;
+			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+					  host->adma_table_sz, buf, dma);
+		} else {
+			host->align_buffer = buf;
+			host->align_addr = dma;
+
+			host->adma_table = buf + host->align_buffer_sz;
+			host->adma_addr = dma + host->align_buffer_sz;
 		}
 	}
 
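Note: one dma_alloc_coherent() call now backs both buffers, which is what
lets the per-request dma_map_single()/dma_unmap_single() of the align buffer
disappear from sdhci_adma_table_pre()/_post() above. Assumed layout of the
combined allocation, per the arithmetic in this hunk:

	buf / dma                          buf + align_buffer_sz
	|<--- align_buffer_sz bytes ---->|<--- adma_table_sz bytes --->|
	 bounce chunks, SDHCI_ADMA2_ALIGN   descriptor table
	 bytes per segment                  (adma_table / adma_addr)

The descriptor-alignment test accordingly moves to
(dma + host->align_buffer_sz), the new start of the table.
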
@@ -3072,14 +3050,14 @@ int sdhci_add_host(struct sdhci_host *host)
 		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
 			host->timeout_clk *= 1000;
 
+		if (override_timeout_clk)
+			host->timeout_clk = override_timeout_clk;
+
 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
 			host->ops->get_max_timeout_count(host) : 1 << 27;
 		mmc->max_busy_timeout /= host->timeout_clk;
 	}
 
-	if (override_timeout_clk)
-		host->timeout_clk = override_timeout_clk;
-
 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
 
@@ -3449,10 +3427,10 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
 	if (!IS_ERR(mmc->supply.vqmmc))
 		regulator_disable(mmc->supply.vqmmc);
 
-	if (host->adma_table)
-		dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
-				  host->adma_table, host->adma_addr);
-	kfree(host->align_buffer);
+	if (host->align_buffer)
+		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+				  host->adma_table_sz, host->align_buffer,
+				  host->align_addr);
 
 	host->adma_table = NULL;
 	host->align_buffer = NULL;