@@ -492,7 +492,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 			host->align_buffer, host->align_buffer_sz, direction);
 	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
 		goto fail;
-	BUG_ON(host->align_addr & host->align_mask);
+	BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
 
 	host->sg_count = sdhci_pre_dma_transfer(host, data);
 	if (host->sg_count < 0)
@@ -514,8 +514,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 		 * the (up to three) bytes that screw up the
 		 * alignment.
 		 */
-		offset = (host->align_sz - (addr & host->align_mask)) &
-			 host->align_mask;
+		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
+			 SDHCI_ADMA2_MASK;
 		if (offset) {
 			if (data->flags & MMC_DATA_WRITE) {
 				buffer = sdhci_kmap_atomic(sg, &flags);
@@ -529,8 +529,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
 			BUG_ON(offset > 65536);
 
-			align += host->align_sz;
-			align_addr += host->align_sz;
+			align += SDHCI_ADMA2_ALIGN;
+			align_addr += SDHCI_ADMA2_ALIGN;
 
 			desc += host->desc_sz;
 
@@ -611,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 	/* Do a quick scan of the SG list for any unaligned mappings */
 	has_unaligned = false;
 	for_each_sg(data->sg, sg, host->sg_count, i)
-		if (sg_dma_address(sg) & host->align_mask) {
+		if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
 			has_unaligned = true;
 			break;
 		}
@@ -623,15 +623,15 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
 		align = host->align_buffer;
 
 		for_each_sg(data->sg, sg, host->sg_count, i) {
-			if (sg_dma_address(sg) & host->align_mask) {
-				size = host->align_sz -
-				       (sg_dma_address(sg) & host->align_mask);
+			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+				size = SDHCI_ADMA2_ALIGN -
+				       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
 
 				buffer = sdhci_kmap_atomic(sg, &flags);
 				memcpy(buffer, align, size);
 				sdhci_kunmap_atomic(buffer, &flags);
 
-				align += host->align_sz;
+				align += SDHCI_ADMA2_ALIGN;
 			}
 		}
 	}
@@ -2961,24 +2961,17 @@ int sdhci_add_host(struct sdhci_host *host)
 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
 					      SDHCI_ADMA2_64_DESC_SZ;
-			host->align_buffer_sz = SDHCI_MAX_SEGS *
-						SDHCI_ADMA2_64_ALIGN;
 			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
-			host->align_sz = SDHCI_ADMA2_64_ALIGN;
-			host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
 		} else {
 			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
 					      SDHCI_ADMA2_32_DESC_SZ;
-			host->align_buffer_sz = SDHCI_MAX_SEGS *
-						SDHCI_ADMA2_32_ALIGN;
 			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
-			host->align_sz = SDHCI_ADMA2_32_ALIGN;
-			host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
 		}
 		host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
 						      host->adma_table_sz,
 						      &host->adma_addr,
 						      GFP_KERNEL);
+		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
 		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
 		if (!host->adma_table || !host->align_buffer) {
 			if (host->adma_table)
@@ -2992,7 +2985,7 @@ int sdhci_add_host(struct sdhci_host *host)
 		host->flags &= ~SDHCI_USE_ADMA;
 		host->adma_table = NULL;
 		host->align_buffer = NULL;
-	} else if (host->adma_addr & host->align_mask) {
+	} else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
 		pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
 			mmc_hostname(mmc));
 		host->flags &= ~SDHCI_USE_ADMA;
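
The alignment arithmetic that replaces the old host->align_sz/host->align_mask fields is the usual power-of-two round-up idiom: with SDHCI_ADMA2_ALIGN a power of two and SDHCI_ADMA2_MASK its low-bit mask, (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & SDHCI_ADMA2_MASK gives the number of bytes from addr up to the next aligned boundary, and 0 when addr is already aligned. A minimal standalone sketch of that computation, assuming the 4-byte ADMA2 data alignment this patch settles on for both descriptor modes (in the kernel the constants live in sdhci.h):

/*
 * Userspace sketch of the ADMA2 alignment math used above.
 * Assumption: SDHCI_ADMA2_ALIGN is 4, as in this patch series.
 */
#include <stdint.h>
#include <stdio.h>

#define SDHCI_ADMA2_ALIGN	4
#define SDHCI_ADMA2_MASK	(SDHCI_ADMA2_ALIGN - 1)

/* Bytes from addr up to the next 4-byte boundary; 0 if already aligned. */
static unsigned int adma2_offset(uint64_t addr)
{
	return (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
	       SDHCI_ADMA2_MASK;
}

int main(void)
{
	uint64_t addr;

	/* 0x1000 is aligned (offset 0); 0x1001..0x1003 need 3, 2, 1 bytes. */
	for (addr = 0x1000; addr <= 0x1003; addr++)
		printf("addr 0x%llx -> offset %u\n",
		       (unsigned long long)addr, adma2_offset(addr));
	return 0;
}

Note also that the final hunk checks the descriptor table against SDHCI_ADMA2_DESC_ALIGN rather than the data-buffer mask: the table keeps its own, stricter alignment requirement, separate from the now uniform 4-byte alignment of the bounce buffers.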