@@ -28,8 +28,7 @@
 #include <linux/amba/bus.h>
 #include <linux/clk.h>
 #include <linux/scatterlist.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -37,6 +36,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/types.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>

 #include <asm/div64.h>
 #include <asm/io.h>
@@ -46,41 +46,77 @@

 #define DRIVER_NAME "mmci-pl18x"

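+/*
+ * Variant init hooks: real implementations are provided when
+ * CONFIG_DMA_ENGINE / CONFIG_MMC_STM32_SDMMC are enabled; empty stubs
+ * keep the core building when either one is compiled out.
+ */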
+#ifdef CONFIG_DMA_ENGINE
+void mmci_variant_init(struct mmci_host *host);
+#else
+static inline void mmci_variant_init(struct mmci_host *host) {}
+#endif
+
+#ifdef CONFIG_MMC_STM32_SDMMC
+void sdmmc_variant_init(struct mmci_host *host);
+#else
+static inline void sdmmc_variant_init(struct mmci_host *host) {}
+#endif
+
 static unsigned int fmax = 515633;

 static struct variant_data variant_arm = {
 	.fifosize = 16 * 4,
 	.fifohalfsize = 8 * 4,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.datalength_bits = 16,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.pwrreg_powerup = MCI_PWR_UP,
 	.f_max = 100000000,
 	.reversed_irq_handling = true,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_ROD,
+	.init = mmci_variant_init,
 };

 static struct variant_data variant_arm_extended_fifo = {
 	.fifosize = 128 * 4,
 	.fifohalfsize = 64 * 4,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.datalength_bits = 16,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.pwrreg_powerup = MCI_PWR_UP,
 	.f_max = 100000000,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_ROD,
+	.init = mmci_variant_init,
 };

 static struct variant_data variant_arm_extended_fifo_hwfc = {
 	.fifosize = 128 * 4,
 	.fifohalfsize = 64 * 4,
 	.clkreg_enable = MCI_ARM_HWFCEN,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.datalength_bits = 16,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.pwrreg_powerup = MCI_PWR_UP,
 	.f_max = 100000000,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_ROD,
+	.init = mmci_variant_init,
 };

 static struct variant_data variant_u300 = {
@@ -88,7 +124,13 @@ static struct variant_data variant_u300 = {
 	.fifohalfsize = 8 * 4,
 	.clkreg_enable = MCI_ST_U300_HWFCEN,
 	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.datalength_bits = 16,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
 	.st_sdio = true,
 	.pwrreg_powerup = MCI_PWR_ON,
@@ -97,8 +139,10 @@ static struct variant_data variant_u300 = {
 	.pwrreg_clkgate = true,
 	.pwrreg_nopower = true,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_OD,
+	.init = mmci_variant_init,
 };

 static struct variant_data variant_nomadik = {
@@ -106,7 +150,13 @@ static struct variant_data variant_nomadik = {
 	.fifohalfsize = 8 * 4,
 	.clkreg = MCI_CLK_ENABLE,
 	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.datalength_bits = 24,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
 	.st_sdio = true,
 	.st_clkdiv = true,
@@ -116,8 +166,10 @@ static struct variant_data variant_nomadik = {
 	.pwrreg_clkgate = true,
 	.pwrreg_nopower = true,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_OD,
+	.init = mmci_variant_init,
 };

 static struct variant_data variant_ux500 = {
@@ -127,7 +179,13 @@ static struct variant_data variant_ux500 = {
 	.clkreg_enable = MCI_ST_UX500_HWFCEN,
 	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
 	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.datalength_bits = 24,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
 	.st_sdio = true,
 	.st_clkdiv = true,
@@ -141,8 +199,10 @@ static struct variant_data variant_ux500 = {
 	.busy_detect_mask = MCI_ST_BUSYENDMASK,
 	.pwrreg_nopower = true,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_OD,
+	.init = mmci_variant_init,
 };

 static struct variant_data variant_ux500v2 = {
@@ -152,8 +212,14 @@ static struct variant_data variant_ux500v2 = {
 	.clkreg_enable = MCI_ST_UX500_HWFCEN,
 	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
 	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
 	.datalength_bits = 24,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
 	.st_sdio = true,
 	.st_clkdiv = true,
@@ -168,8 +234,10 @@ static struct variant_data variant_ux500v2 = {
 	.busy_detect_mask = MCI_ST_BUSYENDMASK,
 	.pwrreg_nopower = true,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_OD,
+	.init = mmci_variant_init,
 };

 static struct variant_data variant_stm32 = {
@@ -179,7 +247,14 @@ static struct variant_data variant_stm32 = {
 	.clkreg_enable = MCI_ST_UX500_HWFCEN,
 	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
 	.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.datalength_bits = 24,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
 	.st_sdio = true,
 	.st_clkdiv = true,
@@ -187,6 +262,26 @@ static struct variant_data variant_stm32 = {
 	.f_max = 48000000,
 	.pwrreg_clkgate = true,
 	.pwrreg_nopower = true,
+	.init = mmci_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmc = {
+	.fifosize = 16 * 4,
+	.fifohalfsize = 8 * 4,
+	.f_max = 208000000,
+	.stm32_clkdiv = true,
+	.cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+	.cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+	.cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+	.data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+	.irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+	.datactrl_first = true,
+	.datacnt_useless = true,
+	.datalength_bits = 25,
+	.datactrl_blocksz = 14,
+	.stm32_idmabsize_mask = GENMASK(12, 5),
+	.init = sdmmc_variant_init,
 };

 static struct variant_data variant_qcom = {
@@ -197,15 +292,22 @@ static struct variant_data variant_qcom = {
 		  MCI_QCOM_CLK_SELECT_IN_FBCLK,
 	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
 	.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
+	.cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+	.cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+	.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+	.cmdreg_srsp = MCI_CPSM_RESPONSE,
 	.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
 	.blksz_datactrl4 = true,
 	.datalength_bits = 24,
+	.datactrl_blocksz = 11,
+	.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
 	.pwrreg_powerup = MCI_PWR_UP,
 	.f_max = 208000000,
 	.explicit_mclk_control = true,
 	.qcom_fifo = true,
 	.qcom_dml = true,
 	.mmcimask1 = true,
+	.irq_pio_mask = MCI_IRQ_PIO_MASK,
 	.start_err = MCI_STARTBITERR,
 	.opendrain = MCI_ROD,
 	.init = qcom_variant_init,
@@ -226,24 +328,6 @@ static int mmci_card_busy(struct mmc_host *mmc)
 	return busy;
 }

-/*
- * Validate mmc prerequisites
- */
-static int mmci_validate_data(struct mmci_host *host,
-			      struct mmc_data *data)
-{
-	if (!data)
-		return 0;
-
-	if (!is_power_of_2(data->blksz)) {
-		dev_err(mmc_dev(host->mmc),
-			"unsupported block size (%d bytes)\n", data->blksz);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static void mmci_reg_delay(struct mmci_host *host)
 {
 	/*
@@ -262,7 +346,7 @@ static void mmci_reg_delay(struct mmci_host *host)
 /*
  * This must be called with host->lock held
  */
-static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
+void mmci_write_clkreg(struct mmci_host *host, u32 clk)
 {
 	if (host->clk_reg != clk) {
 		host->clk_reg = clk;
@@ -273,7 +357,7 @@ static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
 /*
  * This must be called with host->lock held
  */
-static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
+void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
 {
 	if (host->pwr_reg != pwr) {
 		host->pwr_reg = pwr;
@@ -357,6 +441,135 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
 	mmci_write_clkreg(host, clk);
 }

+void mmci_dma_release(struct mmci_host *host)
+{
+	if (host->ops && host->ops->dma_release)
+		host->ops->dma_release(host);
+
+	host->use_dma = false;
+}
+
+void mmci_dma_setup(struct mmci_host *host)
+{
+	if (!host->ops || !host->ops->dma_setup)
+		return;
+
+	if (host->ops->dma_setup(host))
+		return;
+
+	/* initialize pre request cookie */
+	host->next_cookie = 1;
+
+	host->use_dma = true;
+}
+
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+			      struct mmc_data *data)
+{
+	if (!data)
+		return 0;
+
+	if (!is_power_of_2(data->blksz)) {
+		dev_err(mmc_dev(host->mmc),
+			"unsupported block size (%d bytes)\n", data->blksz);
+		return -EINVAL;
+	}
+
+	if (host->ops && host->ops->validate_data)
+		return host->ops->validate_data(host, data);
+
+	return 0;
+}
+
+int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+{
+	int err;
+
+	if (!host->ops || !host->ops->prep_data)
+		return 0;
+
+	err = host->ops->prep_data(host, data, next);
+
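+	/* A non-zero host_cookie marks data that already has a DMA job
+	 * prepared; the counter wraps back to 1 on signed overflow so
+	 * zero keeps meaning "nothing prepared". */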
+	if (next && !err)
+		data->host_cookie = ++host->next_cookie < 0 ?
+			1 : host->next_cookie;
+
+	return err;
+}
+
+void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+		      int err)
+{
+	if (host->ops && host->ops->unprep_data)
+		host->ops->unprep_data(host, data, err);
+
+	data->host_cookie = 0;
+}
+
+void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
+
+	if (host->ops && host->ops->get_next_data)
+		host->ops->get_next_data(host, data);
+}
+
+int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+{
+	struct mmc_data *data = host->data;
+	int ret;
+
+	if (!host->use_dma)
+		return -EINVAL;
+
+	ret = mmci_prep_data(host, data, false);
+	if (ret)
+		return ret;
+
+	if (!host->ops || !host->ops->dma_start)
+		return -EINVAL;
+
+	/* Okay, go for it. */
+	dev_vdbg(mmc_dev(host->mmc),
+		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+		 data->sg_len, data->blksz, data->blocks, data->flags);
+
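+	/* The variant callback may amend datactrl (e.g. OR in
+	 * MCI_DPSM_DMAENABLE) before it is written back below. */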
+	host->ops->dma_start(host, &datactrl);
+
+	/* Trigger the DMA transfer */
+	mmci_write_datactrlreg(host, datactrl);
+
+	/*
+	 * Let the MMCI say when the data is ended and it's time
+	 * to fire next DMA request. When that happens, MMCI will
+	 * call mmci_data_end()
+	 */
+	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+	       host->base + MMCIMASK0);
+	return 0;
+}
+
+void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+	if (!host->use_dma)
+		return;
+
+	if (host->ops && host->ops->dma_finalize)
+		host->ops->dma_finalize(host, data);
+}
+
+void mmci_dma_error(struct mmci_host *host)
+{
+	if (!host->use_dma)
+		return;
+
+	if (host->ops && host->ops->dma_error)
+		host->ops->dma_error(host);
+}
+
 static void
 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 {
@@ -378,7 +591,7 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
 	if (host->singleirq) {
 		unsigned int mask0 = readl(base + MMCIMASK0);

-		mask0 &= ~MCI_IRQ1MASK;
+		mask0 &= ~variant->irq_pio_mask;
 		mask0 |= mask;

 		writel(mask0, base + MMCIMASK0);
@@ -415,31 +628,50 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
  * no custom DMA interfaces are supported.
  */
 #ifdef CONFIG_DMA_ENGINE
-static void mmci_dma_setup(struct mmci_host *host)
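+/*
+ * State for the dmaengine-based transfer path; an instance hangs off
+ * host->dma_priv once mmci_dmae_setup() has run.
+ */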
+struct mmci_dmae_next {
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *chan;
+};
+
+struct mmci_dmae_priv {
+	struct dma_chan *cur;
+	struct dma_chan *rx_channel;
+	struct dma_chan *tx_channel;
+	struct dma_async_tx_descriptor *desc_current;
+	struct mmci_dmae_next next_data;
+};
+
+int mmci_dmae_setup(struct mmci_host *host)
 {
 	const char *rxname, *txname;
+	struct mmci_dmae_priv *dmae;

-	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
-	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
+	if (!dmae)
+		return -ENOMEM;

-	/* initialize pre request cookie */
-	host->next_data.cookie = 1;
+	host->dma_priv = dmae;
+
+	dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
+						     "rx");
+	dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
+						     "tx");

 	/*
 	 * If only an RX channel is specified, the driver will
 	 * attempt to use it bidirectionally, however if it
 	 * is specified but cannot be located, DMA will be disabled.
 	 */
-	if (host->dma_rx_channel && !host->dma_tx_channel)
-		host->dma_tx_channel = host->dma_rx_channel;
+	if (dmae->rx_channel && !dmae->tx_channel)
+		dmae->tx_channel = dmae->rx_channel;

-	if (host->dma_rx_channel)
-		rxname = dma_chan_name(host->dma_rx_channel);
+	if (dmae->rx_channel)
+		rxname = dma_chan_name(dmae->rx_channel);
 	else
 		rxname = "none";

-	if (host->dma_tx_channel)
-		txname = dma_chan_name(host->dma_tx_channel);
+	if (dmae->tx_channel)
+		txname = dma_chan_name(dmae->tx_channel);
 	else
 		txname = "none";

@@ -450,66 +682,84 @@ static void mmci_dma_setup(struct mmci_host *host)
 	 * Limit the maximum segment size in any SG entry according to
 	 * the parameters of the DMA engine device.
 	 */
-	if (host->dma_tx_channel) {
-		struct device *dev = host->dma_tx_channel->device->dev;
+	if (dmae->tx_channel) {
+		struct device *dev = dmae->tx_channel->device->dev;
 		unsigned int max_seg_size = dma_get_max_seg_size(dev);

 		if (max_seg_size < host->mmc->max_seg_size)
 			host->mmc->max_seg_size = max_seg_size;
 	}
-	if (host->dma_rx_channel) {
-		struct device *dev = host->dma_rx_channel->device->dev;
+	if (dmae->rx_channel) {
+		struct device *dev = dmae->rx_channel->device->dev;
 		unsigned int max_seg_size = dma_get_max_seg_size(dev);

 		if (max_seg_size < host->mmc->max_seg_size)
 			host->mmc->max_seg_size = max_seg_size;
 	}

-	if (host->ops && host->ops->dma_setup)
-		host->ops->dma_setup(host);
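+	/* Both directions are needed from here on; releasing the channels
+	 * makes mmci_dma_setup() leave use_dma disabled, i.e. PIO fallback. */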
+	if (!dmae->tx_channel || !dmae->rx_channel) {
+		mmci_dmae_release(host);
+		return -EINVAL;
+	}
+
+	return 0;
 }

 /*
  * This is only used at probe/remove time, so inline it
  * so it can be discarded.
  */
-static inline void mmci_dma_release(struct mmci_host *host)
+void mmci_dmae_release(struct mmci_host *host)
 {
-	if (host->dma_rx_channel)
-		dma_release_channel(host->dma_rx_channel);
-	if (host->dma_tx_channel)
-		dma_release_channel(host->dma_tx_channel);
-	host->dma_rx_channel = host->dma_tx_channel = NULL;
-}
+	struct mmci_dmae_priv *dmae = host->dma_priv;

-static void mmci_dma_data_error(struct mmci_host *host)
-{
-	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-	dmaengine_terminate_all(host->dma_current);
-	host->dma_in_progress = false;
-	host->dma_current = NULL;
-	host->dma_desc_current = NULL;
-	host->data->host_cookie = 0;
+	if (dmae->rx_channel)
+		dma_release_channel(dmae->rx_channel);
+	if (dmae->tx_channel)
+		dma_release_channel(dmae->tx_channel);
+	dmae->rx_channel = dmae->tx_channel = NULL;
 }

 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
+	struct mmci_dmae_priv *dmae = host->dma_priv;
 	struct dma_chan *chan;

 	if (data->flags & MMC_DATA_READ)
-		chan = host->dma_rx_channel;
+		chan = dmae->rx_channel;
 	else
-		chan = host->dma_tx_channel;
+		chan = dmae->tx_channel;

 	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
 		     mmc_get_dma_dir(data));
 }

-static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+void mmci_dmae_error(struct mmci_host *host)
 {
+	struct mmci_dmae_priv *dmae = host->dma_priv;
+
+	if (!dma_inprogress(host))
+		return;
+
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(dmae->cur);
+	host->dma_in_progress = false;
+	dmae->cur = NULL;
+	dmae->desc_current = NULL;
+	host->data->host_cookie = 0;
+
+	mmci_dma_unmap(host, host->data);
+}
+
+void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+	struct mmci_dmae_priv *dmae = host->dma_priv;
 	u32 status;
 	int i;

+	if (!dma_inprogress(host))
+		return;
+
 	/* Wait up to 1ms for the DMA to complete */
 	for (i = 0; ; i++) {
 		status = readl(host->base + MMCISTATUS);
@@ -525,13 +775,12 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
 	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
 	 */
 	if (status & MCI_RXDATAAVLBLMASK) {
-		mmci_dma_data_error(host);
+		mmci_dma_error(host);
 		if (!data->error)
 			data->error = -EIO;
-	}
-
-	if (!data->host_cookie)
+	} else if (!data->host_cookie) {
 		mmci_dma_unmap(host, data);
+	}

 	/*
 	 * Use of DMA with scatter-gather is impossible.
@@ -543,15 +792,16 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
 	}

 	host->dma_in_progress = false;
-	host->dma_current = NULL;
-	host->dma_desc_current = NULL;
+	dmae->cur = NULL;
+	dmae->desc_current = NULL;
 }

 /* prepares DMA channel and DMA descriptor, returns non-zero on failure */
-static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
 				struct dma_chan **dma_chan,
 				struct dma_async_tx_descriptor **dma_desc)
 {
+	struct mmci_dmae_priv *dmae = host->dma_priv;
 	struct variant_data *variant = host->variant;
 	struct dma_slave_config conf = {
 		.src_addr = host->phybase + MMCIFIFO,
@@ -570,10 +820,10 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,

 	if (data->flags & MMC_DATA_READ) {
 		conf.direction = DMA_DEV_TO_MEM;
-		chan = host->dma_rx_channel;
+		chan = dmae->rx_channel;
 	} else {
 		conf.direction = DMA_MEM_TO_DEV;
-		chan = host->dma_tx_channel;
+		chan = dmae->tx_channel;
 	}

 	/* If there's no DMA channel, fall back to PIO */
@@ -610,160 +860,137 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	return -ENOMEM;
 }

-static inline int mmci_dma_prep_data(struct mmci_host *host,
-				     struct mmc_data *data)
+int mmci_dmae_prep_data(struct mmci_host *host,
+			struct mmc_data *data,
+			bool next)
 {
+	struct mmci_dmae_priv *dmae = host->dma_priv;
+	struct mmci_dmae_next *nd = &dmae->next_data;
+
+	if (!host->use_dma)
+		return -EINVAL;
+
+	if (next)
+		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
 	/* Check if next job is already prepared. */
-	if (host->dma_current && host->dma_desc_current)
+	if (dmae->cur && dmae->desc_current)
 		return 0;

 	/* No job was prepared, thus do it now. */
- return __mmci_dma_prep_data(host, data, &host->dma_current,
|
|
|
- &host->dma_desc_current);
|
|
|
-}
|
|
|
-
|
|
|
-static inline int mmci_dma_prep_next(struct mmci_host *host,
|
|
|
- struct mmc_data *data)
|
|
|
-{
|
|
|
- struct mmci_host_next *nd = &host->next_data;
|
|
|
- return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
|
|
|
+ return _mmci_dmae_prep_data(host, data, &dmae->cur,
|
|
|
+ &dmae->desc_current);
|
|
|
}
|
|
|
|
|
|
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
|
|
|
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
|
|
|
{
|
|
|
- int ret;
|
|
|
+ struct mmci_dmae_priv *dmae = host->dma_priv;
|
|
|
struct mmc_data *data = host->data;
|
|
|
|
|
|
- ret = mmci_dma_prep_data(host, host->data);
|
|
|
- if (ret)
|
|
|
- return ret;
|
|
|
-
|
|
|
- /* Okay, go for it. */
|
|
|
- dev_vdbg(mmc_dev(host->mmc),
|
|
|
- "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
|
|
|
- data->sg_len, data->blksz, data->blocks, data->flags);
|
|
|
host->dma_in_progress = true;
|
|
|
- dmaengine_submit(host->dma_desc_current);
|
|
|
- dma_async_issue_pending(host->dma_current);
|
|
|
+ dmaengine_submit(dmae->desc_current);
|
|
|
+ dma_async_issue_pending(dmae->cur);
|
|
|
|
|
|
if (host->variant->qcom_dml)
|
|
|
dml_start_xfer(host, data);
|
|
|
|
|
|
- datactrl |= MCI_DPSM_DMAENABLE;
|
|
|
+ *datactrl |= MCI_DPSM_DMAENABLE;
|
|
|
|
|
|
- /* Trigger the DMA transfer */
|
|
|
- mmci_write_datactrlreg(host, datactrl);
|
|
|
-
|
|
|
- /*
|
|
|
- * Let the MMCI say when the data is ended and it's time
|
|
|
- * to fire next DMA request. When that happens, MMCI will
|
|
|
- * call mmci_data_end()
|
|
|
- */
|
|
|
- writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
|
|
|
- host->base + MMCIMASK0);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
|
|
|
+void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
|
|
|
{
|
|
|
- struct mmci_host_next *next = &host->next_data;
|
|
|
-
|
|
|
- WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
|
|
|
- WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
|
|
|
-
|
|
|
- host->dma_desc_current = next->dma_desc;
|
|
|
- host->dma_current = next->dma_chan;
|
|
|
- next->dma_desc = NULL;
|
|
|
- next->dma_chan = NULL;
|
|
|
-}
|
|
|
+ struct mmci_dmae_priv *dmae = host->dma_priv;
|
|
|
+ struct mmci_dmae_next *next = &dmae->next_data;
|
|
|
|
|
|
-static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
|
|
-{
|
|
|
- struct mmci_host *host = mmc_priv(mmc);
|
|
|
- struct mmc_data *data = mrq->data;
|
|
|
- struct mmci_host_next *nd = &host->next_data;
|
|
|
-
|
|
|
- if (!data)
|
|
|
+ if (!host->use_dma)
|
|
|
return;
|
|
|
|
|
|
- BUG_ON(data->host_cookie);
|
|
|
+ WARN_ON(!data->host_cookie && (next->desc || next->chan));
|
|
|
|
|
|
- if (mmci_validate_data(host, data))
|
|
|
- return;
|
|
|
-
|
|
|
- if (!mmci_dma_prep_next(host, data))
|
|
|
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
|
|
|
+ dmae->desc_current = next->desc;
|
|
|
+ dmae->cur = next->chan;
|
|
|
+ next->desc = NULL;
|
|
|
+ next->chan = NULL;
|
|
|
}

-static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
-			      int err)
+void mmci_dmae_unprep_data(struct mmci_host *host,
+			   struct mmc_data *data, int err)
+
 {
-	struct mmci_host *host = mmc_priv(mmc);
-	struct mmc_data *data = mrq->data;
+	struct mmci_dmae_priv *dmae = host->dma_priv;

-	if (!data || !data->host_cookie)
+	if (!host->use_dma)
 		return;

 	mmci_dma_unmap(host, data);

 	if (err) {
-		struct mmci_host_next *next = &host->next_data;
+		struct mmci_dmae_next *next = &dmae->next_data;
 		struct dma_chan *chan;
 		if (data->flags & MMC_DATA_READ)
-			chan = host->dma_rx_channel;
+			chan = dmae->rx_channel;
 		else
-			chan = host->dma_tx_channel;
+			chan = dmae->tx_channel;
 		dmaengine_terminate_all(chan);

-		if (host->dma_desc_current == next->dma_desc)
-			host->dma_desc_current = NULL;
+		if (dmae->desc_current == next->desc)
+			dmae->desc_current = NULL;

-		if (host->dma_current == next->dma_chan) {
+		if (dmae->cur == next->chan) {
 			host->dma_in_progress = false;
-			host->dma_current = NULL;
+			dmae->cur = NULL;
 		}

-		next->dma_desc = NULL;
-		next->dma_chan = NULL;
-		data->host_cookie = 0;
+		next->desc = NULL;
+		next->chan = NULL;
 	}
 }

-#else
-/* Blank functions if the DMA engine is not available */
-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
-{
-}
-static inline void mmci_dma_setup(struct mmci_host *host)
-{
-}
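+/* Generic variants route all DMA handling through the dmaengine helpers. */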
+static struct mmci_host_ops mmci_variant_ops = {
+	.prep_data = mmci_dmae_prep_data,
+	.unprep_data = mmci_dmae_unprep_data,
+	.get_next_data = mmci_dmae_get_next_data,
+	.dma_setup = mmci_dmae_setup,
+	.dma_release = mmci_dmae_release,
+	.dma_start = mmci_dmae_start,
+	.dma_finalize = mmci_dmae_finalize,
+	.dma_error = mmci_dmae_error,
+};

-static inline void mmci_dma_release(struct mmci_host *host)
+void mmci_variant_init(struct mmci_host *host)
 {
+	host->ops = &mmci_variant_ops;
 }
+#endif

-static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
-}
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;

-static inline void mmci_dma_finalize(struct mmci_host *host,
-				     struct mmc_data *data)
-{
-}
+	if (!data)
+		return;

-static inline void mmci_dma_data_error(struct mmci_host *host)
-{
+	WARN_ON(data->host_cookie);
+
+	if (mmci_validate_data(host, data))
+		return;
+
+	mmci_prep_data(host, data, true);
 }

-static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+			      int err)
 {
-	return -ENOSYS;
-}
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;

-#define mmci_pre_request NULL
-#define mmci_post_request NULL
+	if (!data || !data->host_cookie)
+		return;

-#endif
+	mmci_unprep_data(host, data, err);
+}

 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
@@ -793,11 +1020,11 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	BUG_ON(1 << blksz_bits != data->blksz);

 	if (variant->blksz_datactrl16)
-		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
 	else if (variant->blksz_datactrl4)
-		datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
+		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
 	else
-		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+		datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;

 	if (data->flags & MMC_DATA_READ)
 		datactrl |= MCI_DPSM_DIRECTION;
@@ -831,7 +1058,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	 * Attempt to use DMA operation mode, if this
 	 * should fail, fall back to PIO mode
 	 */
-	if (!mmci_dma_start_data(host, datactrl))
+	if (!mmci_dma_start(host, datactrl))
 		return;

 	/* IRQ mode, map the SG list for CPU reading/writing */
@@ -868,16 +1095,19 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
 	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
 		cmd->opcode, cmd->arg, cmd->flags);

-	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
+	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
 		writel(0, base + MMCICOMMAND);
 		mmci_reg_delay(host);
 	}

-	c |= cmd->opcode | MCI_CPSM_ENABLE;
+	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
 	if (cmd->flags & MMC_RSP_PRESENT) {
 		if (cmd->flags & MMC_RSP_136)
-			c |= MCI_CPSM_LONGRSP;
-		c |= MCI_CPSM_RESPONSE;
+			c |= host->variant->cmdreg_lrsp_crc;
+		else if (cmd->flags & MMC_RSP_CRC)
+			c |= host->variant->cmdreg_srsp_crc;
+		else
+			c |= host->variant->cmdreg_srsp;
 	}
 	if (/*interrupt*/0)
 		c |= MCI_CPSM_INTERRUPT;
@@ -895,21 +1125,22 @@ static void
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
+	unsigned int status_err;
+
 	/* Make sure we have data to handle */
 	if (!data)
 		return;

 	/* First check for errors */
-	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
-		      host->variant->start_err |
-		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
+	status_err = status & (host->variant->start_err |
+			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+			       MCI_TXUNDERRUN | MCI_RXOVERRUN);
+
+	if (status_err) {
 		u32 remain, success;

 		/* Terminate the DMA transfer */
-		if (dma_inprogress(host)) {
-			mmci_dma_data_error(host);
-			mmci_dma_unmap(host, data);
-		}
+		mmci_dma_error(host);

 		/*
 		 * Calculate how far we are into the transfer. Note that
@@ -918,22 +1149,26 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		 * can be as much as a FIFO-worth of data ahead. This
 		 * matters for FIFO overruns only.
 		 */
-		remain = readl(host->base + MMCIDATACNT);
-		success = data->blksz * data->blocks - remain;
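+		/* Variants flagged datacnt_useless cannot trust MMCIDATACNT,
+		 * so count the transfer as having made no progress. */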
+		if (!host->variant->datacnt_useless) {
+			remain = readl(host->base + MMCIDATACNT);
+			success = data->blksz * data->blocks - remain;
+		} else {
+			success = 0;
+		}

 		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
-			status, success);
-		if (status & MCI_DATACRCFAIL) {
+			status_err, success);
+		if (status_err & MCI_DATACRCFAIL) {
 			/* Last block was not successful */
 			success -= 1;
 			data->error = -EILSEQ;
-		} else if (status & MCI_DATATIMEOUT) {
+		} else if (status_err & MCI_DATATIMEOUT) {
 			data->error = -ETIMEDOUT;
-		} else if (status & MCI_STARTBITERR) {
+		} else if (status_err & MCI_STARTBITERR) {
 			data->error = -ECOMM;
-		} else if (status & MCI_TXUNDERRUN) {
+		} else if (status_err & MCI_TXUNDERRUN) {
 			data->error = -EIO;
-		} else if (status & MCI_RXOVERRUN) {
+		} else if (status_err & MCI_RXOVERRUN) {
 			if (success > host->variant->fifosize)
 				success -= host->variant->fifosize;
 			else
@@ -947,8 +1182,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

 	if (status & MCI_DATAEND || data->error) {
-		if (dma_inprogress(host))
-			mmci_dma_finalize(host, data);
+		mmci_dma_finalize(host, data);
+
 		mmci_stop_data(host);

 		if (!data->error)
@@ -1055,16 +1290,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 	if ((!sbc && !cmd->data) || cmd->error) {
 		if (host->data) {
 			/* Terminate the DMA transfer */
-			if (dma_inprogress(host)) {
-				mmci_dma_data_error(host);
-				mmci_dma_unmap(host, host->data);
-			}
+			mmci_dma_error(host);
+
 			mmci_stop_data(host);
 		}
 		mmci_request_end(host, host->mrq);
 	} else if (sbc) {
 		mmci_start_command(host, host->mrq->cmd, 0);
-	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
+	} else if (!host->variant->datactrl_first &&
+		   !(cmd->data->flags & MMC_DATA_READ)) {
 		mmci_start_data(host, cmd->data);
 	}
 }
@@ -1264,7 +1498,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
 		if (status & host->mask1_reg)
 			mmci_pio_irq(irq, dev_id);

-		status &= ~MCI_IRQ1MASK;
+		status &= ~host->variant->irq_pio_mask;
 	}

 	/*
@@ -1328,7 +1562,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	if (mrq->data)
 		mmci_get_next_data(host, mrq->data);

-	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
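+	/* datactrl_first variants must program DATACTRL before the command
+	 * goes out, so data is started here for writes as well as reads. */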
+	if (mrq->data &&
+	    (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
 		mmci_start_data(host, mrq->data);

 	if (mrq->sbc)
@@ -1438,8 +1673,16 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)

 	spin_lock_irqsave(&host->lock, flags);

-	mmci_set_clkreg(host, ios->clock);
-	mmci_write_pwrreg(host, pwr);
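+	/* A variant may override how the clock and power registers are set. */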
+	if (host->ops && host->ops->set_clkreg)
+		host->ops->set_clkreg(host, ios->clock);
+	else
+		mmci_set_clkreg(host, ios->clock);
+
+	if (host->ops && host->ops->set_pwrreg)
+		host->ops->set_pwrreg(host, pwr);
+	else
+		mmci_write_pwrreg(host, pwr);
+
 	mmci_reg_delay(host);

 	spin_unlock_irqrestore(&host->lock, flags);
@@ -1518,6 +1761,12 @@ static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
 		host->pwr_reg_add |= MCI_ST_CMDDIREN;
 	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
 		host->pwr_reg_add |= MCI_ST_FBCLKEN;
+	if (of_get_property(np, "st,sig-dir", NULL))
+		host->pwr_reg_add |= MCI_STM32_DIRPOL;
+	if (of_get_property(np, "st,neg-edge", NULL))
+		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
+	if (of_get_property(np, "st,use-ckin", NULL))
+		host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

 	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
 		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
@@ -1644,6 +1893,8 @@ static int mmci_probe(struct amba_device *dev,
 	 */
 	if (variant->st_clkdiv)
 		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+	else if (variant->stm32_clkdiv)
+		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
 	else if (variant->explicit_mclk_control)
 		mmc->f_min = clk_round_rate(host->clk, 100000);
 	else
@@ -1665,6 +1916,12 @@ static int mmci_probe(struct amba_device *dev,

 	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

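+	/* The reset control is optional; only some variants provide one. */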
+ host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
|
|
|
+ if (IS_ERR(host->rst)) {
|
|
|
+ ret = PTR_ERR(host->rst);
|
|
|
+ goto clk_disable;
|
|
|
+ }
|
|
|
+
|
|
|
/* Get regulators and the supported OCR mask */
|
|
|
ret = mmc_regulator_get_supply(mmc);
|
|
|
if (ret)
|
|
@@ -1675,13 +1932,6 @@ static int mmci_probe(struct amba_device *dev,
|
|
|
else if (plat->ocr_mask)
|
|
|
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
|
|
|
|
|
|
- /* DT takes precedence over platform data. */
|
|
|
- if (!np) {
|
|
|
- if (!plat->cd_invert)
|
|
|
- mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
|
|
|
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
|
|
- }
|
|
|
-
|
|
|
/* We support these capabilities. */
|
|
|
mmc->caps |= MMC_CAP_CMD23;
|
|
|
|
|
@@ -1727,13 +1977,13 @@ static int mmci_probe(struct amba_device *dev,
|
|
|
/*
|
|
|
* Block size can be up to 2048 bytes, but must be a power of two.
|
|
|
*/
|
|
|
- mmc->max_blk_size = 1 << 11;
|
|
|
+ mmc->max_blk_size = 1 << variant->datactrl_blocksz;
|
|
|
|
|
|
/*
|
|
|
* Limit the number of blocks transferred so that we don't overflow
|
|
|
* the maximum request size.
|
|
|
*/
|
|
|
- mmc->max_blk_count = mmc->max_req_size >> 11;
|
|
|
+ mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
|
|
|
|
|
|
spin_lock_init(&host->lock);
|
|
|
|
|
@@ -1749,30 +1999,16 @@ static int mmci_probe(struct amba_device *dev,
 	 * - not using DT but using a descriptor table, or
 	 * - using a table of descriptors ALONGSIDE DT, or
 	 * look up these descriptors named "cd" and "wp" right here, fail
-	 * silently of these do not exist and proceed to try platform data
+	 * silently if these do not exist
 	 */
 	if (!np) {
 		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
-		if (ret < 0) {
-			if (ret == -EPROBE_DEFER)
-				goto clk_disable;
-			else if (gpio_is_valid(plat->gpio_cd)) {
-				ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
-				if (ret)
-					goto clk_disable;
-			}
-		}
+		if (ret == -EPROBE_DEFER)
+			goto clk_disable;

 		ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
-		if (ret < 0) {
-			if (ret == -EPROBE_DEFER)
-				goto clk_disable;
-			else if (gpio_is_valid(plat->gpio_wp)) {
-				ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
-				if (ret)
-					goto clk_disable;
-			}
-		}
+		if (ret == -EPROBE_DEFER)
+			goto clk_disable;
 	}

 	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
@@ -1789,7 +2025,7 @@ static int mmci_probe(struct amba_device *dev,
 		goto clk_disable;
 	}

-	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

 	amba_set_drvdata(dev, mmc);

@@ -1876,7 +2112,8 @@ static void mmci_restore(struct mmci_host *host)
 		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
 		writel(host->pwr_reg, host->base + MMCIPOWER);
 	}
-	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+	writel(MCI_IRQENABLE | host->variant->start_err,
+	       host->base + MMCIMASK0);
 	mmci_reg_delay(host);

 	spin_unlock_irqrestore(&host->lock, flags);
@@ -1971,6 +2208,11 @@ static const struct amba_id mmci_ids[] = {
 		.mask = 0x00ffffff,
 		.data = &variant_stm32,
 	},
+	{
+		.id = 0x10153180,
+		.mask = 0xf0ffffff,
+		.data = &variant_stm32_sdmmc,
+	},
 	/* Qualcomm variants */
 	{
 		.id = 0x00051180,