meson-gx-mmc.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853
  1. /*
  2. * Amlogic SD/eMMC driver for the GX/S905 family SoCs
  3. *
  4. * Copyright (c) 2016 BayLibre, SAS.
  5. * Author: Kevin Hilman <khilman@baylibre.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of version 2 of the GNU General Public License as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  18. * The full GNU General Public License is included in this distribution
  19. * in the file called COPYING.
  20. */
  21. #include <linux/kernel.h>
  22. #include <linux/module.h>
  23. #include <linux/init.h>
  24. #include <linux/device.h>
  25. #include <linux/of_device.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/ioport.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/dma-mapping.h>
  30. #include <linux/mmc/host.h>
  31. #include <linux/mmc/mmc.h>
  32. #include <linux/mmc/sdio.h>
  33. #include <linux/mmc/slot-gpio.h>
  34. #include <linux/io.h>
  35. #include <linux/clk.h>
  36. #include <linux/clk-provider.h>
  37. #include <linux/regulator/consumer.h>
  38. #define DRIVER_NAME "meson-gx-mmc"
  39. #define SD_EMMC_CLOCK 0x0
  40. #define CLK_DIV_SHIFT 0
  41. #define CLK_DIV_WIDTH 6
  42. #define CLK_DIV_MASK 0x3f
  43. #define CLK_DIV_MAX 63
  44. #define CLK_SRC_SHIFT 6
  45. #define CLK_SRC_WIDTH 2
  46. #define CLK_SRC_MASK 0x3
  47. #define CLK_SRC_XTAL 0 /* external crystal */
  48. #define CLK_SRC_XTAL_RATE 24000000
  49. #define CLK_SRC_PLL 1 /* FCLK_DIV2 */
  50. #define CLK_SRC_PLL_RATE 1000000000
  51. #define CLK_PHASE_SHIFT 8
  52. #define CLK_PHASE_MASK 0x3
  53. #define CLK_PHASE_0 0
  54. #define CLK_PHASE_90 1
  55. #define CLK_PHASE_180 2
  56. #define CLK_PHASE_270 3
  57. #define CLK_ALWAYS_ON BIT(24)
  58. #define SD_EMMC_DElAY 0x4
  59. #define SD_EMMC_ADJUST 0x8
  60. #define SD_EMMC_CALOUT 0x10
  61. #define SD_EMMC_START 0x40
  62. #define START_DESC_INIT BIT(0)
  63. #define START_DESC_BUSY BIT(1)
  64. #define START_DESC_ADDR_SHIFT 2
  65. #define START_DESC_ADDR_MASK (~0x3)
  66. #define SD_EMMC_CFG 0x44
  67. #define CFG_BUS_WIDTH_SHIFT 0
  68. #define CFG_BUS_WIDTH_MASK 0x3
  69. #define CFG_BUS_WIDTH_1 0x0
  70. #define CFG_BUS_WIDTH_4 0x1
  71. #define CFG_BUS_WIDTH_8 0x2
  72. #define CFG_DDR BIT(2)
  73. #define CFG_BLK_LEN_SHIFT 4
  74. #define CFG_BLK_LEN_MASK 0xf
  75. #define CFG_RESP_TIMEOUT_SHIFT 8
  76. #define CFG_RESP_TIMEOUT_MASK 0xf
  77. #define CFG_RC_CC_SHIFT 12
  78. #define CFG_RC_CC_MASK 0xf
  79. #define CFG_STOP_CLOCK BIT(22)
  80. #define CFG_CLK_ALWAYS_ON BIT(18)
  81. #define CFG_AUTO_CLK BIT(23)
  82. #define SD_EMMC_STATUS 0x48
  83. #define STATUS_BUSY BIT(31)
  84. #define SD_EMMC_IRQ_EN 0x4c
  85. #define IRQ_EN_MASK 0x3fff
  86. #define IRQ_RXD_ERR_SHIFT 0
  87. #define IRQ_RXD_ERR_MASK 0xff
  88. #define IRQ_TXD_ERR BIT(8)
  89. #define IRQ_DESC_ERR BIT(9)
  90. #define IRQ_RESP_ERR BIT(10)
  91. #define IRQ_RESP_TIMEOUT BIT(11)
  92. #define IRQ_DESC_TIMEOUT BIT(12)
  93. #define IRQ_END_OF_CHAIN BIT(13)
  94. #define IRQ_RESP_STATUS BIT(14)
  95. #define IRQ_SDIO BIT(15)
  96. #define SD_EMMC_CMD_CFG 0x50
  97. #define SD_EMMC_CMD_ARG 0x54
  98. #define SD_EMMC_CMD_DAT 0x58
  99. #define SD_EMMC_CMD_RSP 0x5c
  100. #define SD_EMMC_CMD_RSP1 0x60
  101. #define SD_EMMC_CMD_RSP2 0x64
  102. #define SD_EMMC_CMD_RSP3 0x68
  103. #define SD_EMMC_RXD 0x94
  104. #define SD_EMMC_TXD 0x94
  105. #define SD_EMMC_LAST_REG SD_EMMC_TXD
  106. #define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
  107. #define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
  108. #define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
  109. #define MUX_CLK_NUM_PARENTS 2
/* Per-controller driver state. */
struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_request *mrq;	/* request in flight; NULL when idle */
	struct mmc_command *cmd;	/* command in flight; NULL when idle */
	spinlock_t lock;		/* taken in the hard IRQ handler */
	void __iomem *regs;		/* mapped SD_EMMC register block */
	int irq;
	u32 ocr_mask;

	struct clk *core_clk;		/* "core" gate clock from DT */
	struct clk_mux mux;		/* in-IP source mux (SD_EMMC_CLOCK) */
	struct clk *mux_clk;
	struct clk *mux_parent[MUX_CLK_NUM_PARENTS];	/* "clkin0"/"clkin1" */
	unsigned long mux_parent_rate[MUX_CLK_NUM_PARENTS];
	struct clk_divider cfg_div;	/* in-IP divider (SD_EMMC_CLOCK) */
	struct clk *cfg_div_clk;

	unsigned int bounce_buf_size;	/* size of the DMA bounce buffer */
	void *bounce_buf;		/* CPU address of bounce buffer */
	dma_addr_t bounce_dma_addr;	/* device address of bounce buffer */
	bool vqmmc_enabled;		/* tracks vqmmc regulator state */
};
/*
 * Hardware command descriptor.  The same layout is written word-by-word
 * into the SD_EMMC_CMD_{CFG,ARG,DAT,RSP} registers.
 */
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

/* cmd_cfg bit fields */
#define CMD_CFG_LENGTH_SHIFT 0
#define CMD_CFG_LENGTH_MASK 0x1ff
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_SHIFT 12
#define CMD_CFG_TIMEOUT_MASK 0xf
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_SHIFT 24
#define CMD_CFG_CMD_INDEX_MASK 0x3f
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

/* cmd_data bit fields: low bits are flags, rest is the DMA address */
#define CMD_DATA_MASK (~0x3)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
/* cmd_resp bit fields */
#define CMD_RESP_MASK (~0x1)
#define CMD_RESP_SRAM BIT(0)
/*
 * Change the MMC bus clock to @clk_rate.
 *
 * The clock is stopped (CFG_STOP_CLOCK) before the divider is
 * reprogrammed and restarted only after the new rate took effect.
 * A @clk_rate of 0 leaves the clock stopped.  Returns 0 on success or
 * the negative errno from clk_set_rate().
 */
static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;
	u32 cfg;

	if (clk_rate) {
		/* clamp into the [f_min, f_max] window we advertised */
		if (WARN_ON(clk_rate > mmc->f_max))
			clk_rate = mmc->f_max;
		else if (WARN_ON(clk_rate < mmc->f_min))
			clk_rate = mmc->f_min;
	}

	/* nothing to do if the rate is already programmed */
	if (clk_rate == mmc->actual_clock)
		return 0;

	/* stop clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	if (!(cfg & CFG_STOP_CLOCK)) {
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	dev_dbg(host->dev, "change clock rate %u -> %lu\n",
		mmc->actual_clock, clk_rate);

	if (clk_rate == 0) {
		mmc->actual_clock = 0;
		return 0;
	}

	ret = clk_set_rate(host->cfg_div_clk, clk_rate);
	if (ret)
		dev_warn(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			 clk_rate, ret);
	else if (clk_rate && clk_rate != clk_get_rate(host->cfg_div_clk))
		/* divider could not hit the exact rate; keep old actual_clock */
		dev_warn(host->dev, "divider requested rate %lu != actual rate %lu: ret=%d\n",
			 clk_rate, clk_get_rate(host->cfg_div_clk), ret);
	else
		mmc->actual_clock = clk_rate;

	/* (re)start clock, if non-zero */
	if (!ret && clk_rate) {
		cfg = readl(host->regs + SD_EMMC_CFG);
		cfg &= ~CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	return ret;
}
  203. /*
  204. * The SD/eMMC IP block has an internal mux and divider used for
  205. * generating the MMC clock. Use the clock framework to create and
  206. * manage these clocks.
  207. */
  208. static int meson_mmc_clk_init(struct meson_host *host)
  209. {
  210. struct clk_init_data init;
  211. char clk_name[32];
  212. int i, ret = 0;
  213. const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
  214. unsigned int mux_parent_count = 0;
  215. const char *clk_div_parents[1];
  216. unsigned int f_min = UINT_MAX;
  217. u32 clk_reg, cfg;
  218. /* get the mux parents */
  219. for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
  220. char name[16];
  221. snprintf(name, sizeof(name), "clkin%d", i);
  222. host->mux_parent[i] = devm_clk_get(host->dev, name);
  223. if (IS_ERR(host->mux_parent[i])) {
  224. ret = PTR_ERR(host->mux_parent[i]);
  225. if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER)
  226. dev_err(host->dev, "Missing clock %s\n", name);
  227. host->mux_parent[i] = NULL;
  228. return ret;
  229. }
  230. host->mux_parent_rate[i] = clk_get_rate(host->mux_parent[i]);
  231. mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
  232. mux_parent_count++;
  233. if (host->mux_parent_rate[i] < f_min)
  234. f_min = host->mux_parent_rate[i];
  235. }
  236. /* cacluate f_min based on input clocks, and max divider value */
  237. if (f_min != UINT_MAX)
  238. f_min = DIV_ROUND_UP(CLK_SRC_XTAL_RATE, CLK_DIV_MAX);
  239. else
  240. f_min = 4000000; /* default min: 400 MHz */
  241. host->mmc->f_min = f_min;
  242. /* create the mux */
  243. snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
  244. init.name = clk_name;
  245. init.ops = &clk_mux_ops;
  246. init.flags = 0;
  247. init.parent_names = mux_parent_names;
  248. init.num_parents = mux_parent_count;
  249. host->mux.reg = host->regs + SD_EMMC_CLOCK;
  250. host->mux.shift = CLK_SRC_SHIFT;
  251. host->mux.mask = CLK_SRC_MASK;
  252. host->mux.flags = 0;
  253. host->mux.table = NULL;
  254. host->mux.hw.init = &init;
  255. host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
  256. if (WARN_ON(IS_ERR(host->mux_clk)))
  257. return PTR_ERR(host->mux_clk);
  258. /* create the divider */
  259. snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
  260. init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
  261. init.ops = &clk_divider_ops;
  262. init.flags = CLK_SET_RATE_PARENT;
  263. clk_div_parents[0] = __clk_get_name(host->mux_clk);
  264. init.parent_names = clk_div_parents;
  265. init.num_parents = ARRAY_SIZE(clk_div_parents);
  266. host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
  267. host->cfg_div.shift = CLK_DIV_SHIFT;
  268. host->cfg_div.width = CLK_DIV_WIDTH;
  269. host->cfg_div.hw.init = &init;
  270. host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
  271. CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;
  272. host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
  273. if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
  274. return PTR_ERR(host->cfg_div_clk);
  275. /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
  276. clk_reg = 0;
  277. clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
  278. clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
  279. clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
  280. clk_reg &= ~CLK_ALWAYS_ON;
  281. writel(clk_reg, host->regs + SD_EMMC_CLOCK);
  282. /* Ensure clock starts in "auto" mode, not "always on" */
  283. cfg = readl(host->regs + SD_EMMC_CFG);
  284. cfg &= ~CFG_CLK_ALWAYS_ON;
  285. cfg |= CFG_AUTO_CLK;
  286. writel(cfg, host->regs + SD_EMMC_CFG);
  287. ret = clk_prepare_enable(host->cfg_div_clk);
  288. if (!ret)
  289. ret = meson_mmc_clk_set(host, f_min);
  290. if (!ret)
  291. clk_disable_unprepare(host->cfg_div_clk);
  292. return ret;
  293. }
  294. static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
  295. {
  296. struct meson_host *host = mmc_priv(mmc);
  297. u32 bus_width;
  298. u32 val, orig;
  299. /*
  300. * GPIO regulator, only controls switching between 1v8 and
  301. * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
  302. */
  303. switch (ios->power_mode) {
  304. case MMC_POWER_OFF:
  305. if (!IS_ERR(mmc->supply.vmmc))
  306. mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
  307. if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
  308. regulator_disable(mmc->supply.vqmmc);
  309. host->vqmmc_enabled = false;
  310. }
  311. break;
  312. case MMC_POWER_UP:
  313. if (!IS_ERR(mmc->supply.vmmc))
  314. mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
  315. break;
  316. case MMC_POWER_ON:
  317. if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
  318. int ret = regulator_enable(mmc->supply.vqmmc);
  319. if (ret < 0)
  320. dev_err(mmc_dev(mmc),
  321. "failed to enable vqmmc regulator\n");
  322. else
  323. host->vqmmc_enabled = true;
  324. }
  325. break;
  326. }
  327. meson_mmc_clk_set(host, ios->clock);
  328. /* Bus width */
  329. val = readl(host->regs + SD_EMMC_CFG);
  330. switch (ios->bus_width) {
  331. case MMC_BUS_WIDTH_1:
  332. bus_width = CFG_BUS_WIDTH_1;
  333. break;
  334. case MMC_BUS_WIDTH_4:
  335. bus_width = CFG_BUS_WIDTH_4;
  336. break;
  337. case MMC_BUS_WIDTH_8:
  338. bus_width = CFG_BUS_WIDTH_8;
  339. break;
  340. default:
  341. dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
  342. ios->bus_width);
  343. bus_width = CFG_BUS_WIDTH_4;
  344. return;
  345. }
  346. val = readl(host->regs + SD_EMMC_CFG);
  347. orig = val;
  348. val &= ~(CFG_BUS_WIDTH_MASK << CFG_BUS_WIDTH_SHIFT);
  349. val |= bus_width << CFG_BUS_WIDTH_SHIFT;
  350. val &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
  351. val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;
  352. val &= ~(CFG_RESP_TIMEOUT_MASK << CFG_RESP_TIMEOUT_SHIFT);
  353. val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;
  354. val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
  355. val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;
  356. writel(val, host->regs + SD_EMMC_CFG);
  357. if (val != orig)
  358. dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
  359. __func__, orig, val);
  360. }
  361. static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
  362. {
  363. struct meson_host *host = mmc_priv(mmc);
  364. WARN_ON(host->mrq != mrq);
  365. host->mrq = NULL;
  366. host->cmd = NULL;
  367. mmc_request_done(host->mmc, mrq);
  368. return 0;
  369. }
/*
 * Build a single command descriptor and program it into the
 * SD_EMMC_CMD_* registers.  Writing SD_EMMC_CMD_ARG last kicks off
 * execution, so the other three words and any bounce-buffer data must
 * be visible first (hence the barriers below).
 */
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc, desc_tmp;
	u32 cfg;
	u8 blk_len, cmd_cfg_timeout;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();
	desc = &desc_tmp;
	memset(desc, 0, sizeof(struct sd_emmc_desc));

	desc->cmd_cfg |= (cmd->opcode & CMD_CFG_CMD_INDEX_MASK) <<
		CMD_CFG_CMD_INDEX_SHIFT;
	desc->cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
	desc->cmd_arg = cmd->arg;

	/* Response */
	if (cmd->flags & MMC_RSP_PRESENT) {
		desc->cmd_cfg &= ~CMD_CFG_NO_RESP;
		if (cmd->flags & MMC_RSP_136)
			desc->cmd_cfg |= CMD_CFG_RESP_128;
		desc->cmd_cfg |= CMD_CFG_RESP_NUM;
		desc->cmd_resp = 0;

		if (!(cmd->flags & MMC_RSP_CRC))
			desc->cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			desc->cmd_cfg |= CMD_CFG_R1B;
	} else {
		desc->cmd_cfg |= CMD_CFG_NO_RESP;
	}

	/* data? */
	if (cmd->data) {
		desc->cmd_cfg |= CMD_CFG_DATA_IO;
		if (cmd->data->blocks > 1) {
			/* block mode: LENGTH field carries the block count */
			desc->cmd_cfg |= CMD_CFG_BLOCK_MODE;
			desc->cmd_cfg |=
				(cmd->data->blocks & CMD_CFG_LENGTH_MASK) <<
				CMD_CFG_LENGTH_SHIFT;

			/* check if block-size matches, if not update */
			cfg = readl(host->regs + SD_EMMC_CFG);
			blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
			blk_len >>= CFG_BLK_LEN_SHIFT;
			/* CFG stores log2 of the block size */
			if (blk_len != ilog2(cmd->data->blksz)) {
				dev_warn(host->dev, "%s: update blk_len %d -> %d\n",
					 __func__, blk_len,
					 ilog2(cmd->data->blksz));
				blk_len = ilog2(cmd->data->blksz);
				cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
				cfg |= blk_len << CFG_BLK_LEN_SHIFT;
				writel(cfg, host->regs + SD_EMMC_CFG);
			}
		} else {
			/* single block: LENGTH field carries the byte count */
			desc->cmd_cfg &= ~CMD_CFG_BLOCK_MODE;
			desc->cmd_cfg |=
				(cmd->data->blksz & CMD_CFG_LENGTH_MASK) <<
				CMD_CFG_LENGTH_SHIFT;
		}

		cmd->data->bytes_xfered = 0;
		xfer_bytes = cmd->data->blksz * cmd->data->blocks;
		if (cmd->data->flags & MMC_DATA_WRITE) {
			desc->cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			/* all transfers go through the DMA bounce buffer */
			sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len,
					  host->bounce_buf, xfer_bytes);
			cmd->data->bytes_xfered = xfer_bytes;
			/* make the bounce buffer visible to the device */
			dma_wmb();
		} else {
			desc->cmd_cfg &= ~CMD_CFG_DATA_WR;
		}

		if (xfer_bytes > 0) {
			desc->cmd_cfg &= ~CMD_CFG_DATA_NUM;
			desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
		} else {
			/* write data to data_addr */
			desc->cmd_cfg |= CMD_CFG_DATA_NUM;
			desc->cmd_data = 0;
		}

		/* larger timeout value for data commands -- assumed to be a
		 * log2 encoding; TODO confirm against the SoC datasheet */
		cmd_cfg_timeout = 12;
	} else {
		desc->cmd_cfg &= ~CMD_CFG_DATA_IO;
		cmd_cfg_timeout = 10;
	}
	desc->cmd_cfg |= (cmd_cfg_timeout & CMD_CFG_TIMEOUT_MASK) <<
		CMD_CFG_TIMEOUT_SHIFT;

	host->cmd = cmd;

	/* Last descriptor */
	desc->cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(desc->cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(desc->cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(desc->cmd_resp, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(desc->cmd_arg, host->regs + SD_EMMC_CMD_ARG);
}
  462. static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
  463. {
  464. struct meson_host *host = mmc_priv(mmc);
  465. WARN_ON(host->mrq != NULL);
  466. /* Stop execution */
  467. writel(0, host->regs + SD_EMMC_START);
  468. /* clear, ack, enable all interrupts */
  469. writel(0, host->regs + SD_EMMC_IRQ_EN);
  470. writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
  471. writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
  472. host->mrq = mrq;
  473. if (mrq->sbc)
  474. meson_mmc_start_cmd(mmc, mrq->sbc);
  475. else
  476. meson_mmc_start_cmd(mmc, mrq->cmd);
  477. }
  478. static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
  479. {
  480. struct meson_host *host = mmc_priv(mmc);
  481. if (cmd->flags & MMC_RSP_136) {
  482. cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
  483. cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
  484. cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
  485. cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
  486. } else if (cmd->flags & MMC_RSP_PRESENT) {
  487. cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
  488. }
  489. return 0;
  490. }
/*
 * Hard IRQ handler: classify the interrupt status, record a command
 * error if any, and either complete the request right here or wake the
 * threaded handler (for data copy-out / stop-command handling).
 */
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!host))
		return IRQ_NONE;

	cmd = host->cmd;
	mrq = host->mrq;

	if (WARN_ON(!mrq))
		return IRQ_NONE;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	spin_lock(&host->lock);
	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	/* only consider the sources we actually enabled */
	status = raw_status & irq_en;

	if (!status) {
		dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);
		ret = IRQ_NONE;
		goto out;
	}

	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_TXD_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_DESC_ERR)
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
	if (status & IRQ_RESP_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_RESP_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_DESC_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");

	/* chain completion: let the thread finish the transfer */
	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS))
		ret = IRQ_WAKE_THREAD;
	else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
			 cmd->flags, mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;

			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
				 data->blksz, data->blocks, data->flags,
				 data->flags & MMC_DATA_WRITE ? "write" : "",
				 data->flags & MMC_DATA_READ ? "read" : "");
		}
	}

out:
	/* ack all (enabled) interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	/* not deferring to the thread: finish the request now */
	if (ret == IRQ_HANDLED) {
		meson_mmc_read_resp(host->mmc, cmd);
		meson_mmc_request_done(host->mmc, cmd->mrq);
	}

	spin_unlock(&host->lock);
	return ret;
}
  565. static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
  566. {
  567. struct meson_host *host = dev_id;
  568. struct mmc_request *mrq = host->mrq;
  569. struct mmc_command *cmd = host->cmd;
  570. struct mmc_data *data;
  571. unsigned int xfer_bytes;
  572. int ret = IRQ_HANDLED;
  573. if (WARN_ON(!mrq))
  574. return IRQ_NONE;
  575. if (WARN_ON(!cmd))
  576. return IRQ_NONE;
  577. data = cmd->data;
  578. if (data) {
  579. xfer_bytes = data->blksz * data->blocks;
  580. if (data->flags & MMC_DATA_READ) {
  581. WARN_ON(xfer_bytes > host->bounce_buf_size);
  582. sg_copy_from_buffer(data->sg, data->sg_len,
  583. host->bounce_buf, xfer_bytes);
  584. data->bytes_xfered = xfer_bytes;
  585. }
  586. }
  587. meson_mmc_read_resp(host->mmc, cmd);
  588. if (!data || !data->stop || mrq->sbc)
  589. meson_mmc_request_done(host->mmc, mrq);
  590. else
  591. meson_mmc_start_cmd(host->mmc, data->stop);
  592. return ret;
  593. }
  594. /*
  595. * NOTE: we only need this until the GPIO/pinctrl driver can handle
  596. * interrupts. For now, the MMC core will use this for polling.
  597. */
  598. static int meson_mmc_get_cd(struct mmc_host *mmc)
  599. {
  600. int status = mmc_gpio_get_cd(mmc);
  601. if (status == -ENOSYS)
  602. return 1; /* assume present */
  603. return status;
  604. }
/* Host operations exposed to the MMC core. */
static const struct mmc_host_ops meson_mmc_ops = {
	.request = meson_mmc_request,
	.set_ios = meson_mmc_set_ios,
	.get_cd = meson_mmc_get_cd,
};
  610. static int meson_mmc_probe(struct platform_device *pdev)
  611. {
  612. struct resource *res;
  613. struct meson_host *host;
  614. struct mmc_host *mmc;
  615. int ret;
  616. mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
  617. if (!mmc)
  618. return -ENOMEM;
  619. host = mmc_priv(mmc);
  620. host->mmc = mmc;
  621. host->dev = &pdev->dev;
  622. dev_set_drvdata(&pdev->dev, host);
  623. spin_lock_init(&host->lock);
  624. /* Get regulators and the supported OCR mask */
  625. host->vqmmc_enabled = false;
  626. ret = mmc_regulator_get_supply(mmc);
  627. if (ret == -EPROBE_DEFER)
  628. goto free_host;
  629. ret = mmc_of_parse(mmc);
  630. if (ret) {
  631. dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
  632. goto free_host;
  633. }
  634. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  635. host->regs = devm_ioremap_resource(&pdev->dev, res);
  636. if (IS_ERR(host->regs)) {
  637. ret = PTR_ERR(host->regs);
  638. goto free_host;
  639. }
  640. host->irq = platform_get_irq(pdev, 0);
  641. if (host->irq == 0) {
  642. dev_err(&pdev->dev, "failed to get interrupt resource.\n");
  643. ret = -EINVAL;
  644. goto free_host;
  645. }
  646. host->core_clk = devm_clk_get(&pdev->dev, "core");
  647. if (IS_ERR(host->core_clk)) {
  648. ret = PTR_ERR(host->core_clk);
  649. goto free_host;
  650. }
  651. ret = clk_prepare_enable(host->core_clk);
  652. if (ret)
  653. goto free_host;
  654. ret = meson_mmc_clk_init(host);
  655. if (ret)
  656. goto free_host;
  657. /* Stop execution */
  658. writel(0, host->regs + SD_EMMC_START);
  659. /* clear, ack, enable all interrupts */
  660. writel(0, host->regs + SD_EMMC_IRQ_EN);
  661. writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
  662. ret = devm_request_threaded_irq(&pdev->dev, host->irq,
  663. meson_mmc_irq, meson_mmc_irq_thread,
  664. IRQF_SHARED, DRIVER_NAME, host);
  665. if (ret)
  666. goto free_host;
  667. /* data bounce buffer */
  668. host->bounce_buf_size = SZ_512K;
  669. host->bounce_buf =
  670. dma_alloc_coherent(host->dev, host->bounce_buf_size,
  671. &host->bounce_dma_addr, GFP_KERNEL);
  672. if (host->bounce_buf == NULL) {
  673. dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
  674. ret = -ENOMEM;
  675. goto free_host;
  676. }
  677. mmc->ops = &meson_mmc_ops;
  678. mmc_add_host(mmc);
  679. return 0;
  680. free_host:
  681. clk_disable_unprepare(host->cfg_div_clk);
  682. clk_disable_unprepare(host->core_clk);
  683. mmc_free_host(mmc);
  684. return ret;
  685. }
  686. static int meson_mmc_remove(struct platform_device *pdev)
  687. {
  688. struct meson_host *host = dev_get_drvdata(&pdev->dev);
  689. if (WARN_ON(!host))
  690. return 0;
  691. if (host->bounce_buf)
  692. dma_free_coherent(host->dev, host->bounce_buf_size,
  693. host->bounce_buf, host->bounce_dma_addr);
  694. clk_disable_unprepare(host->cfg_div_clk);
  695. clk_disable_unprepare(host->core_clk);
  696. mmc_free_host(host->mmc);
  697. return 0;
  698. }
/* Device-tree match table: generic GX compatible plus per-SoC aliases. */
static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", },
	{ .compatible = "amlogic,meson-gxbb-mmc", },
	{ .compatible = "amlogic,meson-gxl-mmc", },
	{ .compatible = "amlogic,meson-gxm-mmc", },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
static struct platform_driver meson_mmc_driver = {
	.probe = meson_mmc_probe,
	.remove = meson_mmc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

/* registers on module init, unregisters on module exit */
module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");