meson-gx-mmc.c

/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>

#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define   CLK_DIV_MASK GENMASK(5, 0)
#define   CLK_DIV_MAX 63
#define   CLK_SRC_MASK GENMASK(7, 6)
#define   CLK_SRC_XTAL 0 /* external crystal */
#define   CLK_SRC_XTAL_RATE 24000000
#define   CLK_SRC_PLL 1 /* FCLK_DIV2 */
#define   CLK_SRC_PLL_RATE 1000000000
#define   CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define   CLK_TX_PHASE_MASK GENMASK(11, 10)
#define   CLK_RX_PHASE_MASK GENMASK(13, 12)
#define   CLK_PHASE_0 0
#define   CLK_PHASE_90 1
#define   CLK_PHASE_180 2
#define   CLK_PHASE_270 3
#define   CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define   START_DESC_INIT BIT(0)
#define   START_DESC_BUSY BIT(1)
#define   START_DESC_ADDR_MASK GENMASK(31, 2)

#define SD_EMMC_CFG 0x44
#define   CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define   CFG_BUS_WIDTH_1 0x0
#define   CFG_BUS_WIDTH_4 0x1
#define   CFG_BUS_WIDTH_8 0x2
#define   CFG_DDR BIT(2)
#define   CFG_BLK_LEN_MASK GENMASK(7, 4)
#define   CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define   CFG_RC_CC_MASK GENMASK(15, 12)
#define   CFG_CLK_ALWAYS_ON BIT(18)
#define   CFG_CHK_DS BIT(20)
#define   CFG_STOP_CLOCK BIT(22)
#define   CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define   STATUS_BUSY BIT(31)

#define SD_EMMC_IRQ_EN 0x4c
#define   IRQ_EN_MASK GENMASK(13, 0)
#define   IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define   IRQ_TXD_ERR BIT(8)
#define   IRQ_DESC_ERR BIT(9)
#define   IRQ_RESP_ERR BIT(10)
#define   IRQ_RESP_TIMEOUT BIT(11)
#define   IRQ_DESC_TIMEOUT BIT(12)
#define   IRQ_END_OF_CHAIN BIT(13)
#define   IRQ_RESP_STATUS BIT(14)
#define   IRQ_SDIO BIT(15)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68
#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

struct meson_tuning_params {
	u8 core_phase;
	u8 tx_phase;
	u8 rx_phase;
};

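/*
 * One DMA descriptor as consumed by the controller: four 32-bit words
 * (config, argument, data buffer address, response address). A chain of
 * these is handed to the hardware by writing its DMA address to
 * SD_EMMC_START; the CMD_CFG_* bits below describe the cmd_cfg word.
 */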
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_command *cmd;

	spinlock_t lock;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk_mux mux;
	struct clk *mux_clk;
	unsigned long current_clock;

	struct clk_divider cfg_div;
	struct clk *cfg_div_clk;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	struct meson_tuning_params tp;
	bool vqmmc_enabled;
};

#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

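/*
 * The descriptor's 4-bit timeout field encodes log2 of the timeout in
 * milliseconds, so the requested timeout is rounded up to a power of
 * two and capped at 2^15 ms, the largest encodable value.
 */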
static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

	if (!timeout)
		return SD_EMMC_CMD_TIMEOUT_DATA;

	timeout = roundup_pow_of_two(timeout);

	return min(timeout, 32768U); /* max. 2^15 ms */
}

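/*
 * Select the next command of the request to issue: after a successful
 * CMD23 (set block count) comes the data command itself; after a
 * multi-block command without CMD23, or after an error, the stop
 * command; otherwise the request is complete.
 */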
static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
		return cmd->mrq->cmd;
	else if (mmc_op_multi(cmd->opcode) &&
		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
		return cmd->mrq->stop;
	else
		return NULL;
}

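/*
 * Descriptor chain mode DMAs directly from the scatterlist, which the
 * driver only allows when every buffer is 8-byte aligned; unaligned
 * requests fall back to the slower bounce-buffer path.
 */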
static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;
	bool use_desc_chain_mode = true;

	for_each_sg(data->sg, sg, data->sg_len, i)
		/* check for 8 byte alignment */
		if (sg->offset & 7) {
			WARN_ONCE(1, "unaligned scatterlist buffer\n");
			use_desc_chain_mode = false;
			break;
		}

	if (use_desc_chain_mode)
		data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
	return data && (data->flags & MMC_DATA_READ) &&
	       !meson_mmc_desc_chain_mode(data);
}

static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	meson_mmc_get_transfer_mode(mmc, mrq);
	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

	if (!meson_mmc_desc_chain_mode(data))
		return;

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	if (!data->sg_count)
		dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;

	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}

static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 cfg;

	if (clk_rate) {
		if (WARN_ON(clk_rate > mmc->f_max))
			clk_rate = mmc->f_max;
		else if (WARN_ON(clk_rate < mmc->f_min))
			clk_rate = mmc->f_min;
	}

	if (clk_rate == host->current_clock)
		return 0;

	/* stop clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	if (!(cfg & CFG_STOP_CLOCK)) {
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	dev_dbg(host->dev, "change clock rate %u -> %lu\n",
		mmc->actual_clock, clk_rate);

	if (!clk_rate) {
		mmc->actual_clock = 0;
		host->current_clock = 0;
		/* return with clock being stopped */
		return 0;
	}

	ret = clk_set_rate(host->cfg_div_clk, clk_rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			clk_rate, ret);
		return ret;
	}

	mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
	host->current_clock = clk_rate;

	if (clk_rate != mmc->actual_clock)
		dev_dbg(host->dev,
			"divider requested rate %lu != actual rate %u\n",
			clk_rate, mmc->actual_clock);

	/* (re)start clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	return 0;
}

/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_div_parents[1];
	u32 clk_reg, cfg;

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	host->mux.reg = host->regs + SD_EMMC_CLOCK;
	host->mux.shift = __bf_shf(CLK_SRC_MASK);
	host->mux.mask = CLK_SRC_MASK >> host->mux.shift;
	host->mux.flags = 0;
	host->mux.table = NULL;
	host->mux.hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider */
	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_div_parents[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_div_parents;
	init.num_parents = ARRAY_SIZE(clk_div_parents);

	host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
	host->cfg_div.shift = __bf_shf(CLK_DIV_MASK);
	host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK);
	host->cfg_div.hw.init = &init;
	host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
		CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;

	host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
	if (WARN_ON(IS_ERR(host->cfg_div_clk)))
		return PTR_ERR(host->cfg_div_clk);

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	clk_reg |= FIELD_PREP(CLK_SRC_MASK, CLK_SRC_XTAL);
	clk_reg |= FIELD_PREP(CLK_DIV_MASK, CLK_DIV_MAX);
	clk_reg &= ~CLK_ALWAYS_ON;
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* Ensure clock starts in "auto" mode, not "always on" */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_CLK_ALWAYS_ON;
	cfg |= CFG_AUTO_CLK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	ret = clk_prepare_enable(host->cfg_div_clk);
	if (ret)
		return ret;

	/* Get the nearest minimum clock to 400 kHz */
	host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000);

	ret = meson_mmc_clk_set(host, host->mmc->f_min);
	if (ret)
		clk_disable_unprepare(host->cfg_div_clk);

	return ret;
}

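/*
 * The phase bits live in SD_EMMC_CLOCK; the driver gates the card clock
 * via CFG_STOP_CLOCK while rewriting them, presumably so the phase
 * never changes under a running clock, then ungates it again.
 */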
static void meson_mmc_set_tuning_params(struct mmc_host *mmc)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 regval;

	/* stop clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval |= CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);

	regval = readl(host->regs + SD_EMMC_CLOCK);
	regval &= ~CLK_CORE_PHASE_MASK;
	regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	regval &= ~CLK_TX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	regval &= ~CLK_RX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	writel(regval, host->regs + SD_EMMC_CLOCK);

	/* start clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval &= ~CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);
}

static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width;
	u32 val, orig;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		break;
	}

	meson_mmc_clk_set(host, ios->clock);

	/* Bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	orig = val;

	val &= ~CFG_BUS_WIDTH_MASK;
	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);

	val &= ~CFG_DDR;
	if (ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_DDR;

	val &= ~CFG_CHK_DS;
	if (ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_CHK_DS;

	if (val != orig) {
		writel(val, host->regs + SD_EMMC_CFG);
		dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
			__func__, orig, val);
	}
}

static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}

static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}

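/*
 * Build one descriptor per scatterlist segment. Only the first
 * descriptor actually issues the command (the rest carry
 * CMD_CFG_NO_CMD), the last one terminates the chain with
 * CMD_CFG_END_OF_CHAIN, and the chain is started by writing its DMA
 * address, with the busy bit set, to SD_EMMC_START.
 */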
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}

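/*
 * For non-chained transfers the CMD_CFG/CMD_DAT/CMD_RSP registers act
 * as a single in-place descriptor; the write to SD_EMMC_CMD_ARG comes
 * last, after a barrier, and kicks off the command.
 */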
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}

static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}

static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}

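/*
 * Hard IRQ handler: decode and acknowledge the status bits and record
 * any error on the current command. Work that can take a while
 * (copying a read out of the bounce buffer, issuing the follow-up
 * command) is deferred to the threaded handler via IRQ_WAKE_THREAD.
 */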
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!host))
		return IRQ_NONE;

	cmd = host->cmd;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;

	spin_lock(&host->lock);
	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);
		ret = IRQ_NONE;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_TXD_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_DESC_ERR)
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
	if (status & IRQ_RESP_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_RESP_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_DESC_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
	} else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
			 cmd->flags, cmd->mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;

			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
				 data->blksz, data->blocks, data->flags,
				 data->flags & MMC_DATA_WRITE ? "write" : "",
				 data->flags & MMC_DATA_READ ? "read" : "");
		}
	}

out:
	/* ack all (enabled) interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);

	spin_unlock(&host->lock);
	return ret;
}

static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}

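/*
 * Tuning sweeps the RX clock phase through its four settings
 * (CLK_PHASE_0 .. CLK_PHASE_270) and keeps the first one for which the
 * tuning command succeeds; when retuning, the currently active
 * parameter set is skipped.
 */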
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
	struct meson_tuning_params tp_old = host->tp;
	int ret = -EINVAL, i, cmd_error;

	dev_info(mmc_dev(mmc), "(re)tuning...\n");

	for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) {
		host->tp.rx_phase = i;
		/* exclude the active parameter set if retuning */
		if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) &&
		    mmc->doing_retune)
			continue;
		meson_mmc_set_tuning_params(mmc);
		ret = mmc_send_tuning(mmc, opcode, &cmd_error);
		if (!ret)
			break;
	}

	return ret;
}

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}

static void meson_mmc_cfg_init(struct meson_host *host)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

	writel(cfg, host->regs + SD_EMMC_CFG);
}

static const struct mmc_host_ops meson_mmc_ops = {
	.request	= meson_mmc_request,
	.set_ios	= meson_mmc_set_ios,
	.get_cd		= meson_mmc_get_cd,
	.pre_req	= meson_mmc_pre_req,
	.post_req	= meson_mmc_post_req,
	.execute_tuning	= meson_mmc_execute_tuning,
};

static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret, irq;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	spin_lock_init(&host->lock);

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	host->tp.core_phase = CLK_PHASE_180;
	host->tp.tx_phase = CLK_PHASE_0;
	host->tp.rx_phase = CLK_PHASE_0;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto err_core_clk;

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack, enable all interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);

	/* set config to sane default */
	meson_mmc_cfg_init(host);

	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
					meson_mmc_irq_thread, IRQF_SHARED,
					NULL, host);
	if (ret)
		goto err_div_clk;

	mmc->caps |= MMC_CAP_CMD23;
	mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
	mmc->max_seg_size = mmc->max_req_size;

	/* data bounce buffer */
	host->bounce_buf_size = mmc->max_req_size;
	host->bounce_buf =
		dma_alloc_coherent(host->dev, host->bounce_buf_size,
				   &host->bounce_dma_addr, GFP_KERNEL);
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
		goto err_div_clk;
	}

	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
					 &host->descs_dma_addr, GFP_KERNEL);
	if (!host->descs) {
		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
		ret = -ENOMEM;
		goto err_bounce_buf;
	}

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

err_bounce_buf:
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);
err_div_clk:
	clk_disable_unprepare(host->cfg_div_clk);
err_core_clk:
	clk_disable_unprepare(host->core_clk);
free_host:
	mmc_free_host(mmc);
	return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);

	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
			  host->descs, host->descs_dma_addr);
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->cfg_div_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);

	return 0;
}

static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", },
	{ .compatible = "amlogic,meson-gxbb-mmc", },
	{ .compatible = "amlogic,meson-gxl-mmc", },
	{ .compatible = "amlogic,meson-gxm-mmc", },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

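/*
 * Sketch of a hypothetical devicetree node this driver would bind to.
 * The clock-names must match the "core" clock and the "clkin0"/"clkin1"
 * mux parents requested in probe/clk_init (crystal and FCLK_DIV2); the
 * unit address, interrupt number and clock providers below are
 * illustrative only:
 *
 *	mmc@70000 {
 *		compatible = "amlogic,meson-gxbb-mmc";
 *		reg = <0x0 0x70000 0x0 0x800>;
 *		interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
 *		clocks = <&clkc CLKID_SD_EMMC_A>,
 *			 <&xtal>,
 *			 <&clkc CLKID_FCLK_DIV2>;
 *		clock-names = "core", "clkin0", "clkin1";
 *	};
 */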
static struct platform_driver meson_mmc_driver = {
	.probe		= meson_mmc_probe,
	.remove		= meson_mmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");