meson-gx-mmc.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390
  1. /*
  2. * Amlogic SD/eMMC driver for the GX/S905 family SoCs
  3. *
  4. * Copyright (c) 2016 BayLibre, SAS.
  5. * Author: Kevin Hilman <khilman@baylibre.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of version 2 of the GNU General Public License as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  18. * The full GNU General Public License is included in this distribution
  19. * in the file called COPYING.
  20. */
  21. #include <linux/kernel.h>
  22. #include <linux/module.h>
  23. #include <linux/init.h>
  24. #include <linux/device.h>
  25. #include <linux/of_device.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/ioport.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/dma-mapping.h>
  30. #include <linux/mmc/host.h>
  31. #include <linux/mmc/mmc.h>
  32. #include <linux/mmc/sdio.h>
  33. #include <linux/mmc/slot-gpio.h>
  34. #include <linux/io.h>
  35. #include <linux/clk.h>
  36. #include <linux/clk-provider.h>
  37. #include <linux/regulator/consumer.h>
  38. #include <linux/reset.h>
  39. #include <linux/interrupt.h>
  40. #include <linux/bitfield.h>
  41. #include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "meson-gx-mmc"

/* SD_EMMC_CLOCK: clock source mux, divider, phase and delay settings */
#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_MASK GENMASK(5, 0)
#define CLK_SRC_MASK GENMASK(7, 6)
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
/* The tx/rx delay fields differ between controller revisions (v2 vs v3) */
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
/* One delay cell step, in picoseconds */
#define CLK_DELAY_STEP_PS 200
/* Tuning sweeps the phase in CLK_PHASE_STEP-degree increments */
#define CLK_PHASE_STEP 30
#define CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP)
/* Per-variant accessors, resolved through host->data (struct meson_mmc_data) */
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)
#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_DELAY1 0x4
#define SD_EMMC_DELAY2 0x8
#define SD_EMMC_V3_ADJUST 0xc
#define SD_EMMC_CALOUT 0x10
/* SD_EMMC_START: descriptor chain kick-off register */
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_MASK GENMASK(31, 2)
/* SD_EMMC_CFG: bus width, block length, DDR and clock control bits */
#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_MASK GENMASK(7, 4)
#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define CFG_RC_CC_MASK GENMASK(15, 12)
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_CHK_DS BIT(20)
#define CFG_AUTO_CLK BIT(23)
#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)
#define STATUS_DATI GENMASK(23, 16)
/* SD_EMMC_IRQ_EN: interrupt enable/status bits */
#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_CRC_ERR \
	(IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_TIMEOUTS \
	(IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)
#define IRQ_EN_MASK \
	(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\
	 IRQ_SDIO)
/* Command/argument/data/response registers used in non-descriptor mode */
#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68
#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD
#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE
/* Flags stored in data->host_cookie by the pre_req/transfer-mode path */
#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)
#define MUX_CLK_NUM_PARENTS 2
/*
 * Per-SoC variant data: masks for the Tx/Rx delay fields and the
 * "always on" bit of SD_EMMC_CLOCK, which differ between controller
 * revisions (see the CLK_V2_*/CLK_V3_* defines).
 */
struct meson_mmc_data {
	unsigned int tx_delay_mask;	/* Tx delay field in SD_EMMC_CLOCK */
	unsigned int rx_delay_mask;	/* Rx delay field in SD_EMMC_CLOCK */
	unsigned int always_on;		/* "clock always on" bit */
};
/*
 * Hardware DMA descriptor layout (one per scatterlist entry), built in
 * meson_mmc_desc_chain_transfer() and handed to the controller through
 * SD_EMMC_START.
 */
struct sd_emmc_desc {
	u32 cmd_cfg;	/* CMD_CFG_* flags + length + command index */
	u32 cmd_arg;	/* command argument */
	u32 cmd_data;	/* DMA address of the data buffer */
	u32 cmd_resp;	/* response destination (0 = response registers) */
};
/* Driver state for one SD/eMMC controller instance */
struct meson_host {
	struct device		*dev;
	struct meson_mmc_data	*data;		/* SoC variant register layout */
	struct mmc_host		*mmc;
	struct mmc_command	*cmd;		/* command currently in flight */

	spinlock_t lock;
	void __iomem *regs;			/* MMIO base of the controller */
	struct clk *core_clk;
	struct clk *mmc_clk;			/* composite mux/div/phase clock */
	struct clk *rx_clk;			/* Rx sampling phase clock */
	struct clk *tx_clk;			/* Tx phase clock */
	unsigned long req_rate;			/* last rate requested, to skip redundant updates */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_clk_gate;	/* optional state gating the clock at the pad */

	unsigned int bounce_buf_size;
	void *bounce_buf;			/* fallback buffer when chain mode is unusable */
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;		/* DMA descriptor ring */
	dma_addr_t descs_dma_addr;

	bool vqmmc_enabled;			/* track regulator state across set_ios calls */
};
/* Fields of the cmd_cfg descriptor word */
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)	/* length counts blocks, not bytes */
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)	/* log2 of timeout in ms */
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)		/* data-only descriptor */
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)		/* descriptor owned by CPU */

/* Fields of the cmd_data and cmd_resp descriptor words */
#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)
/*
 * One phase-adjustable clock exposed through the common clock
 * framework: a coarse phase selector plus an optional fine delay line
 * within the same register.
 */
struct meson_mmc_phase {
	struct clk_hw hw;
	void __iomem *reg;		/* register holding the fields below */
	unsigned long phase_mask;	/* coarse phase selection bits */
	unsigned long delay_mask;	/* fine delay bits, 0 if not available */
	unsigned int delay_step_ps;	/* picoseconds per delay step */
};

#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw)
/*
 * Report the current clock phase in degrees, decoding the coarse phase
 * selector and, when present, the fine delay-cell setting.
 */
static int meson_mmc_clk_get_phase(struct clk_hw *hw)
{
	struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
	unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
	unsigned long period_ps, p, d;
	int degrees;
	u32 val;

	val = readl(mmc->reg);
	p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask);
	degrees = p * 360 / phase_num;

	if (mmc->delay_mask) {
		/* clock period in picoseconds: 10^12 / rate */
		period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
					 clk_get_rate(hw->clk));
		d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask);

		/* add the contribution of the delay cells, wrap at 360 */
		degrees += d * mmc->delay_step_ps * 360 / period_ps;
		degrees %= 360;
	}

	return degrees;
}
  204. static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc,
  205. unsigned int phase,
  206. unsigned int delay)
  207. {
  208. u32 val;
  209. val = readl(mmc->reg);
  210. val &= ~mmc->phase_mask;
  211. val |= phase << __ffs(mmc->phase_mask);
  212. if (mmc->delay_mask) {
  213. val &= ~mmc->delay_mask;
  214. val |= delay << __ffs(mmc->delay_mask);
  215. }
  216. writel(val, mmc->reg);
  217. }
/*
 * Set the clock phase as close as possible to the requested number of
 * degrees: the coarse phase selector first, then the fine delay cells
 * to cover the remainder (when the hardware has them).
 */
static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees)
{
	struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
	unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
	unsigned long period_ps, d = 0, r;
	uint64_t p;

	p = degrees % 360;

	if (!mmc->delay_mask) {
		/* phase selector only: round to the nearest phase point */
		p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num);
	} else {
		/* clock period in picoseconds: 10^12 / rate */
		period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
					 clk_get_rate(hw->clk));

		/* First compute the phase index (p), the remainder (r) is the
		 * part we'll try to achieve using the delays (d).
		 */
		r = do_div(p, 360 / phase_num);
		d = DIV_ROUND_CLOSEST(r * period_ps,
				      360 * mmc->delay_step_ps);
		/* clamp to the width of the delay field */
		d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask));
	}

	meson_mmc_apply_phase_delay(mmc, p, d);
	return 0;
}
/* Expose the phase/delay controls through the common clock framework */
static const struct clk_ops meson_mmc_clk_phase_ops = {
	.get_phase = meson_mmc_clk_get_phase,
	.set_phase = meson_mmc_clk_set_phase,
};
  245. static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
  246. {
  247. unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;
  248. if (!timeout)
  249. return SD_EMMC_CMD_TIMEOUT_DATA;
  250. timeout = roundup_pow_of_two(timeout);
  251. return min(timeout, 32768U); /* max. 2^15 ms */
  252. }
  253. static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
  254. {
  255. if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
  256. return cmd->mrq->cmd;
  257. else if (mmc_op_multi(cmd->opcode) &&
  258. (!cmd->mrq->sbc || cmd->error || cmd->data->error))
  259. return cmd->mrq->stop;
  260. else
  261. return NULL;
  262. }
  263. static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
  264. struct mmc_request *mrq)
  265. {
  266. struct mmc_data *data = mrq->data;
  267. struct scatterlist *sg;
  268. int i;
  269. bool use_desc_chain_mode = true;
  270. /*
  271. * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
  272. * reported. For some strange reason this occurs in descriptor
  273. * chain mode only. So let's fall back to bounce buffer mode
  274. * for command SD_IO_RW_EXTENDED.
  275. */
  276. if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
  277. return;
  278. for_each_sg(data->sg, sg, data->sg_len, i)
  279. /* check for 8 byte alignment */
  280. if (sg->offset & 7) {
  281. WARN_ONCE(1, "unaligned scatterlist buffer\n");
  282. use_desc_chain_mode = false;
  283. break;
  284. }
  285. if (use_desc_chain_mode)
  286. data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
  287. }
  288. static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
  289. {
  290. return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
  291. }
  292. static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
  293. {
  294. return data && data->flags & MMC_DATA_READ &&
  295. !meson_mmc_desc_chain_mode(data);
  296. }
  297. static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
  298. {
  299. struct mmc_data *data = mrq->data;
  300. if (!data)
  301. return;
  302. meson_mmc_get_transfer_mode(mmc, mrq);
  303. data->host_cookie |= SD_EMMC_PRE_REQ_DONE;
  304. if (!meson_mmc_desc_chain_mode(data))
  305. return;
  306. data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
  307. mmc_get_dma_dir(data));
  308. if (!data->sg_count)
  309. dev_err(mmc_dev(mmc), "dma_map_sg failed");
  310. }
  311. static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
  312. int err)
  313. {
  314. struct mmc_data *data = mrq->data;
  315. if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
  316. dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
  317. mmc_get_dma_dir(data));
  318. }
  319. static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios)
  320. {
  321. if (ios->timing == MMC_TIMING_MMC_DDR52 ||
  322. ios->timing == MMC_TIMING_UHS_DDR50 ||
  323. ios->timing == MMC_TIMING_MMC_HS400)
  324. return true;
  325. return false;
  326. }
/*
 * Gating the clock on this controller is tricky. It seems the mmc clock
 * is also used by the controller. It may crash during some operation if the
 * clock is stopped. The safest thing to do, whenever possible, is to keep
 * the clock running and stop it at the pad using the pinmux.
 */
  333. static void meson_mmc_clk_gate(struct meson_host *host)
  334. {
  335. u32 cfg;
  336. if (host->pins_clk_gate) {
  337. pinctrl_select_state(host->pinctrl, host->pins_clk_gate);
  338. } else {
  339. /*
  340. * If the pinmux is not provided - default to the classic and
  341. * unsafe method
  342. */
  343. cfg = readl(host->regs + SD_EMMC_CFG);
  344. cfg |= CFG_STOP_CLOCK;
  345. writel(cfg, host->regs + SD_EMMC_CFG);
  346. }
  347. }
  348. static void meson_mmc_clk_ungate(struct meson_host *host)
  349. {
  350. u32 cfg;
  351. if (host->pins_clk_gate)
  352. pinctrl_select_state(host->pinctrl, host->pins_default);
  353. /* Make sure the clock is not stopped in the controller */
  354. cfg = readl(host->regs + SD_EMMC_CFG);
  355. cfg &= ~CFG_STOP_CLOCK;
  356. writel(cfg, host->regs + SD_EMMC_CFG);
  357. }
/*
 * Apply the clock rate requested in @ios to the mmc clock tree.
 * The clock is gated around the rate change and re-started on success.
 * Returns 0 on success or the clk framework error code.
 */
static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long rate = ios->clock;
	int ret;
	u32 cfg;

	/* DDR modes require higher module clock */
	if (meson_mmc_timing_is_ddr(ios))
		rate <<= 1;

	/* Same request - bail-out */
	if (host->req_rate == rate)
		return 0;

	/* stop clock */
	meson_mmc_clk_gate(host);
	host->req_rate = 0;

	if (!rate) {
		mmc->actual_clock = 0;
		/* return with clock being stopped */
		return 0;
	}

	/* Stop the clock during rate change to avoid glitches */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg |= CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	ret = clk_set_rate(host->mmc_clk, rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			rate, ret);
		/* NOTE(review): the clock is left gated on this error path */
		return ret;
	}

	host->req_rate = rate;
	mmc->actual_clock = clk_get_rate(host->mmc_clk);

	/* We should report the real output frequency of the controller */
	if (meson_mmc_timing_is_ddr(ios))
		mmc->actual_clock >>= 1;

	dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
	if (ios->clock != mmc->actual_clock)
		dev_dbg(host->dev, "requested rate was %u\n", ios->clock);

	/* (re)start clock */
	meson_mmc_clk_ungate(host);

	return 0;
}
  400. /*
  401. * The SD/eMMC IP block has an internal mux and divider used for
  402. * generating the MMC clock. Use the clock framework to create and
  403. * manage these clocks.
  404. */
/*
 * Build the clock tree for the controller using the common clock
 * framework: mux -> divider -> core phase clock (host->mmc_clk), with
 * the tx and rx phase clocks as children of the core clock. Finally
 * set a minimum rate, program the default phases and enable the clock.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	struct clk_mux *mux;
	struct clk_divider *div;
	struct meson_mmc_phase *core, *tx, *rx;
	struct clk *clk;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_parent[1];
	u32 clk_reg;

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= CLK_ALWAYS_ON(host);
	clk_reg |= CLK_DIV_MASK;	/* max divider -> lowest rate */
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* get the mux parents ("clkin0"/"clkin1" from the DT) */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	mux->reg = host->regs + SD_EMMC_CLOCK;
	mux->shift = __ffs(CLK_SRC_MASK);
	mux->mask = CLK_SRC_MASK >> mux->shift;
	mux->hw.init = &init;

	clk = devm_clk_register(host->dev, &mux->hw);
	if (WARN_ON(IS_ERR(clk)))
		return PTR_ERR(clk);

	/* create the divider, parented on the mux */
	div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	div->reg = host->regs + SD_EMMC_CLOCK;
	div->shift = __ffs(CLK_DIV_MASK);
	div->width = __builtin_popcountl(CLK_DIV_MASK);
	div->hw.init = &init;
	div->flags = CLK_DIVIDER_ONE_BASED;

	clk = devm_clk_register(host->dev, &div->hw);
	if (WARN_ON(IS_ERR(clk)))
		return PTR_ERR(clk);

	/* create the mmc core clock (phase only, no delay cells) */
	core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL);
	if (!core)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_parent[0] = __clk_get_name(clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	core->reg = host->regs + SD_EMMC_CLOCK;
	core->phase_mask = CLK_CORE_PHASE_MASK;
	core->hw.init = &init;

	host->mmc_clk = devm_clk_register(host->dev, &core->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk)))
		return PTR_ERR(host->mmc_clk);

	/* create the mmc tx clock (phase + variant-specific delay cells) */
	tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL);
	if (!tx)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = 0;
	clk_parent[0] = __clk_get_name(host->mmc_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	tx->reg = host->regs + SD_EMMC_CLOCK;
	tx->phase_mask = CLK_TX_PHASE_MASK;
	tx->delay_mask = CLK_TX_DELAY_MASK(host);
	tx->delay_step_ps = CLK_DELAY_STEP_PS;
	tx->hw.init = &init;

	host->tx_clk = devm_clk_register(host->dev, &tx->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk)))
		return PTR_ERR(host->tx_clk);

	/* create the mmc rx clock (phase + variant-specific delay cells) */
	rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &meson_mmc_clk_phase_ops;
	init.flags = 0;
	clk_parent[0] = __clk_get_name(host->mmc_clk);
	init.parent_names = clk_parent;
	init.num_parents = 1;

	rx->reg = host->regs + SD_EMMC_CLOCK;
	rx->phase_mask = CLK_RX_PHASE_MASK;
	rx->delay_mask = CLK_RX_DELAY_MASK(host);
	rx->delay_step_ps = CLK_DELAY_STEP_PS;
	rx->hw.init = &init;

	host->rx_clk = devm_clk_register(host->dev, &rx->hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk)))
		return PTR_ERR(host->rx_clk);

	/* start with the lowest rate the clock tree can provide near 400kHz */
	host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
	ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
	if (ret)
		return ret;

	/*
	 * Set phases : These values are mostly the datasheet recommended ones
	 * except for the Tx phase. Datasheet recommends 180 but some cards
	 * fail at initialisation with it. 270 works just fine, it fixes these
	 * initialisation issues and enable eMMC DDR52 mode.
	 */
	clk_set_phase(host->mmc_clk, 180);
	clk_set_phase(host->tx_clk, 270);
	clk_set_phase(host->rx_clk, 0);

	return clk_prepare_enable(host->mmc_clk);
}
/*
 * Rotate the tuning bitmap right by @shift positions: bits dropped on
 * the right re-enter on the left.
 */
static void meson_mmc_shift_map(unsigned long *map, unsigned long shift)
{
	DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM);
	DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM);

	/*
	 * shift the bitmap right and reintroduce the dropped bits on the left
	 * of the bitmap
	 */
	bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM);
	bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift,
			  CLK_PHASE_POINT_NUM);
	bitmap_or(map, left, right, CLK_PHASE_POINT_NUM);
}
/*
 * Starting at *start, locate the next run of set bits in the tuning
 * bitmap: *start is moved to its first bit, *stop to one past its last.
 */
static void meson_mmc_find_next_region(unsigned long *map,
				       unsigned long *start,
				       unsigned long *stop)
{
	*start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start);
	*stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start);
}
/*
 * Pick the best tuning point from the bitmap of successful points: the
 * center of the largest contiguous run of set bits. Returns the point
 * index, or -EIO when no point succeeded.
 */
static int meson_mmc_find_tuning_point(unsigned long *test)
{
	unsigned long shift, stop, offset = 0, start = 0, size = 0;

	/* Get the all good/all bad situation out the way */
	if (bitmap_full(test, CLK_PHASE_POINT_NUM))
		return 0; /* All points are good so point 0 will do */
	else if (bitmap_empty(test, CLK_PHASE_POINT_NUM))
		return -EIO; /* No successful tuning point */

	/*
	 * Now we know there is at least one region to find. Make sure it
	 * does not wrap around by rotating the bitmap if necessary.
	 */
	shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM);
	if (shift != 0)
		meson_mmc_shift_map(test, shift);

	/* scan all regions and remember the largest one */
	while (start < CLK_PHASE_POINT_NUM) {
		meson_mmc_find_next_region(test, &start, &stop);

		if ((stop - start) > size) {
			offset = start;
			size = stop - start;
		}

		start = stop;
	}

	/* Get the center point of the region */
	offset += (size / 2);

	/* Shift the result back */
	offset = (offset + shift) % CLK_PHASE_POINT_NUM;

	return offset;
}
  591. static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
  592. struct clk *clk)
  593. {
  594. int point, ret;
  595. DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM);
  596. dev_dbg(mmc_dev(mmc), "%s phase/delay tunning...\n",
  597. __clk_get_name(clk));
  598. bitmap_zero(test, CLK_PHASE_POINT_NUM);
  599. /* Explore tuning points */
  600. for (point = 0; point < CLK_PHASE_POINT_NUM; point++) {
  601. clk_set_phase(clk, point * CLK_PHASE_STEP);
  602. ret = mmc_send_tuning(mmc, opcode, NULL);
  603. if (!ret)
  604. set_bit(point, test);
  605. }
  606. /* Find the optimal tuning point and apply it */
  607. point = meson_mmc_find_tuning_point(test);
  608. if (point < 0)
  609. return point; /* tuning failed */
  610. clk_set_phase(clk, point * CLK_PHASE_STEP);
  611. dev_dbg(mmc_dev(mmc), "success with phase: %d\n",
  612. clk_get_phase(clk));
  613. return 0;
  614. }
/* mmc_host_ops .execute_tuning: only the Rx clock is tuned at runtime */
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);

	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}
  620. static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
  621. {
  622. struct meson_host *host = mmc_priv(mmc);
  623. u32 bus_width, val;
  624. int err;
  625. /*
  626. * GPIO regulator, only controls switching between 1v8 and
  627. * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
  628. */
  629. switch (ios->power_mode) {
  630. case MMC_POWER_OFF:
  631. if (!IS_ERR(mmc->supply.vmmc))
  632. mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
  633. if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
  634. regulator_disable(mmc->supply.vqmmc);
  635. host->vqmmc_enabled = false;
  636. }
  637. break;
  638. case MMC_POWER_UP:
  639. if (!IS_ERR(mmc->supply.vmmc))
  640. mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
  641. /* Reset rx phase */
  642. clk_set_phase(host->rx_clk, 0);
  643. break;
  644. case MMC_POWER_ON:
  645. if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
  646. int ret = regulator_enable(mmc->supply.vqmmc);
  647. if (ret < 0)
  648. dev_err(host->dev,
  649. "failed to enable vqmmc regulator\n");
  650. else
  651. host->vqmmc_enabled = true;
  652. }
  653. break;
  654. }
  655. /* Bus width */
  656. switch (ios->bus_width) {
  657. case MMC_BUS_WIDTH_1:
  658. bus_width = CFG_BUS_WIDTH_1;
  659. break;
  660. case MMC_BUS_WIDTH_4:
  661. bus_width = CFG_BUS_WIDTH_4;
  662. break;
  663. case MMC_BUS_WIDTH_8:
  664. bus_width = CFG_BUS_WIDTH_8;
  665. break;
  666. default:
  667. dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
  668. ios->bus_width);
  669. bus_width = CFG_BUS_WIDTH_4;
  670. }
  671. val = readl(host->regs + SD_EMMC_CFG);
  672. val &= ~CFG_BUS_WIDTH_MASK;
  673. val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);
  674. val &= ~CFG_DDR;
  675. if (meson_mmc_timing_is_ddr(ios))
  676. val |= CFG_DDR;
  677. val &= ~CFG_CHK_DS;
  678. if (ios->timing == MMC_TIMING_MMC_HS400)
  679. val |= CFG_CHK_DS;
  680. err = meson_mmc_clk_set(host, ios);
  681. if (err)
  682. dev_err(host->dev, "Failed to set clock: %d\n,", err);
  683. writel(val, host->regs + SD_EMMC_CFG);
  684. dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
  685. }
/* Mark the in-flight command done and complete the request */
static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	/* no command in flight anymore */
	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}
  693. static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
  694. {
  695. struct meson_host *host = mmc_priv(mmc);
  696. u32 cfg, blksz_old;
  697. cfg = readl(host->regs + SD_EMMC_CFG);
  698. blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);
  699. if (!is_power_of_2(blksz))
  700. dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);
  701. blksz = ilog2(blksz);
  702. /* check if block-size matches, if not update */
  703. if (blksz == blksz_old)
  704. return;
  705. dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
  706. blksz_old, blksz);
  707. cfg &= ~CFG_BLK_LEN_MASK;
  708. cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
  709. writel(cfg, host->regs + SD_EMMC_CFG);
  710. }
  711. static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
  712. {
  713. if (cmd->flags & MMC_RSP_PRESENT) {
  714. if (cmd->flags & MMC_RSP_136)
  715. *cmd_cfg |= CMD_CFG_RESP_128;
  716. *cmd_cfg |= CMD_CFG_RESP_NUM;
  717. if (!(cmd->flags & MMC_RSP_CRC))
  718. *cmd_cfg |= CMD_CFG_RESP_NOCRC;
  719. if (cmd->flags & MMC_RSP_BUSY)
  720. *cmd_cfg |= CMD_CFG_R1B;
  721. } else {
  722. *cmd_cfg |= CMD_CFG_NO_RESP;
  723. }
  724. }
/*
 * Build the DMA descriptor chain for a pre-mapped scatterlist and kick
 * the controller: one descriptor per sg entry, only the first carries
 * the command, the last one ends the chain.
 */
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		/* in block mode the length field counts blocks, not bytes */
		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		/* only the first descriptor issues the command itself */
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}
/*
 * Issue a single command through the CMD_CFG/CMD_DAT/CMD_ARG registers.
 * Descriptor-chain-capable data transfers are delegated to
 * meson_mmc_desc_chain_transfer(); everything else goes through the
 * bounce buffer.  Writing CMD_ARG last starts execution.
 */
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		/* Timeout field holds log2 of the timeout in ms. */
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			/* Block mode: length counts blocks, not bytes. */
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			/* Stage write data into the bounce buffer for DMA. */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	/* The write to CMD_ARG triggers execution of the command. */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}
/*
 * .request host op: dispatch a request from the MMC core.  Performs the
 * pre-request DMA mapping here if the core did not already do it via
 * .pre_req (tracked by the SD_EMMC_PRE_REQ_DONE cookie), then issues
 * the SET_BLOCK_COUNT command first when present.
 */
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		/* Only descriptor-chain transfers use pre/post mapping. */
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}
  825. static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
  826. {
  827. struct meson_host *host = mmc_priv(mmc);
  828. if (cmd->flags & MMC_RSP_136) {
  829. cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
  830. cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
  831. cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
  832. cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
  833. } else if (cmd->flags & MMC_RSP_PRESENT) {
  834. cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
  835. }
  836. }
/*
 * Hard IRQ handler.  Decodes the status register under host->lock,
 * records CRC/timeout errors on the current command, and either
 * completes the request directly or wakes the threaded handler for
 * work that must run in process context (bounce-buffer read copy,
 * issuing the next command of a chained request).
 */
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_NONE;

	/* Shared IRQ line may fire with no command in flight. */
	if (WARN_ON(!host) || WARN_ON(!host->cmd))
		return IRQ_NONE;

	spin_lock(&host->lock);

	cmd = host->cmd;
	data = cmd->data;
	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	/* Only consider interrupt sources we actually enabled. */
	status = raw_status & irq_en;

	cmd->error = 0;
	if (status & IRQ_CRC_ERR) {
		dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
		cmd->error = -EILSEQ;
		ret = IRQ_HANDLED;
		goto out;
	}

	if (status & IRQ_TIMEOUTS) {
		dev_dbg(host->dev, "Timeout - status 0x%08x\n", status);
		cmd->error = -ETIMEDOUT;
		ret = IRQ_HANDLED;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	if (status & IRQ_SDIO) {
		/* SDIO card interrupt support not implemented. */
		dev_dbg(host->dev, "IRQ: SDIO TODO.\n");
		ret = IRQ_HANDLED;
	}

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		/*
		 * Bounce-buffer reads and sbc/stop continuation are
		 * deferred to the threaded handler.
		 */
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
		else
			ret = IRQ_HANDLED;
	}

out:
	/* ack all enabled interrupts */
	writel(irq_en, host->regs + SD_EMMC_STATUS);

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);
	else if (ret == IRQ_NONE)
		dev_warn(host->dev,
			 "Unexpected IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);

	spin_unlock(&host->lock);
	return ret;
}
/*
 * Threaded IRQ handler.  Copies read data out of the bounce buffer,
 * then either starts the next command of the request (e.g. after sbc)
 * or completes the request.
 */
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		/* Move the DMA'd read data into the caller's SG list. */
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}
  913. /*
  914. * NOTE: we only need this until the GPIO/pinctrl driver can handle
  915. * interrupts. For now, the MMC core will use this for polling.
  916. */
  917. static int meson_mmc_get_cd(struct mmc_host *mmc)
  918. {
  919. int status = mmc_gpio_get_cd(mmc);
  920. if (status == -ENOSYS)
  921. return 1; /* assume present */
  922. return status;
  923. }
  924. static void meson_mmc_cfg_init(struct meson_host *host)
  925. {
  926. u32 cfg = 0;
  927. cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
  928. ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
  929. cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
  930. cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));
  931. writel(cfg, host->regs + SD_EMMC_CFG);
  932. }
  933. static int meson_mmc_card_busy(struct mmc_host *mmc)
  934. {
  935. struct meson_host *host = mmc_priv(mmc);
  936. u32 regval;
  937. regval = readl(host->regs + SD_EMMC_STATUS);
  938. /* We are only interrested in lines 0 to 3, so mask the other ones */
  939. return !(FIELD_GET(STATUS_DATI, regval) & 0xf);
  940. }
  941. static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
  942. {
  943. /* vqmmc regulator is available */
  944. if (!IS_ERR(mmc->supply.vqmmc)) {
  945. /*
  946. * The usual amlogic setup uses a GPIO to switch from one
  947. * regulator to the other. While the voltage ramp up is
  948. * pretty fast, care must be taken when switching from 3.3v
  949. * to 1.8v. Please make sure the regulator framework is aware
  950. * of your own regulator constraints
  951. */
  952. return mmc_regulator_set_vqmmc(mmc, ios);
  953. }
  954. /* no vqmmc regulator, assume fixed regulator at 3/3.3V */
  955. if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
  956. return 0;
  957. return -EINVAL;
  958. }
/* Host operations registered with the MMC core. */
static const struct mmc_host_ops meson_mmc_ops = {
	.request = meson_mmc_request,
	.set_ios = meson_mmc_set_ios,
	.get_cd = meson_mmc_get_cd,
	.pre_req = meson_mmc_pre_req,
	.post_req = meson_mmc_post_req,
	.execute_tuning = meson_mmc_execute_tuning,
	.card_busy = meson_mmc_card_busy,
	.start_signal_voltage_switch = meson_mmc_voltage_switch,
};
  969. static int meson_mmc_probe(struct platform_device *pdev)
  970. {
  971. struct resource *res;
  972. struct meson_host *host;
  973. struct mmc_host *mmc;
  974. int ret, irq;
  975. mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
  976. if (!mmc)
  977. return -ENOMEM;
  978. host = mmc_priv(mmc);
  979. host->mmc = mmc;
  980. host->dev = &pdev->dev;
  981. dev_set_drvdata(&pdev->dev, host);
  982. spin_lock_init(&host->lock);
  983. /* Get regulators and the supported OCR mask */
  984. host->vqmmc_enabled = false;
  985. ret = mmc_regulator_get_supply(mmc);
  986. if (ret)
  987. goto free_host;
  988. ret = mmc_of_parse(mmc);
  989. if (ret) {
  990. if (ret != -EPROBE_DEFER)
  991. dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
  992. goto free_host;
  993. }
  994. host->data = (struct meson_mmc_data *)
  995. of_device_get_match_data(&pdev->dev);
  996. if (!host->data) {
  997. ret = -EINVAL;
  998. goto free_host;
  999. }
  1000. ret = device_reset_optional(&pdev->dev);
  1001. if (ret) {
  1002. if (ret != -EPROBE_DEFER)
  1003. dev_err(&pdev->dev, "device reset failed: %d\n", ret);
  1004. return ret;
  1005. }
  1006. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1007. host->regs = devm_ioremap_resource(&pdev->dev, res);
  1008. if (IS_ERR(host->regs)) {
  1009. ret = PTR_ERR(host->regs);
  1010. goto free_host;
  1011. }
  1012. irq = platform_get_irq(pdev, 0);
  1013. if (irq <= 0) {
  1014. dev_err(&pdev->dev, "failed to get interrupt resource.\n");
  1015. ret = -EINVAL;
  1016. goto free_host;
  1017. }
  1018. host->pinctrl = devm_pinctrl_get(&pdev->dev);
  1019. if (IS_ERR(host->pinctrl)) {
  1020. ret = PTR_ERR(host->pinctrl);
  1021. goto free_host;
  1022. }
  1023. host->pins_default = pinctrl_lookup_state(host->pinctrl,
  1024. PINCTRL_STATE_DEFAULT);
  1025. if (IS_ERR(host->pins_default)) {
  1026. ret = PTR_ERR(host->pins_default);
  1027. goto free_host;
  1028. }
  1029. host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
  1030. "clk-gate");
  1031. if (IS_ERR(host->pins_clk_gate)) {
  1032. dev_warn(&pdev->dev,
  1033. "can't get clk-gate pinctrl, using clk_stop bit\n");
  1034. host->pins_clk_gate = NULL;
  1035. }
  1036. host->core_clk = devm_clk_get(&pdev->dev, "core");
  1037. if (IS_ERR(host->core_clk)) {
  1038. ret = PTR_ERR(host->core_clk);
  1039. goto free_host;
  1040. }
  1041. ret = clk_prepare_enable(host->core_clk);
  1042. if (ret)
  1043. goto free_host;
  1044. ret = meson_mmc_clk_init(host);
  1045. if (ret)
  1046. goto err_core_clk;
  1047. /* set config to sane default */
  1048. meson_mmc_cfg_init(host);
  1049. /* Stop execution */
  1050. writel(0, host->regs + SD_EMMC_START);
  1051. /* clear, ack and enable interrupts */
  1052. writel(0, host->regs + SD_EMMC_IRQ_EN);
  1053. writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
  1054. host->regs + SD_EMMC_STATUS);
  1055. writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
  1056. host->regs + SD_EMMC_IRQ_EN);
  1057. ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
  1058. meson_mmc_irq_thread, IRQF_SHARED,
  1059. NULL, host);
  1060. if (ret)
  1061. goto err_init_clk;
  1062. mmc->caps |= MMC_CAP_CMD23;
  1063. mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
  1064. mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
  1065. mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
  1066. mmc->max_seg_size = mmc->max_req_size;
  1067. /* data bounce buffer */
  1068. host->bounce_buf_size = mmc->max_req_size;
  1069. host->bounce_buf =
  1070. dma_alloc_coherent(host->dev, host->bounce_buf_size,
  1071. &host->bounce_dma_addr, GFP_KERNEL);
  1072. if (host->bounce_buf == NULL) {
  1073. dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
  1074. ret = -ENOMEM;
  1075. goto err_init_clk;
  1076. }
  1077. host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
  1078. &host->descs_dma_addr, GFP_KERNEL);
  1079. if (!host->descs) {
  1080. dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
  1081. ret = -ENOMEM;
  1082. goto err_bounce_buf;
  1083. }
  1084. mmc->ops = &meson_mmc_ops;
  1085. mmc_add_host(mmc);
  1086. return 0;
  1087. err_bounce_buf:
  1088. dma_free_coherent(host->dev, host->bounce_buf_size,
  1089. host->bounce_buf, host->bounce_dma_addr);
  1090. err_init_clk:
  1091. clk_disable_unprepare(host->mmc_clk);
  1092. err_core_clk:
  1093. clk_disable_unprepare(host->core_clk);
  1094. free_host:
  1095. mmc_free_host(mmc);
  1096. return ret;
  1097. }
/*
 * Remove: unregister from the MMC core first so no new requests arrive,
 * quiesce the controller, then release DMA buffers, clocks and the host
 * in reverse order of probe.
 */
static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);

	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
			  host->descs, host->descs_dma_addr);
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->mmc_clk);
	clk_disable_unprepare(host->core_clk);
	mmc_free_host(host->mmc);

	return 0;
}
/* Per-SoC data for GX-family controllers (V2 clock register layout). */
static const struct meson_mmc_data meson_gx_data = {
	.tx_delay_mask = CLK_V2_TX_DELAY_MASK,
	.rx_delay_mask = CLK_V2_RX_DELAY_MASK,
	.always_on = CLK_V2_ALWAYS_ON,
};
/* Per-SoC data for AXG controllers (V3 clock register layout). */
static const struct meson_mmc_data meson_axg_data = {
	.tx_delay_mask = CLK_V3_TX_DELAY_MASK,
	.rx_delay_mask = CLK_V3_RX_DELAY_MASK,
	.always_on = CLK_V3_ALWAYS_ON,
};
/* Device-tree match table; .data selects the per-SoC register layout. */
static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxbb-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxl-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-gxm-mmc", .data = &meson_gx_data },
	{ .compatible = "amlogic,meson-axg-mmc", .data = &meson_axg_data },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
/* Platform driver glue and module metadata. */
static struct platform_driver meson_mmc_driver = {
	.probe = meson_mmc_probe,
	.remove = meson_mmc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};
module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX*/AXG SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");