meson-gx-mmc.c

/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>

#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define   CLK_DIV_MASK GENMASK(5, 0)
#define   CLK_SRC_MASK GENMASK(7, 6)
#define   CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define   CLK_TX_PHASE_MASK GENMASK(11, 10)
#define   CLK_RX_PHASE_MASK GENMASK(13, 12)
#define   CLK_TX_DELAY_MASK GENMASK(19, 16)
#define   CLK_RX_DELAY_MASK GENMASK(23, 20)
#define   CLK_DELAY_STEP_PS 200
#define   CLK_PHASE_STEP 30
#define   CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP)
#define   CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10

#define SD_EMMC_START 0x40
#define   START_DESC_INIT BIT(0)
#define   START_DESC_BUSY BIT(1)
#define   START_DESC_ADDR_MASK GENMASK(31, 2)

#define SD_EMMC_CFG 0x44
#define   CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define   CFG_BUS_WIDTH_1 0x0
#define   CFG_BUS_WIDTH_4 0x1
#define   CFG_BUS_WIDTH_8 0x2
#define   CFG_DDR BIT(2)
#define   CFG_BLK_LEN_MASK GENMASK(7, 4)
#define   CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define   CFG_RC_CC_MASK GENMASK(15, 12)
#define   CFG_STOP_CLOCK BIT(22)
#define   CFG_CLK_ALWAYS_ON BIT(18)
#define   CFG_CHK_DS BIT(20)
#define   CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define   STATUS_BUSY BIT(31)
#define   STATUS_DATI GENMASK(23, 16)

#define SD_EMMC_IRQ_EN 0x4c
#define   IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define   IRQ_TXD_ERR BIT(8)
#define   IRQ_DESC_ERR BIT(9)
#define   IRQ_RESP_ERR BIT(10)
#define   IRQ_CRC_ERR \
        (IRQ_RXD_ERR_MASK | IRQ_TXD_ERR | IRQ_DESC_ERR | IRQ_RESP_ERR)
#define   IRQ_RESP_TIMEOUT BIT(11)
#define   IRQ_DESC_TIMEOUT BIT(12)
#define   IRQ_TIMEOUTS \
        (IRQ_RESP_TIMEOUT | IRQ_DESC_TIMEOUT)
#define   IRQ_END_OF_CHAIN BIT(13)
#define   IRQ_RESP_STATUS BIT(14)
#define   IRQ_SDIO BIT(15)
#define   IRQ_EN_MASK \
        (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\
         IRQ_SDIO)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2
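
/*
 * Hardware DMA descriptor: 16 bytes mirroring the four SD_EMMC_CMD_*
 * registers. In descriptor chain mode, SD_EMMC_START is pointed at an
 * array of these and the controller walks the chain on its own.
 */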
struct sd_emmc_desc {
        u32 cmd_cfg;
        u32 cmd_arg;
        u32 cmd_data;
        u32 cmd_resp;
};

struct meson_host {
        struct device *dev;
        struct mmc_host *mmc;
        struct mmc_command *cmd;

        spinlock_t lock;
        void __iomem *regs;
        struct clk *core_clk;
        struct clk *mmc_clk;
        struct clk *rx_clk;
        struct clk *tx_clk;
        unsigned long req_rate;

        struct pinctrl *pinctrl;
        struct pinctrl_state *pins_default;
        struct pinctrl_state *pins_clk_gate;

        unsigned int bounce_buf_size;
        void *bounce_buf;
        dma_addr_t bounce_dma_addr;
        struct sd_emmc_desc *descs;
        dma_addr_t descs_dma_addr;

        bool vqmmc_enabled;
};

#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

struct meson_mmc_phase {
        struct clk_hw hw;
        void __iomem *reg;
        unsigned long phase_mask;
        unsigned long delay_mask;
        unsigned int delay_step_ps;
};

#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw)

static int meson_mmc_clk_get_phase(struct clk_hw *hw)
{
        struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
        unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
        unsigned long period_ps, p, d;
        int degrees;
        u32 val;

        val = readl(mmc->reg);
        p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask);
        degrees = p * 360 / phase_num;

        if (mmc->delay_mask) {
                period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
                                clk_get_rate(hw->clk));
                d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask);
                degrees += d * mmc->delay_step_ps * 360 / period_ps;
                degrees %= 360;
        }

        return degrees;
}
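
/*
 * Example of the math above: with the mmc clock at 50 MHz, period_ps is
 * 20000, so each 200ps delay step contributes 200 * 360 / 20000, i.e.
 * 3.6 degrees, on top of the coarse 90-degree phase selection given by
 * the 2-bit (4-point) phase field.
 */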
static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc,
                                        unsigned int phase,
                                        unsigned int delay)
{
        u32 val;

        val = readl(mmc->reg);
        val &= ~mmc->phase_mask;
        val |= phase << __ffs(mmc->phase_mask);

        if (mmc->delay_mask) {
                val &= ~mmc->delay_mask;
                val |= delay << __ffs(mmc->delay_mask);
        }

        writel(val, mmc->reg);
}

static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees)
{
        struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
        unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
        unsigned long period_ps, d = 0, r;
        uint64_t p;

        p = degrees % 360;

        if (!mmc->delay_mask) {
                p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num);
        } else {
                period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
                                clk_get_rate(hw->clk));

                /*
                 * First compute the phase index (p); the remainder (r) is
                 * the part we'll try to achieve using the delays (d).
                 */
                r = do_div(p, 360 / phase_num);
                d = DIV_ROUND_CLOSEST(r * period_ps,
                                      360 * mmc->delay_step_ps);
                d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask));
        }

        meson_mmc_apply_phase_delay(mmc, p, d);
        return 0;
}
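
/*
 * Worked example: requesting 120 degrees on a 4-point (90-degree step)
 * phase clock running at 50 MHz gives p = 1 (90 degrees) and r = 30
 * degrees. 30 degrees of a 20000ps period is ~1667ps, which rounds to
 * d = 8 delay steps of 200ps, capped by the width of the delay field.
 */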
static const struct clk_ops meson_mmc_clk_phase_ops = {
        .get_phase = meson_mmc_clk_get_phase,
        .set_phase = meson_mmc_clk_set_phase,
};

static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
        unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

        if (!timeout)
                return SD_EMMC_CMD_TIMEOUT_DATA;

        timeout = roundup_pow_of_two(timeout);

        return min(timeout, 32768U); /* max. 2^15 ms */
}
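
/*
 * The hardware timeout field stores ilog2(ms) (see the FIELD_PREP of
 * CMD_CFG_TIMEOUT_MASK below), hence the round-up to a power of two:
 * e.g. a 100ms request becomes 128ms. The 4-bit field caps it at 2^15 ms.
 */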
static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
        if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
                return cmd->mrq->cmd;
        else if (mmc_op_multi(cmd->opcode) &&
                 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
                return cmd->mrq->stop;
        else
                return NULL;
}
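
/*
 * In other words: after a successful CMD23 (SET_BLOCK_COUNT) the actual
 * data command follows, and after a multi-block command a STOP is issued
 * only when no CMD23 preceded it or when an error occurred.
 */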
static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
                                        struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        struct scatterlist *sg;
        int i;
        bool use_desc_chain_mode = true;

        /*
         * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
         * reported. For some strange reason this occurs in descriptor
         * chain mode only. So let's fall back to bounce buffer mode
         * for command SD_IO_RW_EXTENDED.
         */
        if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
                return;

        for_each_sg(data->sg, sg, data->sg_len, i)
                /* check for 8 byte alignment */
                if (sg->offset & 7) {
                        WARN_ONCE(1, "unaligned scatterlist buffer\n");
                        use_desc_chain_mode = false;
                        break;
                }

        if (use_desc_chain_mode)
                data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}
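
/*
 * Descriptor chain mode DMAs straight from the scatterlist, so every
 * buffer must be 8-byte aligned; anything else falls back to the slower
 * bounce buffer path.
 */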
static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
        return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
        return data && data->flags & MMC_DATA_READ &&
               !meson_mmc_desc_chain_mode(data);
}

static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (!data)
                return;

        meson_mmc_get_transfer_mode(mmc, mrq);
        data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

        if (!meson_mmc_desc_chain_mode(data))
                return;

        data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
                                    mmc_get_dma_dir(data));
        if (!data->sg_count)
                dev_err(mmc_dev(mmc), "dma_map_sg failed");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
                               int err)
{
        struct mmc_data *data = mrq->data;

        if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
                dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
                             mmc_get_dma_dir(data));
}

static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios)
{
        if (ios->timing == MMC_TIMING_MMC_DDR52 ||
            ios->timing == MMC_TIMING_UHS_DDR50 ||
            ios->timing == MMC_TIMING_MMC_HS400)
                return true;

        return false;
}

/*
 * Gating the clock on this controller is tricky. It seems the mmc clock
 * is also used by the controller. It may crash during some operation if the
 * clock is stopped. The safest thing to do, whenever possible, is to keep
 * the clock running and stop it at the pad using the pinmux.
 */
static void meson_mmc_clk_gate(struct meson_host *host)
{
        u32 cfg;

        if (host->pins_clk_gate) {
                pinctrl_select_state(host->pinctrl, host->pins_clk_gate);
        } else {
                /*
                 * If the pinmux is not provided - default to the classic and
                 * unsafe method
                 */
                cfg = readl(host->regs + SD_EMMC_CFG);
                cfg |= CFG_STOP_CLOCK;
                writel(cfg, host->regs + SD_EMMC_CFG);
        }
}

static void meson_mmc_clk_ungate(struct meson_host *host)
{
        u32 cfg;

        if (host->pins_clk_gate)
                pinctrl_select_state(host->pinctrl, host->pins_default);

        /* Make sure the clock is not stopped in the controller */
        cfg = readl(host->regs + SD_EMMC_CFG);
        cfg &= ~CFG_STOP_CLOCK;
        writel(cfg, host->regs + SD_EMMC_CFG);
}

static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
{
        struct mmc_host *mmc = host->mmc;
        unsigned long rate = ios->clock;
        int ret;
        u32 cfg;

        /* DDR modes require higher module clock */
        if (meson_mmc_timing_is_ddr(ios))
                rate <<= 1;

        /* Same request - bail-out */
        if (host->req_rate == rate)
                return 0;

        /* stop clock */
        meson_mmc_clk_gate(host);
        host->req_rate = 0;

        if (!rate) {
                mmc->actual_clock = 0;
                /* return with clock being stopped */
                return 0;
        }

        /* Stop the clock during rate change to avoid glitches */
        cfg = readl(host->regs + SD_EMMC_CFG);
        cfg |= CFG_STOP_CLOCK;
        writel(cfg, host->regs + SD_EMMC_CFG);

        ret = clk_set_rate(host->mmc_clk, rate);
        if (ret) {
                dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
                        rate, ret);
                return ret;
        }

        host->req_rate = rate;
        mmc->actual_clock = clk_get_rate(host->mmc_clk);

        /* We should report the real output frequency of the controller */
        if (meson_mmc_timing_is_ddr(ios))
                mmc->actual_clock >>= 1;

        dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
        if (ios->clock != mmc->actual_clock)
                dev_dbg(host->dev, "requested rate was %u\n", ios->clock);

        /* (re)start clock */
        meson_mmc_clk_ungate(host);

        return 0;
}
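
/*
 * Example of the DDR doubling above: eMMC DDR52 with a 52 MHz bus clock
 * needs the module clock at 104 MHz; the reported actual_clock is halved
 * again so the core still sees the bus rate.
 */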
/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
        struct clk_init_data init;
        struct clk_mux *mux;
        struct clk_divider *div;
        struct meson_mmc_phase *core, *tx, *rx;
        struct clk *clk;
        char clk_name[32];
        int i, ret = 0;
        const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
        const char *clk_parent[1];
        u32 clk_reg;

        /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
        clk_reg = 0;
        clk_reg |= CLK_ALWAYS_ON;
        clk_reg |= CLK_DIV_MASK;
        writel(clk_reg, host->regs + SD_EMMC_CLOCK);

        /* get the mux parents */
        for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
                struct clk *clk;
                char name[16];

                snprintf(name, sizeof(name), "clkin%d", i);
                clk = devm_clk_get(host->dev, name);
                if (IS_ERR(clk)) {
                        if (clk != ERR_PTR(-EPROBE_DEFER))
                                dev_err(host->dev, "Missing clock %s\n", name);
                        return PTR_ERR(clk);
                }

                mux_parent_names[i] = __clk_get_name(clk);
        }

        /* create the mux */
        mux = devm_kzalloc(host->dev, sizeof(*mux), GFP_KERNEL);
        if (!mux)
                return -ENOMEM;

        snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
        init.name = clk_name;
        init.ops = &clk_mux_ops;
        init.flags = 0;
        init.parent_names = mux_parent_names;
        init.num_parents = MUX_CLK_NUM_PARENTS;

        mux->reg = host->regs + SD_EMMC_CLOCK;
        mux->shift = __ffs(CLK_SRC_MASK);
        mux->mask = CLK_SRC_MASK >> mux->shift;
        mux->hw.init = &init;

        clk = devm_clk_register(host->dev, &mux->hw);
        if (WARN_ON(IS_ERR(clk)))
                return PTR_ERR(clk);

        /* create the divider */
        div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
        if (!div)
                return -ENOMEM;

        snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
        init.name = clk_name;
        init.ops = &clk_divider_ops;
        init.flags = CLK_SET_RATE_PARENT;
        clk_parent[0] = __clk_get_name(clk);
        init.parent_names = clk_parent;
        init.num_parents = 1;

        div->reg = host->regs + SD_EMMC_CLOCK;
        div->shift = __ffs(CLK_DIV_MASK);
        div->width = __builtin_popcountl(CLK_DIV_MASK);
        div->hw.init = &init;
        div->flags = CLK_DIVIDER_ONE_BASED;

        clk = devm_clk_register(host->dev, &div->hw);
        if (WARN_ON(IS_ERR(clk)))
                return PTR_ERR(clk);

        /* create the mmc core clock */
        core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL);
        if (!core)
                return -ENOMEM;

        snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev));
        init.name = clk_name;
        init.ops = &meson_mmc_clk_phase_ops;
        init.flags = CLK_SET_RATE_PARENT;
        clk_parent[0] = __clk_get_name(clk);
        init.parent_names = clk_parent;
        init.num_parents = 1;

        core->reg = host->regs + SD_EMMC_CLOCK;
        core->phase_mask = CLK_CORE_PHASE_MASK;
        core->hw.init = &init;

        host->mmc_clk = devm_clk_register(host->dev, &core->hw);
        if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk)))
                return PTR_ERR(host->mmc_clk);

        /* create the mmc tx clock */
        tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL);
        if (!tx)
                return -ENOMEM;

        snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev));
        init.name = clk_name;
        init.ops = &meson_mmc_clk_phase_ops;
        init.flags = 0;
        clk_parent[0] = __clk_get_name(host->mmc_clk);
        init.parent_names = clk_parent;
        init.num_parents = 1;

        tx->reg = host->regs + SD_EMMC_CLOCK;
        tx->phase_mask = CLK_TX_PHASE_MASK;
        tx->delay_mask = CLK_TX_DELAY_MASK;
        tx->delay_step_ps = CLK_DELAY_STEP_PS;
        tx->hw.init = &init;

        host->tx_clk = devm_clk_register(host->dev, &tx->hw);
        if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk)))
                return PTR_ERR(host->tx_clk);

        /* create the mmc rx clock */
        rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL);
        if (!rx)
                return -ENOMEM;

        snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev));
        init.name = clk_name;
        init.ops = &meson_mmc_clk_phase_ops;
        init.flags = 0;
        clk_parent[0] = __clk_get_name(host->mmc_clk);
        init.parent_names = clk_parent;
        init.num_parents = 1;

        rx->reg = host->regs + SD_EMMC_CLOCK;
        rx->phase_mask = CLK_RX_PHASE_MASK;
        rx->delay_mask = CLK_RX_DELAY_MASK;
        rx->delay_step_ps = CLK_DELAY_STEP_PS;
        rx->hw.init = &init;

        host->rx_clk = devm_clk_register(host->dev, &rx->hw);
        if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk)))
                return PTR_ERR(host->rx_clk);

        /* start with the clock at (roughly) the minimum usable rate */
        host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
        ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
        if (ret)
                return ret;

        /*
         * Set phases: these values are mostly the datasheet recommended
         * ones, except for the Tx phase. The datasheet recommends 180 but
         * some cards fail at initialisation with it. 270 works just fine,
         * it fixes these initialisation issues and enables eMMC DDR52 mode.
         */
        clk_set_phase(host->mmc_clk, 180);
        clk_set_phase(host->tx_clk, 270);
        clk_set_phase(host->rx_clk, 0);

        return clk_prepare_enable(host->mmc_clk);
}
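
/*
 * The resulting clock tree, as registered above:
 *
 *   clkin0 --\
 *             mux -- div -- core phase (host->mmc_clk) --+-- tx phase
 *   clkin1 --/                                           \-- rx phase
 *
 * All of them are backed by bitfields of the single SD_EMMC_CLOCK register.
 */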
static void meson_mmc_shift_map(unsigned long *map, unsigned long shift)
{
        DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM);
        DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM);

        /*
         * shift the bitmap right and reintroduce the dropped bits on the left
         * of the bitmap
         */
        bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM);
        bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift,
                          CLK_PHASE_POINT_NUM);
        bitmap_or(map, left, right, CLK_PHASE_POINT_NUM);
}
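
/*
 * This amounts to a rotation. Example with 12 points: rotating the set
 * {0,1} right by 2 drops both bits from the right-shifted copy, but the
 * left shift by 10 brings them back in as {10,11}; OR-ing the two halves
 * completes the rotate.
 */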
static void meson_mmc_find_next_region(unsigned long *map,
                                       unsigned long *start,
                                       unsigned long *stop)
{
        *start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start);
        *stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start);
}

static int meson_mmc_find_tuning_point(unsigned long *test)
{
        unsigned long shift, stop, offset = 0, start = 0, size = 0;

        /* Get the all good/all bad situation out the way */
        if (bitmap_full(test, CLK_PHASE_POINT_NUM))
                return 0; /* All points are good so point 0 will do */
        else if (bitmap_empty(test, CLK_PHASE_POINT_NUM))
                return -EIO; /* No successful tuning point */

        /*
         * Now we know there is at least one region to find. Make sure it
         * does not wrap around by shifting the bitmap if necessary
         */
        shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM);
        if (shift != 0)
                meson_mmc_shift_map(test, shift);

        while (start < CLK_PHASE_POINT_NUM) {
                meson_mmc_find_next_region(test, &start, &stop);
                if ((stop - start) > size) {
                        offset = start;
                        size = stop - start;
                }

                start = stop;
        }

        /* Get the center point of the region */
        offset += (size / 2);

        /* Shift the result back */
        offset = (offset + shift) % CLK_PHASE_POINT_NUM;

        return offset;
}
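
/*
 * Example: if points 3..8 of the 12 test points passed, the largest good
 * region is [3, 9), its center is 3 + 6 / 2 = 6, and with no shift
 * applied the chosen tuning point is phase 6 * 30 = 180 degrees.
 */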
static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
                                      struct clk *clk)
{
        int point, ret;
        DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM);

        dev_dbg(mmc_dev(mmc), "%s phase/delay tuning...\n",
                __clk_get_name(clk));
        bitmap_zero(test, CLK_PHASE_POINT_NUM);

        /* Explore tuning points */
        for (point = 0; point < CLK_PHASE_POINT_NUM; point++) {
                clk_set_phase(clk, point * CLK_PHASE_STEP);
                ret = mmc_send_tuning(mmc, opcode, NULL);
                if (!ret)
                        set_bit(point, test);
        }

        /* Find the optimal tuning point and apply it */
        point = meson_mmc_find_tuning_point(test);
        if (point < 0)
                return point; /* tuning failed */

        clk_set_phase(clk, point * CLK_PHASE_STEP);
        dev_dbg(mmc_dev(mmc), "success with phase: %d\n",
                clk_get_phase(clk));
        return 0;
}

static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct meson_host *host = mmc_priv(mmc);
        int ret;

        /*
         * If this is the initial tuning, try to get a sane Rx starting
         * phase before doing the actual tuning.
         */
        if (!mmc->doing_retune) {
                ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
                if (ret)
                        return ret;
        }

        ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
        if (ret)
                return ret;

        return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}

static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct meson_host *host = mmc_priv(mmc);
        u32 bus_width, val;
        int err;

        /*
         * GPIO regulator, only controls switching between 1v8 and
         * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
         */
        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

                if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
                        regulator_disable(mmc->supply.vqmmc);
                        host->vqmmc_enabled = false;
                }

                break;

        case MMC_POWER_UP:
                if (!IS_ERR(mmc->supply.vmmc))
                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

                /* Reset phases */
                clk_set_phase(host->rx_clk, 0);
                clk_set_phase(host->tx_clk, 270);

                break;

        case MMC_POWER_ON:
                if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
                        int ret = regulator_enable(mmc->supply.vqmmc);

                        if (ret < 0)
                                dev_err(host->dev,
                                        "failed to enable vqmmc regulator\n");
                        else
                                host->vqmmc_enabled = true;
                }

                break;
        }

        /* Bus width */
        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_1:
                bus_width = CFG_BUS_WIDTH_1;
                break;
        case MMC_BUS_WIDTH_4:
                bus_width = CFG_BUS_WIDTH_4;
                break;
        case MMC_BUS_WIDTH_8:
                bus_width = CFG_BUS_WIDTH_8;
                break;
        default:
                dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
                        ios->bus_width);
                bus_width = CFG_BUS_WIDTH_4;
        }

        val = readl(host->regs + SD_EMMC_CFG);
        val &= ~CFG_BUS_WIDTH_MASK;
        val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);

        val &= ~CFG_DDR;
        if (meson_mmc_timing_is_ddr(ios))
                val |= CFG_DDR;

        val &= ~CFG_CHK_DS;
        if (ios->timing == MMC_TIMING_MMC_HS400)
                val |= CFG_CHK_DS;

        err = meson_mmc_clk_set(host, ios);
        if (err)
                dev_err(host->dev, "Failed to set clock: %d\n", err);

        writel(val, host->regs + SD_EMMC_CFG);
        dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
}

static void meson_mmc_request_done(struct mmc_host *mmc,
                                   struct mmc_request *mrq)
{
        struct meson_host *host = mmc_priv(mmc);

        host->cmd = NULL;
        mmc_request_done(host->mmc, mrq);
}

static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
        struct meson_host *host = mmc_priv(mmc);
        u32 cfg, blksz_old;

        cfg = readl(host->regs + SD_EMMC_CFG);
        blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

        if (!is_power_of_2(blksz))
                dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

        blksz = ilog2(blksz);

        /* check if block-size matches, if not update */
        if (blksz == blksz_old)
                return;

        dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
                blksz_old, blksz);

        cfg &= ~CFG_BLK_LEN_MASK;
        cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
        writel(cfg, host->regs + SD_EMMC_CFG);
}

static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136)
                        *cmd_cfg |= CMD_CFG_RESP_128;
                *cmd_cfg |= CMD_CFG_RESP_NUM;
                if (!(cmd->flags & MMC_RSP_CRC))
                        *cmd_cfg |= CMD_CFG_RESP_NOCRC;

                if (cmd->flags & MMC_RSP_BUSY)
                        *cmd_cfg |= CMD_CFG_R1B;
        } else {
                *cmd_cfg |= CMD_CFG_NO_RESP;
        }
}

static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
        struct meson_host *host = mmc_priv(mmc);
        struct sd_emmc_desc *desc = host->descs;
        struct mmc_data *data = host->cmd->data;
        struct scatterlist *sg;
        u32 start;
        int i;

        if (data->flags & MMC_DATA_WRITE)
                cmd_cfg |= CMD_CFG_DATA_WR;

        if (data->blocks > 1) {
                cmd_cfg |= CMD_CFG_BLOCK_MODE;
                meson_mmc_set_blksz(mmc, data->blksz);
        }

        for_each_sg(data->sg, sg, data->sg_count, i) {
                unsigned int len = sg_dma_len(sg);

                if (data->blocks > 1)
                        len /= data->blksz;

                desc[i].cmd_cfg = cmd_cfg;
                desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
                if (i > 0)
                        desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
                desc[i].cmd_arg = host->cmd->arg;
                desc[i].cmd_resp = 0;
                desc[i].cmd_data = sg_dma_address(sg);
        }
        desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

        dma_wmb(); /* ensure descriptor is written before kicked */
        start = host->descs_dma_addr | START_DESC_BUSY;
        writel(start, host->regs + SD_EMMC_START);
}
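
/*
 * One descriptor is built per scatterlist entry: only the first one
 * actually issues the command (the rest carry CMD_CFG_NO_CMD), and the
 * last one is flagged END_OF_CHAIN. Writing the chain's DMA address with
 * START_DESC_BUSY into SD_EMMC_START kicks the transfer.
 */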
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
        struct meson_host *host = mmc_priv(mmc);
        struct mmc_data *data = cmd->data;
        u32 cmd_cfg = 0, cmd_data = 0;
        unsigned int xfer_bytes = 0;

        /* Setup descriptors */
        dma_rmb();

        host->cmd = cmd;

        cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
        cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */

        meson_mmc_set_response_bits(cmd, &cmd_cfg);

        /* data? */
        if (data) {
                data->bytes_xfered = 0;
                cmd_cfg |= CMD_CFG_DATA_IO;
                cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
                                      ilog2(meson_mmc_get_timeout_msecs(data)));

                if (meson_mmc_desc_chain_mode(data)) {
                        meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
                        return;
                }

                if (data->blocks > 1) {
                        cmd_cfg |= CMD_CFG_BLOCK_MODE;
                        cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
                                              data->blocks);
                        meson_mmc_set_blksz(mmc, data->blksz);
                } else {
                        cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
                }

                xfer_bytes = data->blksz * data->blocks;
                if (data->flags & MMC_DATA_WRITE) {
                        cmd_cfg |= CMD_CFG_DATA_WR;
                        WARN_ON(xfer_bytes > host->bounce_buf_size);
                        sg_copy_to_buffer(data->sg, data->sg_len,
                                          host->bounce_buf, xfer_bytes);
                        dma_wmb();
                }

                cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
        } else {
                cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
                                      ilog2(SD_EMMC_CMD_TIMEOUT));
        }

        /* Last descriptor */
        cmd_cfg |= CMD_CFG_END_OF_CHAIN;
        writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
        writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
        writel(0, host->regs + SD_EMMC_CMD_RSP);
        wmb(); /* ensure descriptor is written before kicked */
        writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}
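
/*
 * Without a descriptor chain, the command is programmed through the
 * SD_EMMC_CMD_* registers directly and data goes through the coherent
 * bounce buffer; judging by the barrier above, the final write to
 * SD_EMMC_CMD_ARG is what starts execution.
 */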
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct meson_host *host = mmc_priv(mmc);
        bool needs_pre_post_req = mrq->data &&
                !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

        if (needs_pre_post_req) {
                meson_mmc_get_transfer_mode(mmc, mrq);
                if (!meson_mmc_desc_chain_mode(mrq->data))
                        needs_pre_post_req = false;
        }

        if (needs_pre_post_req)
                meson_mmc_pre_req(mmc, mrq);

        /* Stop execution */
        writel(0, host->regs + SD_EMMC_START);

        meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

        if (needs_pre_post_req)
                meson_mmc_post_req(mmc, mrq, 0);
}

static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
        struct meson_host *host = mmc_priv(mmc);

        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
                cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
                cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
                cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
        } else if (cmd->flags & MMC_RSP_PRESENT) {
                cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
        }
}

static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
        struct meson_host *host = dev_id;
        struct mmc_command *cmd;
        struct mmc_data *data;
        u32 irq_en, status, raw_status;
        irqreturn_t ret = IRQ_NONE;

        if (WARN_ON(!host) || WARN_ON(!host->cmd))
                return IRQ_NONE;

        spin_lock(&host->lock);

        cmd = host->cmd;
        data = cmd->data;
        irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
        raw_status = readl(host->regs + SD_EMMC_STATUS);
        status = raw_status & irq_en;

        cmd->error = 0;
        if (status & IRQ_CRC_ERR) {
                dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
                cmd->error = -EILSEQ;
                ret = IRQ_HANDLED;
                goto out;
        }

        if (status & IRQ_TIMEOUTS) {
                dev_dbg(host->dev, "Timeout - status 0x%08x\n", status);
                cmd->error = -ETIMEDOUT;
                ret = IRQ_HANDLED;
                goto out;
        }

        meson_mmc_read_resp(host->mmc, cmd);

        if (status & IRQ_SDIO) {
                dev_dbg(host->dev, "IRQ: SDIO TODO.\n");
                ret = IRQ_HANDLED;
        }

        if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
                if (data && !cmd->error)
                        data->bytes_xfered = data->blksz * data->blocks;
                if (meson_mmc_bounce_buf_read(data) ||
                    meson_mmc_get_next_command(cmd))
                        ret = IRQ_WAKE_THREAD;
                else
                        ret = IRQ_HANDLED;
        }

out:
        /* ack all enabled interrupts */
        writel(irq_en, host->regs + SD_EMMC_STATUS);

        if (ret == IRQ_HANDLED)
                meson_mmc_request_done(host->mmc, cmd->mrq);
        else if (ret == IRQ_NONE)
                dev_warn(host->dev,
                         "Unexpected IRQ! status=0x%08x, irq_en=0x%08x\n",
                         raw_status, irq_en);

        spin_unlock(&host->lock);
        return ret;
}

static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
        struct meson_host *host = dev_id;
        struct mmc_command *next_cmd, *cmd = host->cmd;
        struct mmc_data *data;
        unsigned int xfer_bytes;

        if (WARN_ON(!cmd))
                return IRQ_NONE;

        data = cmd->data;
        if (meson_mmc_bounce_buf_read(data)) {
                xfer_bytes = data->blksz * data->blocks;
                WARN_ON(xfer_bytes > host->bounce_buf_size);
                sg_copy_from_buffer(data->sg, data->sg_len,
                                    host->bounce_buf, xfer_bytes);
        }

        next_cmd = meson_mmc_get_next_command(cmd);
        if (next_cmd)
                meson_mmc_start_cmd(host->mmc, next_cmd);
        else
                meson_mmc_request_done(host->mmc, cmd->mrq);

        return IRQ_HANDLED;
}

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
        int status = mmc_gpio_get_cd(mmc);

        if (status == -ENOSYS)
                return 1; /* assume present */

        return status;
}

static void meson_mmc_cfg_init(struct meson_host *host)
{
        u32 cfg = 0;

        cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
                          ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
        cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
        cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

        writel(cfg, host->regs + SD_EMMC_CFG);
}

static int meson_mmc_card_busy(struct mmc_host *mmc)
{
        struct meson_host *host = mmc_priv(mmc);
        u32 regval;

        regval = readl(host->regs + SD_EMMC_STATUS);

        /* We are only interested in lines 0 to 3, so mask the other ones */
        return !(FIELD_GET(STATUS_DATI, regval) & 0xf);
}

static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
        /* vqmmc regulator is available */
        if (!IS_ERR(mmc->supply.vqmmc)) {
                /*
                 * The usual amlogic setup uses a GPIO to switch from one
                 * regulator to the other. While the voltage ramp up is
                 * pretty fast, care must be taken when switching from 3.3v
                 * to 1.8v. Please make sure the regulator framework is aware
                 * of your own regulator constraints.
                 */
                return mmc_regulator_set_vqmmc(mmc, ios);
        }

        /* no vqmmc regulator, assume fixed regulator at 3/3.3V */
        if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
                return 0;

        return -EINVAL;
}

static const struct mmc_host_ops meson_mmc_ops = {
        .request = meson_mmc_request,
        .set_ios = meson_mmc_set_ios,
        .get_cd = meson_mmc_get_cd,
        .pre_req = meson_mmc_pre_req,
        .post_req = meson_mmc_post_req,
        .execute_tuning = meson_mmc_execute_tuning,
        .card_busy = meson_mmc_card_busy,
        .start_signal_voltage_switch = meson_mmc_voltage_switch,
};

static int meson_mmc_probe(struct platform_device *pdev)
{
        struct resource *res;
        struct meson_host *host;
        struct mmc_host *mmc;
        int ret, irq;

        mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
        if (!mmc)
                return -ENOMEM;
        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->dev = &pdev->dev;
        dev_set_drvdata(&pdev->dev, host);

        spin_lock_init(&host->lock);

        /* Get regulators and the supported OCR mask */
        host->vqmmc_enabled = false;
        ret = mmc_regulator_get_supply(mmc);
        if (ret == -EPROBE_DEFER)
                goto free_host;

        ret = mmc_of_parse(mmc);
        if (ret) {
                if (ret != -EPROBE_DEFER)
                        dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
                goto free_host;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(host->regs)) {
                ret = PTR_ERR(host->regs);
                goto free_host;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
                dev_err(&pdev->dev, "failed to get interrupt resource.\n");
                ret = -EINVAL;
                goto free_host;
        }

        host->pinctrl = devm_pinctrl_get(&pdev->dev);
        if (IS_ERR(host->pinctrl)) {
                ret = PTR_ERR(host->pinctrl);
                goto free_host;
        }

        host->pins_default = pinctrl_lookup_state(host->pinctrl,
                                                  PINCTRL_STATE_DEFAULT);
        if (IS_ERR(host->pins_default)) {
                ret = PTR_ERR(host->pins_default);
                goto free_host;
        }

        host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl,
                                                   "clk-gate");
        if (IS_ERR(host->pins_clk_gate)) {
                dev_warn(&pdev->dev,
                         "can't get clk-gate pinctrl, using clk_stop bit\n");
                host->pins_clk_gate = NULL;
        }

        host->core_clk = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(host->core_clk)) {
                ret = PTR_ERR(host->core_clk);
                goto free_host;
        }

        ret = clk_prepare_enable(host->core_clk);
        if (ret)
                goto free_host;

        ret = meson_mmc_clk_init(host);
        if (ret)
                goto err_core_clk;

        /* set config to sane default */
        meson_mmc_cfg_init(host);

        /* Stop execution */
        writel(0, host->regs + SD_EMMC_START);

        /* clear, ack and enable interrupts */
        writel(0, host->regs + SD_EMMC_IRQ_EN);
        writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
               host->regs + SD_EMMC_STATUS);
        writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
               host->regs + SD_EMMC_IRQ_EN);

        ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
                                        meson_mmc_irq_thread, IRQF_SHARED,
                                        NULL, host);
        if (ret)
                goto err_init_clk;

        mmc->caps |= MMC_CAP_CMD23;
        mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
        mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
        mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
        mmc->max_seg_size = mmc->max_req_size;

        /* data bounce buffer */
        host->bounce_buf_size = mmc->max_req_size;
        host->bounce_buf =
                dma_alloc_coherent(host->dev, host->bounce_buf_size,
                                   &host->bounce_dma_addr, GFP_KERNEL);
        if (host->bounce_buf == NULL) {
                dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
                ret = -ENOMEM;
                goto err_init_clk;
        }

        host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
                                         &host->descs_dma_addr, GFP_KERNEL);
        if (!host->descs) {
                dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
                ret = -ENOMEM;
                goto err_bounce_buf;
        }

        mmc->ops = &meson_mmc_ops;
        mmc_add_host(mmc);

        return 0;

err_bounce_buf:
        dma_free_coherent(host->dev, host->bounce_buf_size,
                          host->bounce_buf, host->bounce_dma_addr);
err_init_clk:
        clk_disable_unprepare(host->mmc_clk);
err_core_clk:
        clk_disable_unprepare(host->core_clk);
free_host:
        mmc_free_host(mmc);
        return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
        struct meson_host *host = dev_get_drvdata(&pdev->dev);

        mmc_remove_host(host->mmc);

        /* disable interrupts */
        writel(0, host->regs + SD_EMMC_IRQ_EN);

        dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
                          host->descs, host->descs_dma_addr);
        dma_free_coherent(host->dev, host->bounce_buf_size,
                          host->bounce_buf, host->bounce_dma_addr);

        clk_disable_unprepare(host->mmc_clk);
        clk_disable_unprepare(host->core_clk);

        mmc_free_host(host->mmc);
        return 0;
}

static const struct of_device_id meson_mmc_of_match[] = {
        { .compatible = "amlogic,meson-gx-mmc", },
        { .compatible = "amlogic,meson-gxbb-mmc", },
        { .compatible = "amlogic,meson-gxl-mmc", },
        { .compatible = "amlogic,meson-gxm-mmc", },
        {}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

static struct platform_driver meson_mmc_driver = {
        .probe = meson_mmc_probe,
        .remove = meson_mmc_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = of_match_ptr(meson_mmc_of_match),
        },
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");