/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32
struct cqhci_slot {
        struct mmc_request *mrq;
        unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
#define CQHCI_COMPLETED         BIT(1)
#define CQHCI_HOST_CRC          BIT(2)
#define CQHCI_HOST_TIMEOUT      BIT(3)
#define CQHCI_HOST_OTHER        BIT(4)
};
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *desc = get_desc(cq_host, tag);

        return desc + cq_host->task_desc_len;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_dma_base +
                (cq_host->mmc->max_segs * tag *
                 cq_host->trans_desc_len);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_base +
                (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
}
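
/*
 * Initialize the link descriptor in a slot: clear it and, for regular task
 * slots, mark it valid and point it at that slot's transfer descriptor list.
 * The DCMD slot gets an end descriptor instead, since a direct command
 * carries no data transfer.
 */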
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *link_temp;
        dma_addr_t trans_temp;

        link_temp = get_link_desc(cq_host, tag);
        trans_temp = get_trans_desc_dma(cq_host, tag);

        memset(link_temp, 0, cq_host->link_desc_len);
        if (cq_host->link_desc_len > 8)
                *(link_temp + 8) = 0;

        if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
                *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
                return;
        }

        *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

        if (cq_host->dma64) {
                __le64 *data_addr = (__le64 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le64(trans_temp);
        } else {
                __le32 *data_addr = (__le32 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le32(trans_temp);
        }
}
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
        cqhci_writel(cq_host, set, CQHCI_ISTE);
        cqhci_writel(cq_host, set, CQHCI_ISGE);
}
#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
        pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;

        CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

        CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CAP),
                   cqhci_readl(cq_host, CQHCI_VER));
        CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CFG),
                   cqhci_readl(cq_host, CQHCI_CTL));
        CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_IS),
                   cqhci_readl(cq_host, CQHCI_ISTE));
        CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_ISGE),
                   cqhci_readl(cq_host, CQHCI_IC));
        CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDLBA),
                   cqhci_readl(cq_host, CQHCI_TDLBAU));
        CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDBR),
                   cqhci_readl(cq_host, CQHCI_TCN));
        CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_DQS),
                   cqhci_readl(cq_host, CQHCI_DPT));
        CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TCLR),
                   cqhci_readl(cq_host, CQHCI_SSC1));
        CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_SSC2),
                   cqhci_readl(cq_host, CQHCI_CRDCT));
        CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_RMEM),
                   cqhci_readl(cq_host, CQHCI_TERRI));
        CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CRI),
                   cqhci_readl(cq_host, CQHCI_CRA));

        if (cq_host->ops->dumpregs)
                cq_host->ops->dumpregs(mmc);
        else
                CQHCI_DUMP(": ===========================================\n");
}
/**
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
        int i = 0;

        /* task descriptor can be 64/128 bit irrespective of arch */
        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
                             CQHCI_TASK_DESC_SZ, CQHCI_CFG);
                cq_host->task_desc_len = 16;
        } else {
                cq_host->task_desc_len = 8;
        }

        /*
         * 96 bits length of transfer desc instead of 128 bits which means
         * ADMA would expect next valid descriptor at the 96th bit
         * or 128th bit
         */
        if (cq_host->dma64) {
                if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
                        cq_host->trans_desc_len = 12;
                else
                        cq_host->trans_desc_len = 16;
                cq_host->link_desc_len = 16;
        } else {
                cq_host->trans_desc_len = 8;
                cq_host->link_desc_len = 8;
        }

        /* total size of a slot: 1 task & 1 transfer (link) */
        cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

        cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

        cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
                (cq_host->num_slots - 1);

        pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_size,
                 cq_host->data_size, cq_host->slot_sz);

        /*
         * allocate a dma-mapped chunk of memory for the descriptors
         * allocate a dma-mapped chunk of memory for link descriptors
         * setup each link-desc memory offset per slot-number to
         * the descriptor table.
         */
        cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                 cq_host->desc_size,
                                                 &cq_host->desc_dma_base,
                                                 GFP_KERNEL);
        cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                       cq_host->data_size,
                                                       &cq_host->trans_desc_dma_base,
                                                       GFP_KERNEL);
        if (!cq_host->desc_base || !cq_host->trans_desc_base)
                return -ENOMEM;

        pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_base,
                 cq_host->trans_desc_base,
                 (unsigned long long)cq_host->desc_dma_base,
                 (unsigned long long)cq_host->trans_desc_dma_base);

        for (; i < (cq_host->num_slots); i++)
                setup_trans_desc(cq_host, i);

        return 0;
}
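
/*
 * Program the controller from scratch: select the descriptor sizes, point the
 * task descriptor list base registers at the DMA address of the descriptor
 * table, set the enable bit and finally unmask the CQHCI interrupts.
 */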
static void __cqhci_enable(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

        /* Configuration must not be changed while enabled */
        if (cqcfg & CQHCI_ENABLE) {
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
        }

        cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                cqcfg |= CQHCI_DCMD;

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
                cqcfg |= CQHCI_TASK_DESC_SZ;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBA);
        cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBAU);

        cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

        cqhci_set_irqs(cq_host, 0);

        cqcfg |= CQHCI_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        mmc->cqe_on = true;

        if (cq_host->ops->enable)
                cq_host->ops->enable(mmc);

        /* Ensure all writes are done before interrupts are enabled */
        wmb();

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        cq_host->activated = true;
}
static void __cqhci_disable(struct cqhci_host *cq_host)
{
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
        cqcfg &= ~CQHCI_ENABLE;
        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cq_host->mmc->cqe_on = false;

        cq_host->activated = false;
}

int cqhci_suspend(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (cq_host->enabled)
                __cqhci_disable(cq_host);

        return 0;
}
EXPORT_SYMBOL(cqhci_suspend);

int cqhci_resume(struct mmc_host *mmc)
{
        /* Re-enable is done upon first request */
        return 0;
}
EXPORT_SYMBOL(cqhci_resume);
static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int err;

        if (cq_host->enabled)
                return 0;

        cq_host->rca = card->rca;

        err = cqhci_host_alloc_tdl(cq_host);
        if (err)
                return err;

        __cqhci_enable(cq_host);

        cq_host->enabled = true;

#ifdef DEBUG
        cqhci_dumpregs(cq_host);
#endif
        return 0;
}
/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static void cqhci_off(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        ktime_t timeout;
        bool timed_out;
        u32 reg;

        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, false);

        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

        timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
        while (1) {
                timed_out = ktime_compare(ktime_get(), timeout) > 0;
                reg = cqhci_readl(cq_host, CQHCI_CTL);
                if ((reg & CQHCI_HALT) || timed_out)
                        break;
        }

        if (timed_out)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

        mmc->cqe_on = false;
}
static void cqhci_disable(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->enabled)
                return;

        cqhci_off(mmc);

        __cqhci_disable(cq_host);

        dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
                           cq_host->trans_desc_base,
                           cq_host->trans_desc_dma_base);

        dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
                           cq_host->desc_base,
                           cq_host->desc_dma_base);

        cq_host->trans_desc_base = NULL;
        cq_host->desc_base = NULL;

        cq_host->enabled = false;
}
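
/*
 * Build the 64-bit task descriptor word for a data request: attribute bits
 * (valid/end/interrupt), the "task" action code, request flags such as
 * priority and reliable write, the block count and the block address.
 */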
static void cqhci_prep_task_desc(struct mmc_request *mrq,
                                 u64 *data, bool intr)
{
        u32 req_flags = mrq->data->flags;

        *data = CQHCI_VALID(1) |
                CQHCI_END(1) |
                CQHCI_INT(intr) |
                CQHCI_ACT(0x5) |
                CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
                CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
                CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
                CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
                CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
                CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
                CQHCI_BLK_COUNT(mrq->data->blocks) |
                CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

        pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
                 mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
}
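
/*
 * Map the request's scatterlist for DMA. Returns the number of mapped
 * segments, or a negative error code if there is no data or mapping fails.
 */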
static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
        int sg_count;
        struct mmc_data *data = mrq->data;

        if (!data)
                return -EINVAL;

        sg_count = dma_map_sg(mmc_dev(host), data->sg,
                              data->sg_len,
                              (data->flags & MMC_DATA_WRITE) ?
                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!sg_count) {
                pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
                return -ENOMEM;
        }

        return sg_count;
}
static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
                                bool dma64)
{
        __le32 *attr = (__le32 __force *)desc;

        *attr = (CQHCI_VALID(1) |
                 CQHCI_END(end ? 1 : 0) |
                 CQHCI_INT(0) |
                 CQHCI_ACT(0x4) |
                 CQHCI_DAT_LENGTH(len));

        if (dma64) {
                __le64 *dataddr = (__le64 __force *)(desc + 4);

                dataddr[0] = cpu_to_le64(addr);
        } else {
                __le32 *dataddr = (__le32 __force *)(desc + 4);

                dataddr[0] = cpu_to_le32(addr);
        }
}
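
/*
 * Map the request for DMA and write one transfer descriptor per scatterlist
 * segment into the slot's transfer descriptor list, marking the last segment
 * with the end bit.
 */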
static int cqhci_prep_tran_desc(struct mmc_request *mrq,
                                struct cqhci_host *cq_host, int tag)
{
        struct mmc_data *data = mrq->data;
        int i, sg_count, len;
        bool end = false;
        bool dma64 = cq_host->dma64;
        dma_addr_t addr;
        u8 *desc;
        struct scatterlist *sg;

        sg_count = cqhci_dma_map(mrq->host, mrq);
        if (sg_count < 0) {
                pr_err("%s: %s: unable to map sg lists, %d\n",
                       mmc_hostname(mrq->host), __func__, sg_count);
                return sg_count;
        }

        desc = get_trans_desc(cq_host, tag);

        for_each_sg(data->sg, sg, sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                if ((i+1) == sg_count)
                        end = true;
                cqhci_set_tran_desc(desc, addr, len, end, dma64);
                desc += cq_host->trans_desc_len;
        }

        return 0;
}
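
/*
 * Build the task descriptor for a direct command (DCMD) in the reserved slot:
 * derive the response type and command timing from the command flags, then
 * encode the opcode and place the argument in the data address field.
 */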
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
                                 struct mmc_request *mrq)
{
        u64 *task_desc = NULL;
        u64 data = 0;
        u8 resp_type;
        u8 *desc;
        __le64 *dataddr;
        struct cqhci_host *cq_host = mmc->cqe_private;
        u8 timing;

        if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
                resp_type = 0x0;
                timing = 0x1;
        } else {
                if (mrq->cmd->flags & MMC_RSP_R1B) {
                        resp_type = 0x3;
                        timing = 0x0;
                } else {
                        resp_type = 0x2;
                        timing = 0x1;
                }
        }

        task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
        memset(task_desc, 0, cq_host->task_desc_len);
        data |= (CQHCI_VALID(1) |
                 CQHCI_END(1) |
                 CQHCI_INT(1) |
                 CQHCI_QBAR(1) |
                 CQHCI_ACT(0x5) |
                 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
                 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
        *task_desc |= data;
        desc = (u8 *)task_desc;
        pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
                 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
        dataddr = (__le64 __force *)(desc + 4);
        dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}
static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (data) {
                dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
                             (data->flags & MMC_DATA_READ) ?
                             DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
        return mrq->cmd ? DCMD_SLOT : mrq->tag;
}
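
/*
 * Queue a request: prepare the task (and transfer) descriptors for the slot,
 * then, under the host lock, record the mrq in the slot and ring the task
 * doorbell. Direct commands go through the DCMD descriptor path instead.
 */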
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        int err = 0;
        u64 data = 0;
        u64 *task_desc = NULL;
        int tag = cqhci_tag(mrq);
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;

        if (!cq_host->enabled) {
                pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
                return -EINVAL;
        }

        /* First request after resume has to re-enable */
        if (!cq_host->activated)
                __cqhci_enable(cq_host);

        if (!mmc->cqe_on) {
                cqhci_writel(cq_host, 0, CQHCI_CTL);
                mmc->cqe_on = true;
                pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
                if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
                        pr_err("%s: cqhci: CQE failed to exit halt state\n",
                               mmc_hostname(mmc));
                }
                if (cq_host->ops->enable)
                        cq_host->ops->enable(mmc);
        }

        if (mrq->data) {
                task_desc = (__le64 __force *)get_desc(cq_host, tag);
                cqhci_prep_task_desc(mrq, &data, 1);
                *task_desc = cpu_to_le64(data);
                err = cqhci_prep_tran_desc(mrq, cq_host, tag);
                if (err) {
                        pr_err("%s: cqhci: failed to setup tx desc: %d\n",
                               mmc_hostname(mmc), err);
                        return err;
                }
        } else {
                cqhci_prep_dcmd_desc(mmc, mrq);
        }

        spin_lock_irqsave(&cq_host->lock, flags);

        if (cq_host->recovery_halt) {
                err = -EBUSY;
                goto out_unlock;
        }

        cq_host->slot[tag].mrq = mrq;
        cq_host->slot[tag].flags = 0;

        cq_host->qcnt += 1;

        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
                         mmc_hostname(mmc), tag);
out_unlock:
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (err)
                cqhci_post_req(mmc, mrq);

        return err;
}
static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
                                  bool notify)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->recovery_halt) {
                cq_host->recovery_halt = true;
                pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
                wake_up(&cq_host->wait_queue);
                if (notify && mrq->recovery_notifier)
                        mrq->recovery_notifier(mrq);
        }
}
static unsigned int cqhci_error_flags(int error1, int error2)
{
        int error = error1 ? error1 : error2;

        switch (error) {
        case -EILSEQ:
                return CQHCI_HOST_CRC;
        case -ETIMEDOUT:
                return CQHCI_HOST_TIMEOUT;
        default:
                return CQHCI_HOST_OTHER;
        }
}
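
/*
 * Handle an error interrupt: read TERRI to find the failing command and/or
 * data task, flag the affected slots and trigger recovery. If the hardware
 * does not identify a task, flag one outstanding task anyway so that forward
 * progress is guaranteed.
 */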
static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
                            int data_error)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot;
        u32 terri;
        int tag;

        spin_lock(&cq_host->lock);

        terri = cqhci_readl(cq_host, CQHCI_TERRI);

        pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                 mmc_hostname(mmc), status, cmd_error, data_error, terri);

        /* Forget about errors when recovery has already been triggered */
        if (cq_host->recovery_halt)
                goto out_unlock;

        if (!cq_host->qcnt) {
                WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                          mmc_hostname(mmc), status, cmd_error, data_error,
                          terri);
                goto out_unlock;
        }

        if (CQHCI_TERRI_C_VALID(terri)) {
                tag = CQHCI_TERRI_C_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(cmd_error, data_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (CQHCI_TERRI_D_VALID(terri)) {
                tag = CQHCI_TERRI_D_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (!cq_host->recovery_halt) {
                /*
                 * The only way to guarantee forward progress is to mark at
                 * least one task in error, so if none is indicated, pick one.
                 */
                for (tag = 0; tag < NUM_SLOTS; tag++) {
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                        break;
                }
        }

out_unlock:
        spin_unlock(&cq_host->lock);
}
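
/*
 * Complete the request held in a slot once its task completion has been
 * signalled. During recovery, completion is deferred: the slot is only marked
 * completed and the request is handed back later by cqhci_recover_mrqs().
 */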
static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq) {
                WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
                          mmc_hostname(mmc), tag);
                return;
        }

        /* No completions allowed during recovery */
        if (cq_host->recovery_halt) {
                slot->flags |= CQHCI_COMPLETED;
                return;
        }

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                if (data->error)
                        data->bytes_xfered = 0;
                else
                        data->bytes_xfered = data->blksz * data->blocks;
        }

        mmc_cqe_request_done(mmc, mrq);
}
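
/*
 * Top-level CQHCI interrupt handler, called from the host controller driver's
 * own interrupt handler together with any command/data error it has decoded.
 * Error conditions are routed to cqhci_error_irq(); task completions are read
 * from the TCN register and the corresponding requests finished.
 */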
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
                      int data_error)
{
        u32 status;
        unsigned long tag = 0, comp_status;
        struct cqhci_host *cq_host = mmc->cqe_private;

        status = cqhci_readl(cq_host, CQHCI_IS);
        cqhci_writel(cq_host, status, CQHCI_IS);

        pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

        if ((status & CQHCI_IS_RED) || cmd_error || data_error)
                cqhci_error_irq(mmc, status, cmd_error, data_error);

        if (status & CQHCI_IS_TCC) {
                /* read TCN and complete the request */
                comp_status = cqhci_readl(cq_host, CQHCI_TCN);
                cqhci_writel(cq_host, comp_status, CQHCI_TCN);
                pr_debug("%s: cqhci: TCN: 0x%08lx\n",
                         mmc_hostname(mmc), comp_status);

                spin_lock(&cq_host->lock);

                for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
                        /* complete the corresponding mrq */
                        pr_debug("%s: cqhci: completing tag %lu\n",
                                 mmc_hostname(mmc), tag);
                        cqhci_finish_mrq(mmc, tag);
                }

                if (cq_host->waiting_for_idle && !cq_host->qcnt) {
                        cq_host->waiting_for_idle = false;
                        wake_up(&cq_host->wait_queue);
                }

                spin_unlock(&cq_host->lock);
        }

        if (status & CQHCI_IS_TCL)
                wake_up(&cq_host->wait_queue);

        if (status & CQHCI_IS_HAC)
                wake_up(&cq_host->wait_queue);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
        unsigned long flags;
        bool is_idle;

        spin_lock_irqsave(&cq_host->lock, flags);
        is_idle = !cq_host->qcnt || cq_host->recovery_halt;
        *ret = cq_host->recovery_halt ? -EBUSY : 0;
        cq_host->waiting_for_idle = !is_idle;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int ret;

        wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

        return ret;
}
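
/*
 * Called when a queued request has timed out. If the request is still owned
 * by its slot, mark it with an external timeout and request recovery; the
 * return value reports whether the request really did time out here.
 */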
static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
                          bool *recovery_needed)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int tag = cqhci_tag(mrq);
        struct cqhci_slot *slot = &cq_host->slot[tag];
        unsigned long flags;
        bool timed_out;

        spin_lock_irqsave(&cq_host->lock, flags);
        timed_out = slot->mrq == mrq;
        if (timed_out) {
                slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
                cqhci_recovery_needed(mmc, mrq, false);
                *recovery_needed = cq_host->recovery_halt;
        }
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (timed_out) {
                pr_err("%s: cqhci: timeout for tag %d\n",
                       mmc_hostname(mmc), tag);
                cqhci_dumpregs(cq_host);
        }

        return timed_out;
}
static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
        return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_CLEAR_ALL_TASKS;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_tasks_cleared(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to clear tasks\n",
                         mmc_hostname(mmc));

        return ret;
}
static bool cqhci_halted(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}
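
/*
 * Request a controller halt and wait (with a timeout in milliseconds) for the
 * halt bit to become set. Returns true if the controller is halted.
 */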
static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        if (cqhci_halted(cq_host))
                return true;

        cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_HALT;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_halted(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

        return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT 5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, true);

        mmc->cqe_on = false;
}
static int cqhci_error_from_flags(unsigned int flags)
{
        if (!flags)
                return 0;

        /* CRC errors might indicate re-tuning so prefer to report that */
        if (flags & CQHCI_HOST_CRC)
                return -EILSEQ;

        if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
                return -ETIMEDOUT;

        return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq)
                return;

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                data->bytes_xfered = 0;
                data->error = cqhci_error_from_flags(slot->flags);
        } else {
                mrq->cmd->error = cqhci_error_from_flags(slot->flags);
        }

        mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
        int i;

        for (i = 0; i < cq_host->num_slots; i++)
                cqhci_recover_mrq(cq_host, i);
}
/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT 20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT 20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;
        u32 cqcfg;
        bool ok;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

        if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                ok = false;

        /*
         * The specification contradicts itself, by saying that tasks cannot be
         * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
         * be disabled/re-enabled, but not to disable before clearing tasks.
         * Have a go anyway.
         */
        if (!ok) {
                pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
                cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                cqcfg |= CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                /* Be sure that there are no tasks */
                ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
                if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                        ok = false;
                WARN_ON(!ok);
        }

        cqhci_recover_mrqs(cq_host);

        WARN_ON(cq_host->qcnt);

        spin_lock_irqsave(&cq_host->lock, flags);
        cq_host->qcnt = 0;
        cq_host->recovery_halt = false;
        mmc->cqe_on = false;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        /* Ensure all writes are done before interrupts are re-enabled */
        wmb();

        cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}
static const struct mmc_cqe_ops cqhci_cqe_ops = {
        .cqe_enable = cqhci_enable,
        .cqe_disable = cqhci_disable,
        .cqe_request = cqhci_request,
        .cqe_post_req = cqhci_post_req,
        .cqe_off = cqhci_off,
        .cqe_wait_for_idle = cqhci_wait_for_idle,
        .cqe_timeout = cqhci_timeout,
        .cqe_recovery_start = cqhci_recovery_start,
        .cqe_recovery_finish = cqhci_recovery_finish,
};
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
        struct cqhci_host *cq_host;
        struct resource *cqhci_memres = NULL;

        /* check and setup CMDQ interface */
        cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                    "cqhci_mem");
        if (!cqhci_memres) {
                dev_dbg(&pdev->dev, "CMDQ not supported\n");
                return ERR_PTR(-EINVAL);
        }

        cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
        if (!cq_host)
                return ERR_PTR(-ENOMEM);
        cq_host->mmio = devm_ioremap(&pdev->dev,
                                     cqhci_memres->start,
                                     resource_size(cqhci_memres));
        if (!cq_host->mmio) {
                dev_err(&pdev->dev, "failed to remap cqhci regs\n");
                return ERR_PTR(-EBUSY);
        }
        dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

        return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
        return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
        u32 ver = cqhci_readl(cq_host, CQHCI_VER);

        return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}
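
/*
 * Hook the CQHCI engine up to an mmc_host: record the DMA addressing width,
 * reserve slot 31 for DCMD (reducing the usable queue depth by one when DCMD
 * is supported), allocate the slot array and initialize the lock and wait
 * queues used by the interrupt and recovery paths.
 */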
int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
               bool dma64)
{
        int err;

        cq_host->dma64 = dma64;
        cq_host->mmc = mmc;
        cq_host->mmc->cqe_private = cq_host;

        cq_host->num_slots = NUM_SLOTS;
        cq_host->dcmd_slot = DCMD_SLOT;

        mmc->cqe_ops = &cqhci_cqe_ops;

        mmc->cqe_qdepth = NUM_SLOTS;
        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                mmc->cqe_qdepth -= 1;

        cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
                                     sizeof(*cq_host->slot), GFP_KERNEL);
        if (!cq_host->slot) {
                err = -ENOMEM;
                goto out_err;
        }

        spin_lock_init(&cq_host->lock);

        init_completion(&cq_host->halt_comp);
        init_waitqueue_head(&cq_host->wait_queue);

        pr_info("%s: CQHCI version %u.%02u\n",
                mmc_hostname(mmc), cqhci_ver_major(cq_host),
                cqhci_ver_minor(cq_host));

        return 0;

out_err:
        pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
               mmc_hostname(mmc), cqhci_ver_major(cq_host),
               cqhci_ver_minor(cq_host), err);
        return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");