mmc_ops.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059
/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
  11. #include <linux/slab.h>
  12. #include <linux/export.h>
  13. #include <linux/types.h>
  14. #include <linux/scatterlist.h>
  15. #include <linux/mmc/host.h>
  16. #include <linux/mmc/card.h>
  17. #include <linux/mmc/mmc.h>
  18. #include "core.h"
  19. #include "card.h"
  20. #include "host.h"
  21. #include "mmc_ops.h"
  22. #define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
/*
 * Reference tuning block for 4-bit bus width; the data received in
 * response to a tuning command is compared against this pattern in
 * mmc_send_tuning().
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
/*
 * Reference tuning block for 8-bit bus width; twice the size of the
 * 4-bit pattern.  Compared against received data in mmc_send_tuning().
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
  51. int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
  52. {
  53. int err;
  54. struct mmc_command cmd = {};
  55. cmd.opcode = MMC_SEND_STATUS;
  56. if (!mmc_host_is_spi(card->host))
  57. cmd.arg = card->rca << 16;
  58. cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  59. err = mmc_wait_for_cmd(card->host, &cmd, retries);
  60. if (err)
  61. return err;
  62. /* NOTE: callers are required to understand the difference
  63. * between "native" and SPI format status words!
  64. */
  65. if (status)
  66. *status = cmd.resp[0];
  67. return 0;
  68. }
  69. EXPORT_SYMBOL_GPL(__mmc_send_status);
  70. int mmc_send_status(struct mmc_card *card, u32 *status)
  71. {
  72. return __mmc_send_status(card, status, MMC_CMD_RETRIES);
  73. }
  74. EXPORT_SYMBOL_GPL(mmc_send_status);
  75. static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
  76. {
  77. struct mmc_command cmd = {};
  78. cmd.opcode = MMC_SELECT_CARD;
  79. if (card) {
  80. cmd.arg = card->rca << 16;
  81. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  82. } else {
  83. cmd.arg = 0;
  84. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  85. }
  86. return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  87. }
  88. int mmc_select_card(struct mmc_card *card)
  89. {
  90. return _mmc_select_card(card->host, card);
  91. }
  92. int mmc_deselect_cards(struct mmc_host *host)
  93. {
  94. return _mmc_select_card(host, NULL);
  95. }
  96. /*
  97. * Write the value specified in the device tree or board code into the optional
  98. * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
  99. * drive strength of the DAT and CMD outputs. The actual meaning of a given
  100. * value is hardware dependant.
  101. * The presence of the DSR register can be determined from the CSD register,
  102. * bit 76.
  103. */
  104. int mmc_set_dsr(struct mmc_host *host)
  105. {
  106. struct mmc_command cmd = {};
  107. cmd.opcode = MMC_SET_DSR;
  108. cmd.arg = (host->dsr << 16) | 0xffff;
  109. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  110. return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  111. }
/*
 * Send CMD0 (GO_IDLE_STATE) to reset the card, taking care of the
 * chip-select handling required to avoid accidentally entering SPI mode.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	/* No retries: CMD0 is broadcast and has no native response to check. */
	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* After reset the card's SPI CRC state is back to default. */
	host->use_spi_crc = 0;

	return err;
}
/*
 * Send CMD1 (SEND_OP_COND), polling until the card reports that its
 * power-up/reset sequence is complete.
 *
 * @ocr:  requested operating conditions; 0 means "probe only" (single pass).
 * @rocr: if non-NULL (and not SPI), receives the card's OCR response.
 *
 * Returns 0 on success, -ETIMEDOUT if the card stays busy after 100
 * iterations (~1 s with the 10 ms delay below), or a command error.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	/* In SPI mode CMD1 takes no OCR argument. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			/* SPI: in-idle bit clears when reset is done. */
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			/* Native: busy bit set means power-up complete. */
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
  170. int mmc_set_relative_addr(struct mmc_card *card)
  171. {
  172. struct mmc_command cmd = {};
  173. cmd.opcode = MMC_SET_RELATIVE_ADDR;
  174. cmd.arg = card->rca << 16;
  175. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  176. return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  177. }
  178. static int
  179. mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
  180. {
  181. int err;
  182. struct mmc_command cmd = {};
  183. cmd.opcode = opcode;
  184. cmd.arg = arg;
  185. cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
  186. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  187. if (err)
  188. return err;
  189. memcpy(cxd, cmd.resp, sizeof(u32) * 4);
  190. return 0;
  191. }
/*
 * Read a register (CSD/CID/EXT_CSD) as a data block rather than as a
 * command response — used in SPI mode and for EXT_CSD.
 *
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		  u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Single-block read of exactly @len bytes. */
	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
  236. static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
  237. {
  238. int ret, i;
  239. __be32 *csd_tmp;
  240. csd_tmp = kzalloc(16, GFP_KERNEL);
  241. if (!csd_tmp)
  242. return -ENOMEM;
  243. ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
  244. if (ret)
  245. goto err;
  246. for (i = 0; i < 4; i++)
  247. csd[i] = be32_to_cpu(csd_tmp[i]);
  248. err:
  249. kfree(csd_tmp);
  250. return ret;
  251. }
  252. int mmc_send_csd(struct mmc_card *card, u32 *csd)
  253. {
  254. if (mmc_host_is_spi(card->host))
  255. return mmc_spi_send_csd(card, csd);
  256. return mmc_send_cxd_native(card->host, card->rca << 16, csd,
  257. MMC_SEND_CSD);
  258. }
  259. static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
  260. {
  261. int ret, i;
  262. __be32 *cid_tmp;
  263. cid_tmp = kzalloc(16, GFP_KERNEL);
  264. if (!cid_tmp)
  265. return -ENOMEM;
  266. ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
  267. if (ret)
  268. goto err;
  269. for (i = 0; i < 4; i++)
  270. cid[i] = be32_to_cpu(cid_tmp[i]);
  271. err:
  272. kfree(cid_tmp);
  273. return ret;
  274. }
  275. int mmc_send_cid(struct mmc_host *host, u32 *cid)
  276. {
  277. if (mmc_host_is_spi(host))
  278. return mmc_spi_send_cid(host, cid);
  279. return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
  280. }
  281. int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
  282. {
  283. int err;
  284. u8 *ext_csd;
  285. if (!card || !new_ext_csd)
  286. return -EINVAL;
  287. if (!mmc_can_ext_csd(card))
  288. return -EOPNOTSUPP;
  289. /*
  290. * As the ext_csd is so large and mostly unused, we don't store the
  291. * raw block in mmc_card.
  292. */
  293. ext_csd = kzalloc(512, GFP_KERNEL);
  294. if (!ext_csd)
  295. return -ENOMEM;
  296. err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
  297. 512);
  298. if (err)
  299. kfree(ext_csd);
  300. else
  301. *new_ext_csd = ext_csd;
  302. return err;
  303. }
  304. EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
  305. int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
  306. {
  307. struct mmc_command cmd = {};
  308. int err;
  309. cmd.opcode = MMC_SPI_READ_OCR;
  310. cmd.arg = highcap ? (1 << 30) : 0;
  311. cmd.flags = MMC_RSP_SPI_R3;
  312. err = mmc_wait_for_cmd(host, &cmd, 0);
  313. *ocrp = cmd.resp[1];
  314. return err;
  315. }
  316. int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
  317. {
  318. struct mmc_command cmd = {};
  319. int err;
  320. cmd.opcode = MMC_SPI_CRC_ON_OFF;
  321. cmd.flags = MMC_RSP_SPI_R1;
  322. cmd.arg = use_crc;
  323. err = mmc_wait_for_cmd(host, &cmd, 0);
  324. if (!err)
  325. host->use_spi_crc = use_crc;
  326. return err;
  327. }
/*
 * Inspect a status word obtained after a SWITCH command and translate
 * it into an errno.  Only a SWITCH/illegal-command error is fatal
 * (-EBADMSG); other unexpected bits merely trigger a warning.
 */
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		/*
		 * Mask of R1 bits that should not be set after a switch;
		 * presumably the R1 error bits — TODO confirm against the
		 * R1 status bit definitions.
		 */
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}
  342. /* Caller must hold re-tuning */
  343. int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
  344. {
  345. u32 status;
  346. int err;
  347. err = mmc_send_status(card, &status);
  348. if (!crc_err_fatal && err == -EILSEQ)
  349. return 0;
  350. if (err)
  351. return err;
  352. return mmc_switch_status_error(card->host, status);
  353. }
  354. int mmc_switch_status(struct mmc_card *card)
  355. {
  356. return __mmc_switch_status(card, true);
  357. }
/*
 * Wait for the card to leave its busy state after a SWITCH, either via
 * the host's ->card_busy() hook or by polling CMD13.
 *
 * @timeout_ms:    0 means use the MMC_OPS_TIMEOUT_MS fallback.
 * @send_status:   whether polling with CMD13 is permitted.
 * @retry_crc_err: treat -EILSEQ from CMD13 as "still busy" rather
 *                 than an error.
 *
 * Returns 0 when the card is no longer busy, -ETIMEDOUT if it remains
 * busy past the deadline, or a status/command error.
 */
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			     bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using ->card_busy(), then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		/* Prefer the host's hardware busy detection when available. */
		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				/* CRC glitch while busy-signalling: keep polling. */
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	/* Hold re-tuning across the switch; released in the out path. */
	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	/* CMD6 argument packs mode, register index, value and cmd set. */
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/*If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		/* Roll the timing back if the switch did not take effect. */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
  489. int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
  490. unsigned int timeout_ms)
  491. {
  492. return __mmc_switch(card, set, index, value, timeout_ms, 0,
  493. true, true, false);
  494. }
  495. EXPORT_SYMBOL_GPL(mmc_switch);
/*
 * Execute one tuning command (CMD19/CMD21): read a tuning block and
 * compare it against the reference pattern for the current bus width.
 *
 * @cmd_error: if non-NULL, receives the raw command error (or 0).
 *
 * Returns 0 on a pattern match, -EIO on mismatch, -EINVAL for a 1-bit
 * bus, -ENOMEM, or the command/data error.
 */
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	/* Pick the reference pattern matching the configured bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	/* Heap buffer: the receive buffer must be DMA-capable. */
	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* A mismatch means this clock phase/delay setting is no good. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
  551. int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
  552. {
  553. struct mmc_command cmd = {};
  554. /*
  555. * eMMC specification specifies that CMD12 can be used to stop a tuning
  556. * command, but SD specification does not, so do nothing unless it is
  557. * eMMC.
  558. */
  559. if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
  560. return 0;
  561. cmd.opcode = MMC_STOP_TRANSMISSION;
  562. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
  563. /*
  564. * For drivers that override R1 to R1b, set an arbitrary timeout based
  565. * on the tuning timeout i.e. 150ms.
  566. */
  567. cmd.busy_timeout = 150;
  568. return mmc_wait_for_cmd(host, &cmd, 0);
  569. }
  570. EXPORT_SYMBOL_GPL(mmc_abort_tuning);
/*
 * Issue a BUS_TEST_W/BUS_TEST_R pair component: write a test pattern or
 * read back and verify the card's inverted echo of it.
 *
 * @opcode: MMC_BUS_TEST_W or MMC_BUS_TEST_R.
 * @len:    4 or 8, matching the bus width under test.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/*
		 * The card echoes the pattern bit-inverted; a healthy line
		 * makes each xor come out 0xff.
		 *
		 * NOTE(review): only the first len/4 bytes are compared —
		 * long-standing behavior, verify intent before changing.
		 */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
  638. int mmc_bus_test(struct mmc_card *card, u8 bus_width)
  639. {
  640. int width;
  641. if (bus_width == MMC_BUS_WIDTH_8)
  642. width = 8;
  643. else if (bus_width == MMC_BUS_WIDTH_4)
  644. width = 4;
  645. else if (bus_width == MMC_BUS_WIDTH_1)
  646. return 0; /* no need for test */
  647. else
  648. return -EINVAL;
  649. /*
  650. * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
  651. * is a problem. This improves chances that the test will work.
  652. */
  653. mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
  654. return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
  655. }
/*
 * Issue the card's advertised HPI command (CMD12 or CMD13 with the HPI
 * bit set in the argument) and optionally return the response word.
 */
static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card didn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	/*
	 * The EXT_CSD tells us which opcode implements HPI on this card.
	 * NOTE(review): for any other opcode value cmd.flags stays 0 —
	 * presumably hpi_cmd is always one of these two; verify at the
	 * EXT_CSD parsing site.
	 */
	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	/* RCA in the high half, HPI bit (bit 0) set. */
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
/**
 *	mmc_interrupt_hpi - Issue for High priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issued High Priority Interrupt, and check for card status
 *	until out-of prg-state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		/* NOTE(review): positive 1, not an errno — callers treat
		 * any non-zero/-EINVAL value as a hard failure. */
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	/* Poll until the card leaves programming state or the card's
	 * advertised out-of-interrupt time elapses. */
	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
  740. int mmc_can_ext_csd(struct mmc_card *card)
  741. {
  742. return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
  743. }
  744. /**
  745. * mmc_stop_bkops - stop ongoing BKOPS
  746. * @card: MMC card to check BKOPS
  747. *
  748. * Send HPI command to stop ongoing background operations to
  749. * allow rapid servicing of foreground operations, e.g. read/
  750. * writes. Wait until the card comes out of the programming state
  751. * to avoid errors in servicing read/write requests.
  752. */
  753. int mmc_stop_bkops(struct mmc_card *card)
  754. {
  755. int err = 0;
  756. err = mmc_interrupt_hpi(card);
  757. /*
  758. * If err is EINVAL, we can't issue an HPI.
  759. * It should complete the BKOPS.
  760. */
  761. if (!err || (err == -EINVAL)) {
  762. mmc_card_clr_doing_bkops(card);
  763. mmc_retune_release(card->host);
  764. err = 0;
  765. }
  766. return err;
  767. }
/*
 * Refresh the cached BKOPS and exception-event status bytes from the
 * card's EXT_CSD.
 */
static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/* Hold the host only for the EXT_CSD transfer itself. */
	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	/* Nothing to do without manual BKOPS, or if already running. */
	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	/* Non-urgent levels can wait when triggered by an exception. */
	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	/* Urgent BKOPS run synchronously with busy signalling. */
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_OPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, 0,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		return;
	}

	/*
	 * For urgent bkops status (LEVEL_2 and more)
	 * bkops executed synchronously, otherwise
	 * the operation is in progress
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
  838. /*
  839. * Flush the cache to the non-volatile storage.
  840. */
  841. int mmc_flush_cache(struct mmc_card *card)
  842. {
  843. int err = 0;
  844. if (mmc_card_mmc(card) &&
  845. (card->ext_csd.cache_size > 0) &&
  846. (card->ext_csd.cache_ctrl & 1)) {
  847. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  848. EXT_CSD_FLUSH_CACHE, 1, 0);
  849. if (err)
  850. pr_err("%s: cache flush error %d\n",
  851. mmc_hostname(card->host), err);
  852. }
  853. return err;
  854. }
  855. EXPORT_SYMBOL(mmc_flush_cache);
  856. static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
  857. {
  858. u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
  859. int err;
  860. if (!card->ext_csd.cmdq_support)
  861. return -EOPNOTSUPP;
  862. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
  863. val, card->ext_csd.generic_cmd6_time);
  864. if (!err)
  865. card->ext_csd.cmdq_en = enable;
  866. return err;
  867. }
  868. int mmc_cmdq_enable(struct mmc_card *card)
  869. {
  870. return mmc_cmdq_switch(card, true);
  871. }
  872. EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
  873. int mmc_cmdq_disable(struct mmc_card *card)
  874. {
  875. return mmc_cmdq_switch(card, false);
  876. }
  877. EXPORT_SYMBOL_GPL(mmc_cmdq_disable);