/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
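/*
 * Fixed tuning block patterns: as far as I can tell, the 4-bit pattern is
 * the CMD19 tuning block defined by the SD specification, and the 8-bit
 * pattern is the CMD21 tuning block defined by the eMMC specification for
 * HS200.
 */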
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
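/*
 * Usage sketch (hypothetical caller, illustrative only): read the card
 * state via CMD13. Remember that SPI-format status words differ from the
 * native ones.
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && !mmc_host_is_spi(card->host) &&
 *	    R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		;	// card is back in the transfer state
 */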
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
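/*
 * Example (hypothetical device-tree fragment, a minimal sketch): the core
 * parses a "dsr" property into host->dsr, and CMD4 above carries it in the
 * upper 16 bits of the argument with the low bits stuffed.
 *
 *	&mmc0 {
 *		dsr = <0x0404>;
 *	};
 */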
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
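/*
 * Usage sketch (hypothetical caller, illustrative only): probe first with
 * ocr == 0, which does a single pass, then request a voltage window and let
 * the loop above wait for the card to leave the busy state.
 *
 *	u32 ocr, rocr;
 *	int err = mmc_send_op_cond(host, 0, &ocr);
 *	if (!err)
 *		err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 *
 * The (1 << 30) bit is the sector-mode (high-capacity) flag in the OCR.
 */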
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
/*
 * NOTE: the caller must pass either a DMA-capable buffer or an on-stack
 * buffer in @buf (the latter incurs some extra overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	u32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	u32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
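/*
 * Usage sketch (hypothetical caller, illustrative only): on success the
 * caller owns the 512-byte buffer and must kfree() it.
 *
 *	u8 *ext_csd;
 *
 *	if (!mmc_get_ext_csd(card, &ext_csd)) {
 *		pr_info("EXT_CSD rev %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */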
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}
static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * If we aren't allowed to poll with CMD13 and the host has no
	 * ->card_busy() callback, rely on waiting out the stated timeout.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);
		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}
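/*
 * Note on the polling strategies above: the host's ->card_busy() callback
 * is preferred when present; otherwise CMD13 is polled (optionally treating
 * CRC errors as "still busy", which matters while the bus speed is being
 * changed); if neither is allowed, the stated timeout is simply waited out.
 */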
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if there are CRC errors when polling with CMD13
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or we used HW busy detection above, there is no need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
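/*
 * Usage sketch (hypothetical caller, illustrative only): enable high-speed
 * timing by writing the HS_TIMING byte of the EXT_CSD.
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *			 1, card->ext_csd.generic_cmd6_time);
 */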
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
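/*
 * Usage sketch (hypothetical host driver ->execute_tuning() loop,
 * illustrative only; my_set_sample_phase() is an invented helper):
 * step the sample phase until the tuning block reads back cleanly.
 *
 *	for (phase = 0; phase < MAX_PHASES; phase++) {
 *		my_set_sample_phase(host, phase);
 *		if (!mmc_send_tuning(host->mmc, opcode, NULL))
 *			break;		// found a working phase
 *	}
 */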
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* DMA onto the stack is unsafe/nonportable, but callers of this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;

	/* The card sends the test pattern back inverted, so each byte
	 * XORed with what we wrote should equal 0xff.
	 */
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
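/*
 * Usage sketch (hypothetical caller, illustrative only): verify the bus
 * after switching the card to a wider data width.
 *
 *	if (mmc_bus_test(card, MMC_BUS_WIDTH_8))
 *		pr_warn("8-bit bus test failed, falling back\n");
 */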
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card didn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}
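/*
 * Usage sketch (hypothetical caller, illustrative only): interrupt a
 * long-running foreground operation, then wait for the card to leave the
 * programming state before issuing new data commands.
 *
 *	u32 status;
 *	int err = mmc_send_hpi_cmd(card, &status);
 */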
int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}