mmc_ops.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755
  1. /*
  2. * linux/drivers/mmc/core/mmc_ops.c
  3. *
  4. * Copyright 2006-2007 Pierre Ossman
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or (at
  9. * your option) any later version.
  10. */
  11. #include <linux/slab.h>
  12. #include <linux/export.h>
  13. #include <linux/types.h>
  14. #include <linux/scatterlist.h>
  15. #include <linux/mmc/host.h>
  16. #include <linux/mmc/card.h>
  17. #include <linux/mmc/mmc.h>
  18. #include "core.h"
  19. #include "mmc_ops.h"
  20. #define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
  21. static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
  22. bool ignore_crc)
  23. {
  24. int err;
  25. struct mmc_command cmd = {0};
  26. BUG_ON(!card);
  27. BUG_ON(!card->host);
  28. cmd.opcode = MMC_SEND_STATUS;
  29. if (!mmc_host_is_spi(card->host))
  30. cmd.arg = card->rca << 16;
  31. cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  32. if (ignore_crc)
  33. cmd.flags &= ~MMC_RSP_CRC;
  34. err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  35. if (err)
  36. return err;
  37. /* NOTE: callers are required to understand the difference
  38. * between "native" and SPI format status words!
  39. */
  40. if (status)
  41. *status = cmd.resp[0];
  42. return 0;
  43. }
/* Fetch the card status via CMD13, never ignoring CRC errors. */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, false);
}
  48. static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
  49. {
  50. int err;
  51. struct mmc_command cmd = {0};
  52. BUG_ON(!host);
  53. cmd.opcode = MMC_SELECT_CARD;
  54. if (card) {
  55. cmd.arg = card->rca << 16;
  56. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  57. } else {
  58. cmd.arg = 0;
  59. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  60. }
  61. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  62. if (err)
  63. return err;
  64. return 0;
  65. }
/* Select @card on its own host via CMD7. */
int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}
/* Deselect all cards on @host (CMD7 with a NULL card / RCA 0). */
int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
  75. /*
  76. * Write the value specified in the device tree or board code into the optional
  77. * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
  78. * drive strength of the DAT and CMD outputs. The actual meaning of a given
  79. * value is hardware dependant.
  80. * The presence of the DSR register can be determined from the CSD register,
  81. * bit 76.
  82. */
  83. int mmc_set_dsr(struct mmc_host *host)
  84. {
  85. struct mmc_command cmd = {0};
  86. cmd.opcode = MMC_SET_DSR;
  87. cmd.arg = (host->dsr << 16) | 0xffff;
  88. cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
  89. return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  90. }
/*
 * Reset the card to idle state with CMD0 (GO_IDLE_STATE).
 * Returns 0 on success or a negative error from the command.
 */
int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	/* Broadcast command: no retries. */
	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* Reset disables SPI CRC checking until re-enabled via CMD59. */
	host->use_spi_crc = 0;

	return err;
}
/*
 * Issue CMD1 (SEND_OP_COND), polling up to 100 times at 10 ms intervals
 * until the card reports that power-up has completed.  An @ocr of 0 is a
 * probe only: a single pass with no busy polling.  On native (non-SPI)
 * hosts the card's OCR response is returned through @rocr when non-NULL.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	/* SPI hosts do not pass the OCR in the CMD1 argument. */
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			/* SPI: done when the card leaves the idle state */
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			/* native: done when the busy bit is set */
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		/* Assume timeout; cleared if a later pass succeeds. */
		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
  150. int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
  151. {
  152. int err;
  153. struct mmc_command cmd = {0};
  154. BUG_ON(!host);
  155. BUG_ON(!cid);
  156. cmd.opcode = MMC_ALL_SEND_CID;
  157. cmd.arg = 0;
  158. cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
  159. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  160. if (err)
  161. return err;
  162. memcpy(cid, cmd.resp, sizeof(u32) * 4);
  163. return 0;
  164. }
  165. int mmc_set_relative_addr(struct mmc_card *card)
  166. {
  167. int err;
  168. struct mmc_command cmd = {0};
  169. BUG_ON(!card);
  170. BUG_ON(!card->host);
  171. cmd.opcode = MMC_SET_RELATIVE_ADDR;
  172. cmd.arg = card->rca << 16;
  173. cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  174. err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
  175. if (err)
  176. return err;
  177. return 0;
  178. }
  179. static int
  180. mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
  181. {
  182. int err;
  183. struct mmc_command cmd = {0};
  184. BUG_ON(!host);
  185. BUG_ON(!cxd);
  186. cmd.opcode = opcode;
  187. cmd.arg = arg;
  188. cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
  189. err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
  190. if (err)
  191. return err;
  192. memcpy(cxd, cmd.resp, sizeof(u32) * 4);
  193. return 0;
  194. }
/*
 * Read a register (CSD/CID/EXT_CSD) as a single data block of @len bytes.
 *
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	/* Command errors take precedence over data errors. */
	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
  239. int mmc_send_csd(struct mmc_card *card, u32 *csd)
  240. {
  241. int ret, i;
  242. u32 *csd_tmp;
  243. if (!mmc_host_is_spi(card->host))
  244. return mmc_send_cxd_native(card->host, card->rca << 16,
  245. csd, MMC_SEND_CSD);
  246. csd_tmp = kzalloc(16, GFP_KERNEL);
  247. if (!csd_tmp)
  248. return -ENOMEM;
  249. ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
  250. if (ret)
  251. goto err;
  252. for (i = 0;i < 4;i++)
  253. csd[i] = be32_to_cpu(csd_tmp[i]);
  254. err:
  255. kfree(csd_tmp);
  256. return ret;
  257. }
  258. int mmc_send_cid(struct mmc_host *host, u32 *cid)
  259. {
  260. int ret, i;
  261. u32 *cid_tmp;
  262. if (!mmc_host_is_spi(host)) {
  263. if (!host->card)
  264. return -EINVAL;
  265. return mmc_send_cxd_native(host, host->card->rca << 16,
  266. cid, MMC_SEND_CID);
  267. }
  268. cid_tmp = kzalloc(16, GFP_KERNEL);
  269. if (!cid_tmp)
  270. return -ENOMEM;
  271. ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
  272. if (ret)
  273. goto err;
  274. for (i = 0;i < 4;i++)
  275. cid[i] = be32_to_cpu(cid_tmp[i]);
  276. err:
  277. kfree(cid_tmp);
  278. return ret;
  279. }
  280. int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
  281. {
  282. int err;
  283. u8 *ext_csd;
  284. if (!card || !new_ext_csd)
  285. return -EINVAL;
  286. if (!mmc_can_ext_csd(card))
  287. return -EOPNOTSUPP;
  288. /*
  289. * As the ext_csd is so large and mostly unused, we don't store the
  290. * raw block in mmc_card.
  291. */
  292. ext_csd = kzalloc(512, GFP_KERNEL);
  293. if (!ext_csd)
  294. return -ENOMEM;
  295. err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
  296. 512);
  297. if (err)
  298. kfree(ext_csd);
  299. else
  300. *new_ext_csd = ext_csd;
  301. return err;
  302. }
  303. EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
  304. int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
  305. {
  306. struct mmc_command cmd = {0};
  307. int err;
  308. cmd.opcode = MMC_SPI_READ_OCR;
  309. cmd.arg = highcap ? (1 << 30) : 0;
  310. cmd.flags = MMC_RSP_SPI_R3;
  311. err = mmc_wait_for_cmd(host, &cmd, 0);
  312. *ocrp = cmd.resp[1];
  313. return err;
  314. }
  315. int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
  316. {
  317. struct mmc_command cmd = {0};
  318. int err;
  319. cmd.opcode = MMC_SPI_CRC_ON_OFF;
  320. cmd.flags = MMC_RSP_SPI_R1;
  321. cmd.arg = use_crc;
  322. err = mmc_wait_for_cmd(host, &cmd, 0);
  323. if (!err)
  324. host->use_spi_crc = use_crc;
  325. return err;
  326. }
/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		return 0;

	/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to
	 * poll to detect busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				return err;
		}
		/* Host handles busy detection in hardware: nothing to poll. */
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * We are not allowed to issue a status command and the host
		 * doesn't support MMC_CAP_WAIT_WHILE_BUSY, then we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			return 0;
		}

		/* Timeout if the device never leaves the program state. */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		/* Warn on any unexpected error bits in the status word. */
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);
/*
 * mmc_switch - convenience wrapper around __mmc_switch() using the busy
 * signal, CMD13 status polling, and strict CRC checking.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
			false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
/*
 * Send a tuning block command matching the current bus width and compare
 * the data received against the reference pattern.  Returns 0 when the
 * read-back matches, -EIO on a pattern mismatch, -EINVAL for unsupported
 * bus widths, or the underlying command/data error.
 */
int mmc_send_tuning(struct mmc_host *host)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;
	u32 opcode;

	/* Select the reference pattern and opcode for the active bus width. */
	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
		opcode = MMC_SEND_TUNING_BLOCK;
	} else
		return -EINVAL;

	/* DMA-capable bounce buffer for the read-back. */
	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, Tuning process
	 * is normally shorter 40 executions of CMD19,
	 * and timeout value should be shorter than 150 ms
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	/* A mismatch against the reference pattern means tuning failed. */
	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
/*
 * Run one half of the bus test procedure: BUS_TEST_W writes a known
 * pattern, BUS_TEST_R reads back the card's response.  @len selects the
 * 4- or 8-byte test pattern.  For BUS_TEST_R, returns -EIO when a
 * checked byte is not the bitwise inverse of the written one;
 * command/data errors take precedence over the comparison result.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	/* Only the write half sends the pattern; the read half receives. */
	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);

	mmc_wait_for_req(host, &mrq);
	err = 0;

	/* Only the first len/4 bytes are compared; each received byte must
	 * be the bitwise inverse of the corresponding pattern byte. */
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
  564. int mmc_bus_test(struct mmc_card *card, u8 bus_width)
  565. {
  566. int err, width;
  567. if (bus_width == MMC_BUS_WIDTH_8)
  568. width = 8;
  569. else if (bus_width == MMC_BUS_WIDTH_4)
  570. width = 4;
  571. else if (bus_width == MMC_BUS_WIDTH_1)
  572. return 0; /* no need for test */
  573. else
  574. return -EINVAL;
  575. /*
  576. * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
  577. * is a problem. This improves chances that the test will work.
  578. */
  579. mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
  580. err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
  581. return err;
  582. }
/*
 * Issue an HPI (High Priority Interrupt) command to interrupt the card's
 * ongoing operation.  The opcode (STOP_TRANSMISSION or SEND_STATUS
 * flavour) comes from EXT_CSD; bit 0 of the argument marks the command
 * as an HPI request.  On success the R1 response is stored in *status
 * when @status is non-NULL.
 */
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card didn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;	/* bit 0 flags the HPI request */

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}

	if (status)
		*status = cmd.resp[0];

	return 0;
}
  611. int mmc_can_ext_csd(struct mmc_card *card)
  612. {
  613. return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
  614. }