/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/platform_data/brcmfmac-sdio.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdbrcm_isr(sdiodev->bus);

	return IRQ_HANDLED;
}

static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdbrcm_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
{
}

int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	u8 data;
	unsigned long flags;

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  sdiodev->pdata->oob_irq_nr);
		ret = request_irq(sdiodev->pdata->oob_irq_nr,
				  brcmf_sdio_oob_irqhandler,
				  sdiodev->pdata->oob_irq_flags,
				  "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;
		spin_lock_init(&sdiodev->irq_en_lock);
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		sdiodev->irq_en = true;
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

		ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);

		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
		sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}

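/*
 * Note on the interrupt setup above: when the platform data provides a
 * dedicated out-of-band GPIO interrupt, the handler is hooked up with
 * request_irq() and the card is told to route its interrupt to the separate
 * interrupt pin through the vendor-specific SDIO_CCCR_BRCM_SEPINT register.
 * Otherwise the regular in-band SDIO interrupt of function 1 is claimed via
 * the MMC stack. Per the SDIO spec, the CCCR IENx write enables functions 1
 * and 2 plus the master interrupt enable in bit 0, hence the value
 * (1 << SDIO_FUNC_1) | (1 << SDIO_FUNC_2) | 1.
 */
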
int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering\n");

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		sdio_claim_host(sdiodev->func[1]);
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func[1]);

		if (sdiodev->oob_irq_requested) {
			sdiodev->oob_irq_requested = false;
			if (sdiodev->irq_wake) {
				disable_irq_wake(sdiodev->pdata->oob_irq_nr);
				sdiodev->irq_wake = false;
			}
			free_irq(sdiodev->pdata->oob_irq_nr,
				 &sdiodev->func[1]->dev);
			sdiodev->irq_en = false;
		}
	} else {
		sdio_claim_host(sdiodev->func[1]);
		sdio_release_irq(sdiodev->func[2]);
		sdio_release_irq(sdiodev->func[1]);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}

static int
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
	int err = 0, i;
	u8 addr[3];
	s32 retry;

	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

	for (i = 0; i < 3; i++) {
		retry = 0;
		do {
			if (retry)
				usleep_range(1000, 2000);
			err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
					SDIO_FUNC_1,
					SBSDIO_FUNC1_SBADDRLOW + i,
					&addr[i]);
		} while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

		if (err) {
			brcmf_err("failed at addr:0x%0x\n",
				  SBSDIO_FUNC1_SBADDRLOW + i);
			break;
		}
	}

	return err;
}

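/*
 * Worked example for the window registers above: the backplane window holds
 * the upper bits of a 32-bit silicon backplane address, spread over three
 * byte-wide function 1 registers. Pointing the window at SI_ENUM_BASE
 * (0x18000000) writes 0x00 to SBADDRLOW (address bits 15:8, masked), 0x00 to
 * SBADDRMID (bits 23:16) and 0x18 to SBADDRHIGH (bits 31:24); the remaining
 * low-order address bits are then carried as the per-access offset inside
 * the window.
 */
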
static int
brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
	int err = 0;

	if (bar0 != sdiodev->sbwad) {
		err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
		if (err)
			return err;

		sdiodev->sbwad = bar0;
	}

	*addr &= SBSDIO_SB_OFT_ADDR_MASK;

	if (width == 4)
		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	return 0;
}

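/*
 * brcmf_sdio_addrprep() caches the programmed window base in sdiodev->sbwad
 * so the three SBADDR register writes are only issued when the target
 * address falls outside the window that is already set. For 4-byte accesses
 * the SBSDIO_SB_ACCESS_2_4B_FLAG bit is OR'd into the in-window offset,
 * telling the card to perform a 32-bit backplane transaction instead of an
 * 8-bit one.
 */
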
int
brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
			void *data, bool write)
{
	u8 func_num, reg_size;
	s32 retry = 0;
	int ret;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0) {
		func_num = SDIO_FUNC_0;
		reg_size = 1;
	} else if ((addr & ~REG_F1_MISC_MASK) == 0) {
		func_num = SDIO_FUNC_1;
		reg_size = 1;
	} else {
		func_num = SDIO_FUNC_1;
		reg_size = 4;

		ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
		if (ret)
			goto done;
	}

	do {
		if (!write)
			memset(data, 0, reg_size);
		/* wait 1 ms between retries to let the bus settle */
		if (retry)
			usleep_range(1000, 2000);
		if (reg_size == 1)
			ret = brcmf_sdioh_request_byte(sdiodev, write,
						       func_num, addr, data);
		else
			ret = brcmf_sdioh_request_word(sdiodev, write,
						       func_num, addr, data, 4);
	} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

done:
	if (ret != 0)
		brcmf_err("failed with %d\n", ret);

	return ret;
}

u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u8 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
	brcmf_dbg(SDIO, "data:0x%02x\n", data);

	if (ret)
		*ret = retval;

	return data;
}

u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
	brcmf_dbg(SDIO, "data:0x%08x\n", data);

	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
		      u8 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);

	if (ret)
		*ret = retval;
}

void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
		      u32 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);

	if (ret)
		*ret = retval;
}

static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
			     bool write, u32 addr, struct sk_buff *pkt)
{
	unsigned int req_sz;

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	/* Single skb use the standard mmc interface */
	req_sz = pkt->len + 3;
	req_sz &= (uint)~3;

	if (write)
		return sdio_memcpy_toio(sdiodev->func[fn], addr,
					((u8 *)(pkt->data)), req_sz);
	else if (fn == 1)
		return sdio_memcpy_fromio(sdiodev->func[fn],
					  ((u8 *)(pkt->data)), addr, req_sz);
	else
		/* function 2 read is FIFO operation */
		return sdio_readsb(sdiodev->func[fn],
				   ((u8 *)(pkt->data)), addr, req_sz);
}

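/*
 * The request length above is rounded up to the next 4-byte boundary with
 * (len + 3) & ~3, e.g. a 1514-byte frame is transferred as 1516 bytes while
 * a length that is already a multiple of four is left unchanged. Function 1
 * reads use an incrementing address, whereas function 2 reads target a FIFO
 * and therefore go through sdio_readsb(), which keeps the address fixed.
 */
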
/**
 * brcmf_sdio_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer head pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
				bool write, u32 addr,
				struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct sg_table st;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = sdiodev->func[fn]->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto exit;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = st.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1<<27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = st.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;
			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}
		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (fn == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	sg_free_table(&st);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}

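/*
 * The CMD53 argument assembled in the loop above follows the standard
 * SD_IO_RW_EXTENDED layout:
 *   bit  31     R/W flag (1 = write)
 *   bits 30:28  function number
 *   bit  27     block mode
 *   bit  26     op code (1 = incrementing address, used for function 1)
 *   bits 25:9   register address within the function
 *   bits 8:0    block count
 * Each pass through the outer loop packs as many scatter-gather segments as
 * the host allows (bounded by max_request_size and max_segment_count) into
 * one request and hands it to the MMC core with mmc_wait_for_req().
 */
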
int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int
brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, struct sk_buff *pkt)
{
	uint width;
	int err = 0;

	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
		  fn, addr, pkt->len);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
	if (err)
		goto done;

	err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);

done:
	return err;
}

int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
			    uint flags, struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb;
	struct sk_buff *skb;
	uint width;
	int err = 0;

	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
		  fn, addr, pktq->qlen);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
	if (err)
		goto done;

	if (pktq->qlen == 1)
		err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);

done:
	return err;
}

int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	uint width;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);

	if (!err)
		err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	uint width;
	int err;

	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
		  fn, addr, pktq->qlen);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
	if (err)
		return err;

	if (pktq->qlen == 1 || !sdiodev->sg_support)
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
			if (err)
				break;
		}
	else
		err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);

	return err;
}

int
brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		 u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
					     sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		/* reset the skb so it can be reused for the next chunk */
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}

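/*
 * brcmf_sdio_ramrw() chops a transfer into window-sized chunks. Assuming the
 * usual 32 KB window (SBSDIO_SB_OFT_ADDR_LIMIT == 0x8000), a 0x9000-byte
 * write to backplane address 0xC000 starts at in-window offset 0x4000, so
 * the first chunk is 0x4000 bytes; the window is then advanced and the
 * remaining 0x5000 bytes go out as a second chunk from offset 0. The window
 * is restored to sdiodev->sbwad afterwards so normal register accesses keep
 * working.
 */
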
int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
	char t_func = (char)fn;
	brcmf_dbg(SDIO, "Enter\n");

	/* issue abort cmd52 command through F0 */
	brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
				 SDIO_CCCR_ABORT, &t_func);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}

int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
	u32 regs = 0;
	int ret = 0;

	ret = brcmf_sdioh_attach(sdiodev);
	if (ret)
		goto out;

	regs = SI_ENUM_BASE;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
	if (!sdiodev->bus) {
		brcmf_err("device attach failed\n");
		ret = -ENODEV;
		goto out;
	}

out:
	if (ret)
		brcmf_sdio_remove(sdiodev);

	return ret;
}

int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->bus_if->state = BRCMF_BUS_DOWN;

	if (sdiodev->bus) {
		brcmf_sdbrcm_disconnect(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdioh_detach(sdiodev);

	sdiodev->sbwad = 0;

	return 0;
}

void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
{
	if (enable)
		brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
	else
		brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
}