bcmsdh.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
#include "sdio_chip.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK			0x03

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512

/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY			3000
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus);

	return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}

static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
{
	bool is_err = false;

#ifdef CONFIG_PM_SLEEP
	is_err = atomic_read(&sdiodev->suspend);
#endif
	return is_err;
}

static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
				       wait_queue_head_t *wq)
{
#ifdef CONFIG_PM_SLEEP
	int retry = 0;

	while (atomic_read(&sdiodev->suspend) && retry++ != 30)
		wait_event_timeout(*wq, false, HZ/100);
#endif
}
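
/*
 * Interrupt setup: when the platform data advertises an out-of-band GPIO
 * interrupt, request that IRQ and route the card's interrupt signal to the
 * dedicated pin; otherwise fall back to the regular in-band SDIO interrupt
 * claimed through the MMC core.
 */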
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	u8 data;
	unsigned long flags;

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  sdiodev->pdata->oob_irq_nr);
		ret = request_irq(sdiodev->pdata->oob_irq_nr,
				  brcmf_sdiod_oob_irqhandler,
				  sdiodev->pdata->oob_irq_flags,
				  "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;
		spin_lock_init(&sdiodev->irq_en_lock);
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		sdiodev->irq_en = true;
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

		ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);

		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}
int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering\n");

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		sdio_claim_host(sdiodev->func[1]);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func[1]);

		if (sdiodev->oob_irq_requested) {
			sdiodev->oob_irq_requested = false;
			if (sdiodev->irq_wake) {
				disable_irq_wake(sdiodev->pdata->oob_irq_nr);
				sdiodev->irq_wake = false;
			}
			free_irq(sdiodev->pdata->oob_irq_nr,
				 &sdiodev->func[1]->dev);
			sdiodev->irq_en = false;
		}
	} else {
		sdio_claim_host(sdiodev->func[1]);
		sdio_release_irq(sdiodev->func[2]);
		sdio_release_irq(sdiodev->func[1]);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}
static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
					uint regaddr, u8 byte)
{
	int err_ret;

	/*
	 * Can only directly write to some F0 registers.
	 * Handle CCCR_IENx and CCCR_ABORT command
	 * as a special case.
	 */
	if ((regaddr == SDIO_CCCR_ABORT) ||
	    (regaddr == SDIO_CCCR_IENx))
		sdio_writeb(func, byte, regaddr, &err_ret);
	else
		sdio_f0_writeb(func, byte, regaddr, &err_ret);

	return err_ret;
}
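
/*
 * Issue a single register access of 1, 2 or 4 bytes on the given SDIO
 * function. Only byte-wide access is allowed on function 0, and F0 writes
 * to CCCR_IENx/CCCR_ABORT go through the special-case helper above.
 */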
static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
				    u32 addr, u8 regsz, void *data, bool write)
{
	struct sdio_func *func;
	int ret;

	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		  write, fn, addr, regsz);

	brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
	if (brcmf_sdiod_pm_resume_error(sdiodev))
		return -EIO;

	/* only allow byte access on F0 */
	if (WARN_ON(regsz > 1 && !fn))
		return -EINVAL;
	func = sdiodev->func[fn];

	switch (regsz) {
	case sizeof(u8):
		if (write) {
			if (fn)
				sdio_writeb(func, *(u8 *)data, addr, &ret);
			else
				ret = brcmf_sdiod_f0_writeb(func, addr,
							    *(u8 *)data);
		} else {
			if (fn)
				*(u8 *)data = sdio_readb(func, addr, &ret);
			else
				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
		}
		break;
	case sizeof(u16):
		if (write)
			sdio_writew(func, *(u16 *)data, addr, &ret);
		else
			*(u16 *)data = sdio_readw(func, addr, &ret);
		break;
	case sizeof(u32):
		if (write)
			sdio_writel(func, *(u32 *)data, addr, &ret);
		else
			*(u32 *)data = sdio_readl(func, addr, &ret);
		break;
	default:
		brcmf_err("invalid size: %d\n", regsz);
		break;
	}

	if (ret) {
		/*
		 * SleepCSR register access can fail when
		 * waking up the device so reduce this noise
		 * in the logs.
		 */
		if (addr != SBSDIO_FUNC1_SLEEPCSR)
			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", fn, addr, ret);
		else
			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", fn, addr, ret);
	}

	return ret;
}
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
				    u8 regsz, void *data, bool write)
{
	u8 func_num;
	s32 retry = 0;
	int ret;

	if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
		return -ENOMEDIUM;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0)
		func_num = SDIO_FUNC_0;
	else
		func_num = SDIO_FUNC_1;

	do {
		if (!write)
			memset(data, 0, regsz);
		/* on retry, wait 1 ms for the bus to settle */
		if (retry)
			usleep_range(1000, 2000);
		ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
					       data, write);
	} while (ret != 0 && ret != -ENOMEDIUM &&
		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

	if (ret == -ENOMEDIUM)
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
	else if (ret != 0)
		brcmf_err("failed with %d\n", ret);

	return ret;
}
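
/*
 * Program the three SBADDR window registers in function 1 so that the
 * requested 32-bit backplane address falls inside the F1 access window.
 */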
static int
brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
	int err = 0, i;
	u8 addr[3];

	if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
		return -ENOMEDIUM;

	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

	for (i = 0; i < 3; i++) {
		err = brcmf_sdiod_regrw_helper(sdiodev,
					       SBSDIO_FUNC1_SBADDRLOW + i,
					       sizeof(u8), &addr[i], true);
		if (err) {
			brcmf_err("failed at addr: 0x%0x\n",
				  SBSDIO_FUNC1_SBADDRLOW + i);
			break;
		}
	}

	return err;
}
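
/*
 * Move the backplane window when the target address lies outside the
 * currently mapped window, then reduce the address to a function 1 offset.
 * 4-byte accesses additionally set the 2_4B access flag.
 */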
static int
brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
	int err = 0;

	if (bar0 != sdiodev->sbwad) {
		err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
		if (err)
			return err;

		sdiodev->sbwad = bar0;
	}

	*addr &= SBSDIO_SB_OFT_ADDR_MASK;

	if (width == 4)
		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	return 0;
}
u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u8 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  false);
	brcmf_dbg(SDIO, "data:0x%02x\n", data);

	if (ret)
		*ret = retval;

	return data;
}

u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
	if (retval)
		goto done;
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  false);
	brcmf_dbg(SDIO, "data:0x%08x\n", data);

done:
	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
		       u8 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  true);

	if (ret)
		*ret = retval;
}

void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
		       u32 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
	if (retval)
		goto done;
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  true);

done:
	if (ret)
		*ret = retval;
}
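
/*
 * Transfer a single skb to or from the card with the standard MMC helpers.
 * The request length is rounded up to a 4-byte boundary; function 2 reads
 * use sdio_readsb() because the read side is a FIFO (fixed address).
 */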
static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
			      bool write, u32 addr, struct sk_buff *pkt)
{
	unsigned int req_sz;
	int err;

	brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
	if (brcmf_sdiod_pm_resume_error(sdiodev))
		return -EIO;

	/* a single skb uses the standard mmc interface */
	req_sz = pkt->len + 3;
	req_sz &= (uint)~3;

	if (write)
		err = sdio_memcpy_toio(sdiodev->func[fn], addr,
				       ((u8 *)(pkt->data)), req_sz);
	else if (fn == 1)
		err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
					 addr, req_sz);
	else
		/* function 2 read is FIFO operation */
		err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
				  req_sz);

	if (err == -ENOMEDIUM)
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);

	return err;
}
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: list of skb buffers to transfer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct sg_table st;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
	if (brcmf_sdiod_pm_resume_error(sdiodev))
		return -EIO;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = sdiodev->func[fn]->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto exit;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = st.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1<<27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;
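
	/*
	 * Issue one CMD53 per pass below: each pass packs as many sg entries
	 * as the host allows (bounded by max_request_size and
	 * max_segment_count), and the resulting length must be a multiple
	 * of the function block size.
	 */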
	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = st.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (fn == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_bus_change_state(sdiodev->bus_if,
					       BRCMF_BUS_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	sg_free_table(&st);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}
int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->sbwad;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		goto done;

	err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);

done:
	return err;
}

int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb;
	struct sk_buff *skb;
	u32 addr = sdiodev->sbwad;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		goto done;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 pktq->next);
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 glom_skb);
		if (err) {
			brcmu_pkt_buf_free_skb(glom_skb);
			goto done;
		}

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
		/* bounce buffer no longer needed once copied out */
		brcmu_pkt_buf_free_skb(glom_skb);
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
					    pktq);

done:
	return err;
}
int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->sbwad;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);

	if (!err)
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
					 mypkt);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->sbwad;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		return err;

	if (pktq->qlen == 1 || !sdiodev->sg_support)
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
						 addr, skb);
			if (err)
				break;
		}
	else
		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
					    pktq);

	return err;
}
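
/*
 * Read or write dongle RAM through function 1. The transfer is broken into
 * chunks that fit the backplane window; the window is moved for each chunk
 * and restored to the saved base address when the transfer completes.
 */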
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
					      sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, dsize);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
	char t_func = (char)fn;

	brcmf_dbg(SDIO, "Enter\n");

	/* issue abort cmd52 command through F0 */
	brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
				 sizeof(t_func), &t_func, true);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}

static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func[2]);
	sdio_disable_func(sdiodev->func[2]);
	sdio_release_host(sdiodev->func[2]);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func[1]);
	sdio_disable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);

	sdiodev->sbwad = 0;

	return 0;
}
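
/*
 * Bus-level probe: set the F1/F2 block sizes, enable function 1, derive the
 * host controller's request and scatter-gather limits, and finally attach
 * the common SDIO bus layer via brcmf_sdio_probe().
 */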
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	int ret = 0;

	sdiodev->num_funcs = 2;

	sdio_claim_host(sdiodev->func[1]);

	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	/*
	 * determine host related variables after brcmf_sdiod_probe()
	 * as func->cur_blksize is properly set and F2 init has been
	 * completed successfully.
	 */
	func = sdiodev->func[2];
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}

out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
		     SDIO_DEVICE_ID_BROADCOM_4335_4339)},
	{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
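
/*
 * The MMC core probes each SDIO function separately. The function 1 probe
 * is accepted but otherwise ignored; all setup happens from the function 2
 * probe, which picks up F1 through the shared card's function list.
 */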
static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
	if (!sdiodev->func[0]) {
		kfree(sdiodev);
		kfree(bus_if);
		return -ENOMEM;
	}
	sdiodev->func[0]->num = 0;
	sdiodev->func[1] = func->card->sdio_func[0];
	sdiodev->func[2] = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
	sdiodev->dev = &sdiodev->func[1]->dev;
	sdiodev->pdata = brcmfmac_sdio_pdata;

	atomic_set(&sdiodev->suspend, false);
	init_waitqueue_head(&sdiodev->request_word_wait);
	init_waitqueue_head(&sdiodev->request_buffer_wait);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
	kfree(sdiodev->func[0]);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	if (func->num != 1 && func->num != 2)
		return;

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev->func[0]);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}

#ifdef CONFIG_PM_SLEEP
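/*
 * Suspend requires that the host keep the card powered (MMC_PM_KEEP_POWER);
 * if it cannot, the suspend request is rejected. The watchdog timer is
 * stopped across suspend and restarted on resume.
 */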
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	mmc_pm_flag_t sdio_flags;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	int ret = 0;

	brcmf_dbg(SDIO, "\n");

	atomic_set(&sdiodev->suspend, true);

	sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
		brcmf_err("Host can't keep power while suspended\n");
		return -EINVAL;
	}

	ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
	if (ret) {
		brcmf_err("Failed to set pm_flags\n");
		return ret;
	}

	brcmf_sdio_wd_timer(sdiodev->bus, 0);

	return ret;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
	atomic_set(&sdiodev->suspend, false);
	return 0;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
#endif	/* CONFIG_PM_SLEEP */

static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = BRCMFMAC_SDIO_PDATA_NAME,
	.id_table = brcmf_sdmmc_ids,
#ifdef CONFIG_PM_SLEEP
	.drv = {
		.pm = &brcmf_sdio_pm_ops,
	},
#endif	/* CONFIG_PM_SLEEP */
};
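
/*
 * Optional platform device carrying board-specific SDIO platform data
 * (OOB IRQ description, power callbacks). The data is cached in
 * brcmfmac_sdio_pdata and the power_on()/power_off() hooks are invoked at
 * platform probe and remove.
 */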
static int brcmf_sdio_pd_probe(struct platform_device *pdev)
{
	brcmf_dbg(SDIO, "Enter\n");

	brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);

	if (brcmfmac_sdio_pdata->power_on)
		brcmfmac_sdio_pdata->power_on();

	return 0;
}

static int brcmf_sdio_pd_remove(struct platform_device *pdev)
{
	brcmf_dbg(SDIO, "Enter\n");

	if (brcmfmac_sdio_pdata->power_off)
		brcmfmac_sdio_pdata->power_off();

	sdio_unregister_driver(&brcmf_sdmmc_driver);

	return 0;
}

static struct platform_driver brcmf_sdio_pd = {
	.remove		= brcmf_sdio_pd_remove,
	.driver		= {
		.name	= BRCMFMAC_SDIO_PDATA_NAME,
		.owner	= THIS_MODULE,
	}
};

void brcmf_sdio_register(void)
{
	int ret;

	ret = sdio_register_driver(&brcmf_sdmmc_driver);
	if (ret)
		brcmf_err("sdio_register_driver failed: %d\n", ret);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	if (brcmfmac_sdio_pdata)
		platform_driver_unregister(&brcmf_sdio_pd);
	else
		sdio_unregister_driver(&brcmf_sdmmc_driver);
}

void __init brcmf_sdio_init(void)
{
	int ret;

	brcmf_dbg(SDIO, "Enter\n");

	ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
	if (ret == -ENODEV)
		brcmf_dbg(SDIO, "No platform data available.\n");
}