bcmsdh.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218
  1. /*
  2. * Copyright (c) 2010 Broadcom Corporation
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. /* ****************** SDIO CARD Interface Functions **************************/
  17. #include <linux/types.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/pci.h>
  20. #include <linux/pci_ids.h>
  21. #include <linux/sched.h>
  22. #include <linux/completion.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/mmc/sdio.h>
  25. #include <linux/mmc/core.h>
  26. #include <linux/mmc/sdio_func.h>
  27. #include <linux/mmc/sdio_ids.h>
  28. #include <linux/mmc/card.h>
  29. #include <linux/mmc/host.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/platform_data/brcmfmac-sdio.h>
  32. #include <linux/suspend.h>
  33. #include <linux/errno.h>
  34. #include <linux/module.h>
  35. #include <net/cfg80211.h>
  36. #include <defs.h>
  37. #include <brcm_hw_ids.h>
  38. #include <brcmu_utils.h>
  39. #include <brcmu_wifi.h>
  40. #include <soc.h>
  41. #include "dhd_bus.h"
  42. #include "dhd_dbg.h"
  43. #include "sdio_host.h"
  44. #define SDIOH_API_ACCESS_RETRY_LIMIT 2
  45. #define DMA_ALIGN_MASK 0x03
  46. #define SDIO_FUNC1_BLOCKSIZE 64
  47. #define SDIO_FUNC2_BLOCKSIZE 512
  48. /* Maximum milliseconds to wait for F2 to come up */
  49. #define SDIO_WAIT_F2RDY 3000
  50. #define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
  51. #define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
  52. static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
  53. module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
  54. MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
  55. static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
  56. {
  57. struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
  58. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  59. brcmf_dbg(INTR, "OOB intr triggered\n");
  60. /* out-of-band interrupt is level-triggered which won't
  61. * be cleared until dpc
  62. */
  63. if (sdiodev->irq_en) {
  64. disable_irq_nosync(irq);
  65. sdiodev->irq_en = false;
  66. }
  67. brcmf_sdio_isr(sdiodev->bus);
  68. return IRQ_HANDLED;
  69. }
  70. static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
  71. {
  72. struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
  73. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  74. brcmf_dbg(INTR, "IB intr triggered\n");
  75. brcmf_sdio_isr(sdiodev->bus);
  76. }
/* dummy handler for SDIO function 2 interrupt; claiming an irq on F2
 * is required so the core enables the F2 interrupt, but all real work
 * is done from the F1 handler
 */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
  81. static bool brcmf_sdiod_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
  82. {
  83. bool is_err = false;
  84. #ifdef CONFIG_PM_SLEEP
  85. is_err = atomic_read(&sdiodev->suspend);
  86. #endif
  87. return is_err;
  88. }
  89. static void brcmf_sdiod_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
  90. wait_queue_head_t *wq)
  91. {
  92. #ifdef CONFIG_PM_SLEEP
  93. int retry = 0;
  94. while (atomic_read(&sdiodev->suspend) && retry++ != 30)
  95. wait_event_timeout(*wq, false, HZ/100);
  96. #endif
  97. }
/* Register the dongle interrupt handler.
 *
 * Two mechanisms are supported:
 * - out-of-band (OOB): a dedicated GPIO interrupt line described by
 *   platform data, for hosts that cannot deliver in-band SDIO irqs;
 * - in-band (IB): the regular SDIO interrupt claimed on function 1
 *   (plus a dummy handler on function 2 so its irq gets enabled).
 *
 * Returns 0 on success or a negative errno.
 */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	u8 data;
	unsigned long flags;

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  sdiodev->pdata->oob_irq_nr);
		ret = request_irq(sdiodev->pdata->oob_irq_nr,
				  brcmf_sdiod_oob_irqhandler,
				  sdiodev->pdata->oob_irq_flags,
				  "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;

		/* irq_en mirrors the masked/unmasked state of the OOB
		 * line; protected by irq_en_lock
		 */
		spin_lock_init(&sdiodev->irq_en_lock);
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		sdiodev->irq_en = true;
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

		/* allow the OOB line to wake the system from suspend */
		ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
		/* F2 irq must be claimed (dummy) so the core enables it */
		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}
/* Undo brcmf_sdiod_intr_register(): disable interrupt generation in the
 * card (OOB case) and release the host-side irq resources. Always
 * returns 0.
 */
int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering\n");

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		/* quiesce the card first so no further OOB pulses occur */
		sdio_claim_host(sdiodev->func[1]);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func[1]);

		if (sdiodev->oob_irq_requested) {
			sdiodev->oob_irq_requested = false;
			if (sdiodev->irq_wake) {
				disable_irq_wake(sdiodev->pdata->oob_irq_nr);
				sdiodev->irq_wake = false;
			}
			free_irq(sdiodev->pdata->oob_irq_nr,
				 &sdiodev->func[1]->dev);
			sdiodev->irq_en = false;
		}
	} else {
		sdio_claim_host(sdiodev->func[1]);
		sdio_release_irq(sdiodev->func[2]);
		sdio_release_irq(sdiodev->func[1]);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}
  172. static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
  173. uint regaddr, u8 byte)
  174. {
  175. int err_ret;
  176. /*
  177. * Can only directly write to some F0 registers.
  178. * Handle CCCR_IENx and CCCR_ABORT command
  179. * as a special case.
  180. */
  181. if ((regaddr == SDIO_CCCR_ABORT) ||
  182. (regaddr == SDIO_CCCR_IENx))
  183. sdio_writeb(func, byte, regaddr, &err_ret);
  184. else
  185. sdio_f0_writeb(func, byte, regaddr, &err_ret);
  186. return err_ret;
  187. }
  188. static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
  189. u32 addr, u8 regsz, void *data, bool write)
  190. {
  191. struct sdio_func *func;
  192. int ret;
  193. brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
  194. write, fn, addr, regsz);
  195. brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
  196. if (brcmf_sdiod_pm_resume_error(sdiodev))
  197. return -EIO;
  198. /* only allow byte access on F0 */
  199. if (WARN_ON(regsz > 1 && !fn))
  200. return -EINVAL;
  201. func = sdiodev->func[fn];
  202. switch (regsz) {
  203. case sizeof(u8):
  204. if (write) {
  205. if (fn)
  206. sdio_writeb(func, *(u8 *)data, addr, &ret);
  207. else
  208. ret = brcmf_sdiod_f0_writeb(func, addr,
  209. *(u8 *)data);
  210. } else {
  211. if (fn)
  212. *(u8 *)data = sdio_readb(func, addr, &ret);
  213. else
  214. *(u8 *)data = sdio_f0_readb(func, addr, &ret);
  215. }
  216. break;
  217. case sizeof(u16):
  218. if (write)
  219. sdio_writew(func, *(u16 *)data, addr, &ret);
  220. else
  221. *(u16 *)data = sdio_readw(func, addr, &ret);
  222. break;
  223. case sizeof(u32):
  224. if (write)
  225. sdio_writel(func, *(u32 *)data, addr, &ret);
  226. else
  227. *(u32 *)data = sdio_readl(func, addr, &ret);
  228. break;
  229. default:
  230. brcmf_err("invalid size: %d\n", regsz);
  231. break;
  232. }
  233. if (ret)
  234. brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
  235. write ? "write" : "read", fn, addr, ret);
  236. return ret;
  237. }
/* Register access with address-based function selection and bounded
 * retry. Picks F0 or F1 from the address range, then retries a failed
 * transfer up to SDIOH_API_ACCESS_RETRY_LIMIT times with a ~1ms pause,
 * except for -ENOMEDIUM which marks the bus dead immediately.
 *
 * Returns 0 on success or the last negative errno.
 */
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
				    u8 regsz, void *data, bool write)
{
	u8 func;
	s32 retry = 0;
	int ret;

	if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
		return -ENOMEDIUM;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0)
		func = SDIO_FUNC_0;
	else
		func = SDIO_FUNC_1;

	do {
		/* clear the output buffer so a failed read yields zeros */
		if (!write)
			memset(data, 0, regsz);
		/* for retry wait for 1 ms till bus get settled down */
		if (retry)
			usleep_range(1000, 2000);
		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
					       data, write);
	} while (ret != 0 && ret != -ENOMEDIUM &&
		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

	if (ret == -ENOMEDIUM)
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
	else if (ret != 0) {
		/*
		 * SleepCSR register access can fail when
		 * waking up the device so reduce this noise
		 * in the logs.
		 */
		if (addr != SBSDIO_FUNC1_SLEEPCSR)
			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
		else
			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
	}
	return ret;
}
  283. static int
  284. brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
  285. {
  286. int err = 0, i;
  287. u8 addr[3];
  288. if (sdiodev->bus_if->state == BRCMF_BUS_NOMEDIUM)
  289. return -ENOMEDIUM;
  290. addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
  291. addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
  292. addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
  293. for (i = 0; i < 3; i++) {
  294. err = brcmf_sdiod_regrw_helper(sdiodev,
  295. SBSDIO_FUNC1_SBADDRLOW + i,
  296. sizeof(u8), &addr[i], true);
  297. if (err) {
  298. brcmf_err("failed at addr: 0x%0x\n",
  299. SBSDIO_FUNC1_SBADDRLOW + i);
  300. break;
  301. }
  302. }
  303. return err;
  304. }
  305. static int
  306. brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
  307. {
  308. uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
  309. int err = 0;
  310. if (bar0 != sdiodev->sbwad) {
  311. err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
  312. if (err)
  313. return err;
  314. sdiodev->sbwad = bar0;
  315. }
  316. *addr &= SBSDIO_SB_OFT_ADDR_MASK;
  317. if (width == 4)
  318. *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
  319. return 0;
  320. }
  321. u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  322. {
  323. u8 data;
  324. int retval;
  325. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  326. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  327. false);
  328. brcmf_dbg(SDIO, "data:0x%02x\n", data);
  329. if (ret)
  330. *ret = retval;
  331. return data;
  332. }
  333. u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  334. {
  335. u32 data;
  336. int retval;
  337. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  338. retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
  339. if (retval)
  340. goto done;
  341. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  342. false);
  343. brcmf_dbg(SDIO, "data:0x%08x\n", data);
  344. done:
  345. if (ret)
  346. *ret = retval;
  347. return data;
  348. }
  349. void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
  350. u8 data, int *ret)
  351. {
  352. int retval;
  353. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
  354. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  355. true);
  356. if (ret)
  357. *ret = retval;
  358. }
  359. void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
  360. u32 data, int *ret)
  361. {
  362. int retval;
  363. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
  364. retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
  365. if (retval)
  366. goto done;
  367. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  368. true);
  369. done:
  370. if (ret)
  371. *ret = retval;
  372. }
  373. static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
  374. bool write, u32 addr, struct sk_buff *pkt)
  375. {
  376. unsigned int req_sz;
  377. int err;
  378. brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
  379. if (brcmf_sdiod_pm_resume_error(sdiodev))
  380. return -EIO;
  381. /* Single skb use the standard mmc interface */
  382. req_sz = pkt->len + 3;
  383. req_sz &= (uint)~3;
  384. if (write)
  385. err = sdio_memcpy_toio(sdiodev->func[fn], addr,
  386. ((u8 *)(pkt->data)), req_sz);
  387. else if (fn == 1)
  388. err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
  389. addr, req_sz);
  390. else
  391. /* function 2 read is FIFO operation */
  392. err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
  393. req_sz);
  394. if (err == -ENOMEDIUM)
  395. brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
  396. return err;
  397. }
  398. /**
  399. * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
  400. * @sdiodev: brcmfmac sdio device
  401. * @fn: SDIO function number
  402. * @write: direction flag
  403. * @addr: dongle memory address as source/destination
  404. * @pkt: skb pointer
  405. *
  406. * This function takes the respbonsibility as the interface function to MMC
  407. * stack for block data access. It assumes that the skb passed down by the
  408. * caller has already been padded and aligned.
  409. */
  410. static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
  411. bool write, u32 addr,
  412. struct sk_buff_head *pktlist)
  413. {
  414. unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
  415. unsigned int max_req_sz, orig_offset, dst_offset;
  416. unsigned short max_seg_cnt, seg_sz;
  417. unsigned char *pkt_data, *orig_data, *dst_data;
  418. struct sk_buff *pkt_next = NULL, *local_pkt_next;
  419. struct sk_buff_head local_list, *target_list;
  420. struct mmc_request mmc_req;
  421. struct mmc_command mmc_cmd;
  422. struct mmc_data mmc_dat;
  423. struct scatterlist *sgl;
  424. int ret = 0;
  425. if (!pktlist->qlen)
  426. return -EINVAL;
  427. brcmf_sdiod_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
  428. if (brcmf_sdiod_pm_resume_error(sdiodev))
  429. return -EIO;
  430. target_list = pktlist;
  431. /* for host with broken sg support, prepare a page aligned list */
  432. __skb_queue_head_init(&local_list);
  433. if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
  434. req_sz = 0;
  435. skb_queue_walk(pktlist, pkt_next)
  436. req_sz += pkt_next->len;
  437. req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
  438. while (req_sz > PAGE_SIZE) {
  439. pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
  440. if (pkt_next == NULL) {
  441. ret = -ENOMEM;
  442. goto exit;
  443. }
  444. __skb_queue_tail(&local_list, pkt_next);
  445. req_sz -= PAGE_SIZE;
  446. }
  447. pkt_next = brcmu_pkt_buf_get_skb(req_sz);
  448. if (pkt_next == NULL) {
  449. ret = -ENOMEM;
  450. goto exit;
  451. }
  452. __skb_queue_tail(&local_list, pkt_next);
  453. target_list = &local_list;
  454. }
  455. func_blk_sz = sdiodev->func[fn]->cur_blksize;
  456. max_req_sz = sdiodev->max_request_size;
  457. max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
  458. target_list->qlen);
  459. seg_sz = target_list->qlen;
  460. pkt_offset = 0;
  461. pkt_next = target_list->next;
  462. memset(&mmc_req, 0, sizeof(struct mmc_request));
  463. memset(&mmc_cmd, 0, sizeof(struct mmc_command));
  464. memset(&mmc_dat, 0, sizeof(struct mmc_data));
  465. mmc_dat.sg = sdiodev->sgtable.sgl;
  466. mmc_dat.blksz = func_blk_sz;
  467. mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
  468. mmc_cmd.opcode = SD_IO_RW_EXTENDED;
  469. mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
  470. mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
  471. mmc_cmd.arg |= 1<<27; /* block mode */
  472. /* for function 1 the addr will be incremented */
  473. mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
  474. mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
  475. mmc_req.cmd = &mmc_cmd;
  476. mmc_req.data = &mmc_dat;
  477. while (seg_sz) {
  478. req_sz = 0;
  479. sg_cnt = 0;
  480. sgl = sdiodev->sgtable.sgl;
  481. /* prep sg table */
  482. while (pkt_next != (struct sk_buff *)target_list) {
  483. pkt_data = pkt_next->data + pkt_offset;
  484. sg_data_sz = pkt_next->len - pkt_offset;
  485. if (sg_data_sz > sdiodev->max_segment_size)
  486. sg_data_sz = sdiodev->max_segment_size;
  487. if (sg_data_sz > max_req_sz - req_sz)
  488. sg_data_sz = max_req_sz - req_sz;
  489. sg_set_buf(sgl, pkt_data, sg_data_sz);
  490. sg_cnt++;
  491. sgl = sg_next(sgl);
  492. req_sz += sg_data_sz;
  493. pkt_offset += sg_data_sz;
  494. if (pkt_offset == pkt_next->len) {
  495. pkt_offset = 0;
  496. pkt_next = pkt_next->next;
  497. }
  498. if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
  499. break;
  500. }
  501. seg_sz -= sg_cnt;
  502. if (req_sz % func_blk_sz != 0) {
  503. brcmf_err("sg request length %u is not %u aligned\n",
  504. req_sz, func_blk_sz);
  505. ret = -ENOTBLK;
  506. goto exit;
  507. }
  508. mmc_dat.sg_len = sg_cnt;
  509. mmc_dat.blocks = req_sz / func_blk_sz;
  510. mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
  511. mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
  512. /* incrementing addr for function 1 */
  513. if (fn == 1)
  514. addr += req_sz;
  515. mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
  516. mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
  517. ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
  518. if (ret == -ENOMEDIUM) {
  519. brcmf_bus_change_state(sdiodev->bus_if,
  520. BRCMF_BUS_NOMEDIUM);
  521. break;
  522. } else if (ret != 0) {
  523. brcmf_err("CMD53 sg block %s failed %d\n",
  524. write ? "write" : "read", ret);
  525. ret = -EIO;
  526. break;
  527. }
  528. }
  529. if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
  530. local_pkt_next = local_list.next;
  531. orig_offset = 0;
  532. skb_queue_walk(pktlist, pkt_next) {
  533. dst_offset = 0;
  534. do {
  535. req_sz = local_pkt_next->len - orig_offset;
  536. req_sz = min_t(uint, pkt_next->len - dst_offset,
  537. req_sz);
  538. orig_data = local_pkt_next->data + orig_offset;
  539. dst_data = pkt_next->data + dst_offset;
  540. memcpy(dst_data, orig_data, req_sz);
  541. orig_offset += req_sz;
  542. dst_offset += req_sz;
  543. if (orig_offset == local_pkt_next->len) {
  544. orig_offset = 0;
  545. local_pkt_next = local_pkt_next->next;
  546. }
  547. if (dst_offset == pkt_next->len)
  548. break;
  549. } while (!skb_queue_empty(&local_list));
  550. }
  551. }
  552. exit:
  553. sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
  554. while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
  555. brcmu_pkt_buf_free_skb(pkt_next);
  556. return ret;
  557. }
  558. int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
  559. {
  560. struct sk_buff *mypkt;
  561. int err;
  562. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  563. if (!mypkt) {
  564. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  565. nbytes);
  566. return -EIO;
  567. }
  568. err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
  569. if (!err)
  570. memcpy(buf, mypkt->data, nbytes);
  571. brcmu_pkt_buf_free_skb(mypkt);
  572. return err;
  573. }
  574. int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
  575. {
  576. u32 addr = sdiodev->sbwad;
  577. int err = 0;
  578. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
  579. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  580. if (err)
  581. goto done;
  582. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
  583. done:
  584. return err;
  585. }
  586. int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
  587. struct sk_buff_head *pktq, uint totlen)
  588. {
  589. struct sk_buff *glom_skb;
  590. struct sk_buff *skb;
  591. u32 addr = sdiodev->sbwad;
  592. int err = 0;
  593. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
  594. addr, pktq->qlen);
  595. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  596. if (err)
  597. goto done;
  598. if (pktq->qlen == 1)
  599. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
  600. pktq->next);
  601. else if (!sdiodev->sg_support) {
  602. glom_skb = brcmu_pkt_buf_get_skb(totlen);
  603. if (!glom_skb)
  604. return -ENOMEM;
  605. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
  606. glom_skb);
  607. if (err)
  608. goto done;
  609. skb_queue_walk(pktq, skb) {
  610. memcpy(skb->data, glom_skb->data, skb->len);
  611. skb_pull(glom_skb, skb->len);
  612. }
  613. } else
  614. err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
  615. pktq);
  616. done:
  617. return err;
  618. }
  619. int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
  620. {
  621. struct sk_buff *mypkt;
  622. u32 addr = sdiodev->sbwad;
  623. int err;
  624. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  625. if (!mypkt) {
  626. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  627. nbytes);
  628. return -EIO;
  629. }
  630. memcpy(mypkt->data, buf, nbytes);
  631. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  632. if (!err)
  633. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
  634. mypkt);
  635. brcmu_pkt_buf_free_skb(mypkt);
  636. return err;
  637. }
  638. int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
  639. struct sk_buff_head *pktq)
  640. {
  641. struct sk_buff *skb;
  642. u32 addr = sdiodev->sbwad;
  643. int err;
  644. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
  645. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  646. if (err)
  647. return err;
  648. if (pktq->qlen == 1 || !sdiodev->sg_support)
  649. skb_queue_walk(pktq, skb) {
  650. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
  651. addr, skb);
  652. if (err)
  653. break;
  654. }
  655. else
  656. err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
  657. pktq);
  658. return err;
  659. }
/* Read or write @size bytes of dongle RAM at backplane @address,
 * splitting the transfer at backplane window boundaries. Each chunk
 * moves the address window, transfers through a scratch skb on
 * function 1, and advances. On exit the window is restored to the
 * cached sbwad value so subsequent core register accesses still work.
 *
 * Returns 0 or the first negative errno encountered.
 */
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	/* scratch buffer: at most one full window per transfer */
	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		/* first chunk ends at the window boundary */
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
					      sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		/* reset the scratch skb for the next chunk */
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			/* subsequent chunks start at a window boundary */
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}
  722. int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
  723. {
  724. char t_func = (char)fn;
  725. brcmf_dbg(SDIO, "Enter\n");
  726. /* issue abort cmd52 command through F0 */
  727. brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
  728. sizeof(t_func), &t_func, true);
  729. brcmf_dbg(SDIO, "Exit\n");
  730. return 0;
  731. }
/* Pre-allocate the scatterlist table used by brcmf_sdiod_sglist_rw().
 * Entry count is the larger of the rx/tx glom sizes plus ~6% headroom.
 * On allocation failure scatter-gather support is disabled rather than
 * failing the probe.
 */
static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	uint nents;
	int err;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
	/* headroom for skbs spanning more than one sg entry */
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = brcmf_sdiod_txglomsz;
}
  749. static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
  750. {
  751. if (sdiodev->bus) {
  752. brcmf_sdio_remove(sdiodev->bus);
  753. sdiodev->bus = NULL;
  754. }
  755. /* Disable Function 2 */
  756. sdio_claim_host(sdiodev->func[2]);
  757. sdio_disable_func(sdiodev->func[2]);
  758. sdio_release_host(sdiodev->func[2]);
  759. /* Disable Function 1 */
  760. sdio_claim_host(sdiodev->func[1]);
  761. sdio_disable_func(sdiodev->func[1]);
  762. sdio_release_host(sdiodev->func[1]);
  763. sg_free_table(&sdiodev->sgtable);
  764. sdiodev->sbwad = 0;
  765. return 0;
  766. }
/* Initialize the SDIO device: configure F1/F2 block sizes, enable
 * function 1, derive the host transfer limits, allocate the sg table
 * and attach the common bus layer. On any failure the partially
 * initialized state is torn down via brcmf_sdiod_remove().
 *
 * Returns 0 on success or a negative errno.
 */
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	int ret = 0;

	sdiodev->num_funcs = 2;

	sdio_claim_host(sdiodev->func[1]);

	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	/*
	 * determine host related variables after brcmf_sdiod_probe()
	 * as func->cur_blksize is properly set and F2 init has been
	 * completed successfully.
	 */
	func = sdiodev->func[2];
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	/* cap at 511 blocks, the CMD53 block-count field maximum */
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	/* allocate scatter-gather table. sg support
	 * will be disabled upon allocation failure.
	 */
	brcmf_sdiod_sgtable_alloc(sdiodev);

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}

out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43143)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43241)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
	/* one SDIO device ID covers both the 4335 and 4339 chips */
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
		     SDIO_DEVICE_ID_BROADCOM_4335_4339)},
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
	{ /* end: all zeroes */ },
};
/* export the table so userspace tooling can autoload the module */
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
  839. static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
  840. static int brcmf_ops_sdio_probe(struct sdio_func *func,
  841. const struct sdio_device_id *id)
  842. {
  843. int err;
  844. struct brcmf_sdio_dev *sdiodev;
  845. struct brcmf_bus *bus_if;
  846. brcmf_dbg(SDIO, "Enter\n");
  847. brcmf_dbg(SDIO, "Class=%x\n", func->class);
  848. brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
  849. brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
  850. brcmf_dbg(SDIO, "Function#: %d\n", func->num);
  851. /* Consume func num 1 but dont do anything with it. */
  852. if (func->num == 1)
  853. return 0;
  854. /* Ignore anything but func 2 */
  855. if (func->num != 2)
  856. return -ENODEV;
  857. bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
  858. if (!bus_if)
  859. return -ENOMEM;
  860. sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
  861. if (!sdiodev) {
  862. kfree(bus_if);
  863. return -ENOMEM;
  864. }
  865. /* store refs to functions used. mmc_card does
  866. * not hold the F0 function pointer.
  867. */
  868. sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
  869. sdiodev->func[0]->num = 0;
  870. sdiodev->func[1] = func->card->sdio_func[0];
  871. sdiodev->func[2] = func;
  872. sdiodev->bus_if = bus_if;
  873. bus_if->bus_priv.sdio = sdiodev;
  874. bus_if->proto_type = BRCMF_PROTO_BCDC;
  875. dev_set_drvdata(&func->dev, bus_if);
  876. dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
  877. sdiodev->dev = &sdiodev->func[1]->dev;
  878. sdiodev->pdata = brcmfmac_sdio_pdata;
  879. atomic_set(&sdiodev->suspend, false);
  880. init_waitqueue_head(&sdiodev->request_word_wait);
  881. init_waitqueue_head(&sdiodev->request_buffer_wait);
  882. brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
  883. err = brcmf_sdiod_probe(sdiodev);
  884. if (err) {
  885. brcmf_err("F2 error, probe failed %d...\n", err);
  886. goto fail;
  887. }
  888. brcmf_dbg(SDIO, "F2 init completed...\n");
  889. return 0;
  890. fail:
  891. dev_set_drvdata(&func->dev, NULL);
  892. dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
  893. kfree(sdiodev->func[0]);
  894. kfree(sdiodev);
  895. kfree(bus_if);
  896. return err;
  897. }
  898. static void brcmf_ops_sdio_remove(struct sdio_func *func)
  899. {
  900. struct brcmf_bus *bus_if;
  901. struct brcmf_sdio_dev *sdiodev;
  902. brcmf_dbg(SDIO, "Enter\n");
  903. brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
  904. brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
  905. brcmf_dbg(SDIO, "Function: %d\n", func->num);
  906. if (func->num != 1 && func->num != 2)
  907. return;
  908. bus_if = dev_get_drvdata(&func->dev);
  909. if (bus_if) {
  910. sdiodev = bus_if->bus_priv.sdio;
  911. brcmf_sdiod_remove(sdiodev);
  912. dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
  913. dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
  914. kfree(bus_if);
  915. kfree(sdiodev->func[0]);
  916. kfree(sdiodev);
  917. }
  918. brcmf_dbg(SDIO, "Exit\n");
  919. }
  920. #ifdef CONFIG_PM_SLEEP
  921. static int brcmf_ops_sdio_suspend(struct device *dev)
  922. {
  923. mmc_pm_flag_t sdio_flags;
  924. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  925. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  926. int ret = 0;
  927. brcmf_dbg(SDIO, "Enter\n");
  928. sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
  929. if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
  930. brcmf_err("Host can't keep power while suspended\n");
  931. return -EINVAL;
  932. }
  933. atomic_set(&sdiodev->suspend, true);
  934. ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
  935. if (ret) {
  936. brcmf_err("Failed to set pm_flags\n");
  937. atomic_set(&sdiodev->suspend, false);
  938. return ret;
  939. }
  940. brcmf_sdio_wd_timer(sdiodev->bus, 0);
  941. return ret;
  942. }
/* System-resume hook: re-arm the watchdog poll timer and clear the
 * suspend flag.  Always returns 0.
 */
static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(SDIO, "Enter\n");
	/* NOTE(review): the timer is restarted before the suspend flag is
	 * cleared — presumably so paths unblocked by the flag already see
	 * an armed watchdog; confirm against the waiters on ->suspend.
	 */
	brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
	atomic_set(&sdiodev->suspend, false);
	return 0;
}
/* PM callbacks, wired into brcmf_sdmmc_driver only under CONFIG_PM_SLEEP. */
static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
  956. #endif /* CONFIG_PM_SLEEP */
/* SDIO driver registration record; matched against brcmf_sdmmc_ids. */
static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = BRCMFMAC_SDIO_PDATA_NAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
	},
};
  969. static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
  970. {
  971. brcmf_dbg(SDIO, "Enter\n");
  972. brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
  973. if (brcmfmac_sdio_pdata->power_on)
  974. brcmfmac_sdio_pdata->power_on();
  975. return 0;
  976. }
  977. static int brcmf_sdio_pd_remove(struct platform_device *pdev)
  978. {
  979. brcmf_dbg(SDIO, "Enter\n");
  980. if (brcmfmac_sdio_pdata->power_off)
  981. brcmfmac_sdio_pdata->power_off();
  982. sdio_unregister_driver(&brcmf_sdmmc_driver);
  983. return 0;
  984. }
/* Platform driver; .probe is intentionally absent because it is supplied
 * to platform_driver_probe() in brcmf_sdio_init().
 */
static struct platform_driver brcmf_sdio_pd = {
	.remove		= brcmf_sdio_pd_remove,
	.driver		= {
		.name	= BRCMFMAC_SDIO_PDATA_NAME,
		.owner	= THIS_MODULE,
	}
};
  992. void brcmf_sdio_register(void)
  993. {
  994. int ret;
  995. ret = sdio_register_driver(&brcmf_sdmmc_driver);
  996. if (ret)
  997. brcmf_err("sdio_register_driver failed: %d\n", ret);
  998. }
  999. void brcmf_sdio_exit(void)
  1000. {
  1001. brcmf_dbg(SDIO, "Enter\n");
  1002. if (brcmfmac_sdio_pdata)
  1003. platform_driver_unregister(&brcmf_sdio_pd);
  1004. else
  1005. sdio_unregister_driver(&brcmf_sdmmc_driver);
  1006. }
  1007. void __init brcmf_sdio_init(void)
  1008. {
  1009. int ret;
  1010. brcmf_dbg(SDIO, "Enter\n");
  1011. ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
  1012. if (ret == -ENODEV)
  1013. brcmf_dbg(SDIO, "No platform data available.\n");
  1014. }