bcmsdh.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "core.h"
#include "common.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK			0x03

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512

/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY			3000

#define BRCMF_DEFAULT_RXGLOM_SIZE	32	/* max rx frames in glom chain */
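
/* Bookkeeping for freezing the SDIO bus worker threads across system
 * suspend/resume; see the CONFIG_PM_SLEEP freezer helpers further down.
 */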
struct brcmf_sdiod_freezer {
	atomic_t freezing;
	atomic_t thread_count;
	u32 frozen_count;
	wait_queue_head_t thread_freeze;
	struct completion resumed;
};
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus);

	return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
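
/* Hook up dongle interrupt delivery. When the platform provides an
 * out-of-band GPIO interrupt, request it and route the SEPINT signal
 * through function 0; otherwise fall back to the in-band SDIO interrupt
 * on functions 1 and 2.
 */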
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		spin_lock_init(&sdiodev->irq_en_lock);
		sdiodev->irq_en = true;

		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func1->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;

		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func1);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
					   0xf, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
			SDIO_CCCR_IEN_FUNC0;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
				     data, &ret);
		sdio_release_host(sdiodev->func1);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func1);
		sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = true;
	}

	return 0;
}
void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
		  sdiodev->oob_irq_requested,
		  sdiodev->sd_irq_requested);

	if (sdiodev->oob_irq_requested) {
		struct brcmfmac_sdio_pd *pdata;

		pdata = &sdiodev->settings->bus.sdio;
		sdio_claim_host(sdiodev->func1);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func1);

		sdiodev->oob_irq_requested = false;
		if (sdiodev->irq_wake) {
			disable_irq_wake(pdata->oob_irq_nr);
			sdiodev->irq_wake = false;
		}
		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
		sdiodev->irq_en = false;
		sdiodev->oob_irq_requested = false;
	}

	if (sdiodev->sd_irq_requested) {
		sdio_claim_host(sdiodev->func1);
		sdio_release_irq(sdiodev->func2);
		sdio_release_irq(sdiodev->func1);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = false;
	}
}
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
			      enum brcmf_sdiod_state state)
{
	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
	    state == sdiodev->state)
		return;

	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
	switch (sdiodev->state) {
	case BRCMF_SDIOD_DATA:
		/* any other state means bus interface is down */
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
		break;
	case BRCMF_SDIOD_DOWN:
		/* transition from DOWN to DATA means bus interface is up */
		if (state == BRCMF_SDIOD_DATA)
			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
		break;
	default:
		break;
	}
	sdiodev->state = state;
}
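
/* Program the function 1 backplane window registers (SBADDRLOW/MID/HIGH)
 * so that 32-bit backplane accesses hit the window containing @addr. The
 * current window base is cached in sdiodev->sbwad to skip redundant writes.
 */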
static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
					    u32 addr)
{
	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
	int err = 0, i;

	if (bar0 == sdiodev->sbwad)
		return 0;

	v = bar0 >> 8;

	for (i = 0; i < 3 && !err; i++, v >>= 8)
		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
				   v & 0xff, &err);

	if (!err)
		sdiodev->sbwad = bar0;

	return err;
}
u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data = 0;
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	data = sdio_readl(sdiodev->func1, addr, &retval);

out:
	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
			u32 data, int *ret)
{
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	sdio_writel(sdiodev->func1, data, addr, &retval);

out:
	if (ret)
		*ret = retval;
}
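
/* Single-skb transfers use the standard mmc block interface: function 1
 * uses incrementing-address CMD53 (sdio_memcpy_*io), while function 2
 * reads use the fixed-address FIFO (sdio_readsb). A -ENOMEDIUM result
 * marks the card as gone.
 */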
static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
				   struct sdio_func *func, u32 addr,
				   struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skb use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	switch (func->num) {
	case 1:
		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
					 req_sz);
		break;
	case 2:
		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
		break;
	default:
		/* bail out as things are really fishy here */
		WARN(1, "invalid sdio function number: %d\n", func->num);
		err = -ENOMEDIUM;
	}

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}

static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
				    struct sdio_func *func, u32 addr,
				    struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skb use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer head pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1 << 31 : 0;	/* write flag */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (func->num == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, func->card);
		mmc_wait_for_req(func->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}
int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);

	return err;
}
int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);

done:
	return err;
}
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb = NULL;
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      pktq->next);
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
					    addr, pktq);

done:
	brcmu_pkt_buf_free_skb(glom_skb);
	return err;
}
int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->cc_core->base;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);

out:
	brcmu_pkt_buf_free_skb(mypkt);

	return err;
}
int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1 || !sdiodev->sg_support) {
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
						       addr, skb);
			if (err)
				break;
		}
	} else {
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
					    addr, pktq);
	}

	return err;
}
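
/* Read or write dongle memory over function 1. Transfers @size bytes
 * to/from backplane @address through a temporary skb, splitting the
 * transfer at SBSDIO window boundaries and moving the backplane window
 * between chunks.
 */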
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int err = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func1);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
		if (err)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);

		if (write) {
			memcpy(pkt->data, data, dsize);
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
						       sdaddr, pkt);
		} else {
			err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
						      sdaddr, pkt);
		}

		if (err) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	sdio_release_host(sdiodev->func1);

	return err;
}
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
	brcmf_dbg(SDIO, "Enter\n");

	/* Issue abort cmd52 command through F0 */
	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);

	brcmf_dbg(SDIO, "Exit\n");

	return 0;
}
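
/* Derive scatter-gather limits from the MMC host capabilities and, when
 * the host supports more than one segment, pre-allocate the sg table used
 * by brcmf_sdiod_sglist_rw(). On allocation failure scatter-gather is
 * simply disabled.
 */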
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func2;
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}
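
/* Suspend/resume freezer: brcmf_sdiod_freezer_on() asks the bus worker
 * threads to park (they call brcmf_sdiod_try_freeze() and wait on the
 * "resumed" completion) and then puts the dongle to sleep;
 * brcmf_sdiod_freezer_off() wakes the dongle and releases the threads.
 */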
#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
	}
}

static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func1);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func1);
	return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func1);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func1);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}

#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func2);
	sdio_disable_func(sdiodev->func2);
	sdio_release_host(sdiodev->func2);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func1);
	sdio_disable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	pm_runtime_allow(sdiodev->func1->card->host->parent);
	return 0;
}

static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}
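
/* Bring up the SDIO device: set the function 1/2 block sizes, enable
 * function 1, attach the suspend freezer and hand over to the common
 * brcmf_sdio_probe(). Any failure unwinds through brcmf_sdiod_remove().
 */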
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;

	sdio_claim_host(sdiodev->func1);

	ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}
#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
						  int val)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(dev);
	if (adev)
		adev->flags.power_manageable = 0;
#endif
}
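
/* The SDIO core calls probe once per card function. Function 1 is merely
 * claimed; all driver state is allocated when function 2 probes, at which
 * point both function pointers are stashed in the brcmf_sdio_dev.
 */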
static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;
	struct device *dev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	dev = &func->dev;

	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* prohibit ACPI power management for this device */
	brcmf_sdiod_acpi_set_power_manageable(dev, 0);

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func1 = func->card->sdio_func[0];
	sdiodev->func2 = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
	sdiodev->dev = &sdiodev->func1->dev;

	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func1->dev, NULL);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		brcmf_sdiod_intr_unregister(sdiodev);

		if (func->num != 1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func1->dev, NULL);
		dev_set_drvdata(&sdiodev->func2->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}
void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
	sdiodev->wowl_enabled = enabled;
}
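
/* System PM: suspend work is done on function 1 (freeze the bus threads,
 * stop the watchdog timer, keep the card powered and arm the wake source
 * when WOWL is enabled); resume is done on function 2 and simply thaws
 * the threads.
 */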
#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	brcmf_sdiod_freezer_on(sdiodev);
	brcmf_sdio_wd_timer(sdiodev->bus, 0);

	sdio_flags = MMC_PM_KEEP_POWER;
	if (sdiodev->wowl_enabled) {
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
	}
	if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
	return 0;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 2)
		return 0;

	brcmf_sdiod_freezer_off(sdiodev);
	return 0;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend = brcmf_ops_sdio_suspend,
	.resume = brcmf_ops_sdio_resume,
};
#endif /* CONFIG_PM_SLEEP */
static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
		.coredump = brcmf_dev_coredump,
	},
};
void brcmf_sdio_register(void)
{
	int ret;

	ret = sdio_register_driver(&brcmf_sdmmc_driver);
	if (ret)
		brcmf_err("sdio_register_driver failed: %d\n", ret);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}