bcmsdh.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321
  1. /*
  2. * Copyright (c) 2010 Broadcom Corporation
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. /* ****************** SDIO CARD Interface Functions **************************/
  17. #include <linux/types.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/pci.h>
  20. #include <linux/pci_ids.h>
  21. #include <linux/sched.h>
  22. #include <linux/completion.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/mmc/sdio.h>
  25. #include <linux/mmc/core.h>
  26. #include <linux/mmc/sdio_func.h>
  27. #include <linux/mmc/card.h>
  28. #include <linux/mmc/host.h>
  29. #include <linux/pm_runtime.h>
  30. #include <linux/suspend.h>
  31. #include <linux/errno.h>
  32. #include <linux/module.h>
  33. #include <linux/acpi.h>
  34. #include <net/cfg80211.h>
  35. #include <defs.h>
  36. #include <brcm_hw_ids.h>
  37. #include <brcmu_utils.h>
  38. #include <brcmu_wifi.h>
  39. #include <chipcommon.h>
  40. #include <soc.h>
  41. #include "chip.h"
  42. #include "bus.h"
  43. #include "debug.h"
  44. #include "sdio.h"
  45. #include "core.h"
  46. #include "common.h"
  47. #define SDIOH_API_ACCESS_RETRY_LIMIT 2
  48. #define DMA_ALIGN_MASK 0x03
  49. #define SDIO_FUNC1_BLOCKSIZE 64
  50. #define SDIO_FUNC2_BLOCKSIZE 512
  51. /* Maximum milliseconds to wait for F2 to come up */
  52. #define SDIO_WAIT_F2RDY 3000
  53. #define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
/* State used to park the SDIO bus worker threads around system
 * suspend/resume (see the CONFIG_PM_SLEEP freezer functions below).
 */
struct brcmf_sdiod_freezer {
	atomic_t freezing;		/* non-zero while a freeze is in progress */
	atomic_t thread_count;		/* number of threads expected to freeze */
	u32 frozen_count;		/* threads that have parked so far */
	wait_queue_head_t thread_freeze; /* woken as each thread parks */
	struct completion resumed;	/* completed when resume is done */
};
/* Out-of-band (GPIO) interrupt handler. The line is level-triggered,
 * so it is masked here with disable_irq_nosync() and stays masked
 * until the DPC has serviced the device; otherwise it would re-fire
 * immediately on return.
 */
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus);

	return IRQ_HANDLED;
}
  76. static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
  77. {
  78. struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
  79. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  80. brcmf_dbg(INTR, "IB intr triggered\n");
  81. brcmf_sdio_isr(sdiodev->bus);
  82. }
/* dummy handler for SDIO function 2 interrupt; F2's irq is claimed in
 * brcmf_sdiod_intr_register() but all servicing happens through the
 * function 1 handler
 */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
/* Register the device interrupt source with the host.
 *
 * Two modes:
 *  - out-of-band (OOB): a dedicated GPIO line carries the interrupt;
 *    request that irq and program the card's CCCR registers so the
 *    interrupt is redirected to the separate (SEP) interrupt pin.
 *  - in-band: claim the regular SDIO interrupt via sdio_claim_irq().
 *
 * Returns 0 on success or a negative errno.
 */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;
	unsigned long flags;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;
		spin_lock_init(&sdiodev->irq_en_lock);
		/* irq_en is read by the OOB handler; publish under the lock */
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		sdiodev->irq_en = true;
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			/* NOTE(review): the irq requested above is not freed
			 * on this error path; presumably the caller invokes
			 * brcmf_sdiod_intr_unregister() to unwind - confirm.
			 */
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
			gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
					  &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
		/* F2 irq claimed so the MMC core enables it; serviced via F1 */
		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
		sdiodev->sd_irq_requested = true;
	}

	return 0;
}
  148. void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
  149. {
  150. brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
  151. sdiodev->oob_irq_requested,
  152. sdiodev->sd_irq_requested);
  153. if (sdiodev->oob_irq_requested) {
  154. struct brcmfmac_sdio_pd *pdata;
  155. pdata = &sdiodev->settings->bus.sdio;
  156. sdio_claim_host(sdiodev->func[1]);
  157. brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
  158. brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
  159. sdio_release_host(sdiodev->func[1]);
  160. sdiodev->oob_irq_requested = false;
  161. if (sdiodev->irq_wake) {
  162. disable_irq_wake(pdata->oob_irq_nr);
  163. sdiodev->irq_wake = false;
  164. }
  165. free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
  166. sdiodev->irq_en = false;
  167. sdiodev->oob_irq_requested = false;
  168. }
  169. if (sdiodev->sd_irq_requested) {
  170. sdio_claim_host(sdiodev->func[1]);
  171. sdio_release_irq(sdiodev->func[2]);
  172. sdio_release_irq(sdiodev->func[1]);
  173. sdio_release_host(sdiodev->func[1]);
  174. sdiodev->sd_irq_requested = false;
  175. }
  176. }
  177. void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
  178. enum brcmf_sdiod_state state)
  179. {
  180. if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
  181. state == sdiodev->state)
  182. return;
  183. brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
  184. switch (sdiodev->state) {
  185. case BRCMF_SDIOD_DATA:
  186. /* any other state means bus interface is down */
  187. brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
  188. break;
  189. case BRCMF_SDIOD_DOWN:
  190. /* transition from DOWN to DATA means bus interface is up */
  191. if (state == BRCMF_SDIOD_DATA)
  192. brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
  193. break;
  194. default:
  195. break;
  196. }
  197. sdiodev->state = state;
  198. }
  199. static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
  200. uint regaddr, u8 byte)
  201. {
  202. int err_ret;
  203. /*
  204. * Can only directly write to some F0 registers.
  205. * Handle CCCR_IENx and CCCR_ABORT command
  206. * as a special case.
  207. */
  208. if ((regaddr == SDIO_CCCR_ABORT) ||
  209. (regaddr == SDIO_CCCR_IENx))
  210. sdio_writeb(func, byte, regaddr, &err_ret);
  211. else
  212. sdio_f0_writeb(func, byte, regaddr, &err_ret);
  213. return err_ret;
  214. }
/* Perform a single register access of 1, 2 or 4 bytes on the given
 * SDIO function. Function 0 only permits byte access.
 * Returns 0 on success or a negative errno.
 */
static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
				    u32 addr, u8 regsz, void *data, bool write)
{
	struct sdio_func *func;
	int ret = -EINVAL;

	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		  write, fn, addr, regsz);

	/* only allow byte access on F0 */
	if (WARN_ON(regsz > 1 && !fn))
		return -EINVAL;
	func = sdiodev->func[fn];

	switch (regsz) {
	case sizeof(u8):
		if (write) {
			if (fn)
				sdio_writeb(func, *(u8 *)data, addr, &ret);
			else
				/* F0 writes need special-case routing */
				ret = brcmf_sdiod_f0_writeb(func, addr,
							    *(u8 *)data);
		} else {
			if (fn)
				*(u8 *)data = sdio_readb(func, addr, &ret);
			else
				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
		}
		break;
	case sizeof(u16):
		if (write)
			sdio_writew(func, *(u16 *)data, addr, &ret);
		else
			*(u16 *)data = sdio_readw(func, addr, &ret);
		break;
	case sizeof(u32):
		if (write)
			sdio_writel(func, *(u32 *)data, addr, &ret);
		else
			*(u32 *)data = sdio_readl(func, addr, &ret);
		break;
	default:
		brcmf_err("invalid size: %d\n", regsz);
		break;
	}

	if (ret)
		brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
			  write ? "write" : "read", fn, addr, ret);

	return ret;
}
/* Register access wrapper: selects the SDIO function from the address
 * range, retries transient failures up to SDIOH_API_ACCESS_RETRY_LIMIT
 * times (with a short settle delay), and drops the device into
 * NOMEDIUM state when the card has gone away.
 * Returns 0 on success or a negative errno.
 */
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
				    u8 regsz, void *data, bool write)
{
	u8 func;
	s32 retry = 0;
	int ret;

	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
		return -ENOMEDIUM;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0)
		func = SDIO_FUNC_0;
	else
		func = SDIO_FUNC_1;

	do {
		/* reads start from a zeroed buffer each attempt */
		if (!write)
			memset(data, 0, regsz);
		/* for retry wait for 1 ms till bus get settled down */
		if (retry)
			usleep_range(1000, 2000);
		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
					       data, write);
	} while (ret != 0 && ret != -ENOMEDIUM &&
		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

	if (ret == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	else if (ret != 0) {
		/*
		 * SleepCSR register access can fail when
		 * waking up the device so reduce this noise
		 * in the logs.
		 */
		if (addr != SBSDIO_FUNC1_SLEEPCSR)
			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
		else
			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
	}
	return ret;
}
  307. static int
  308. brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
  309. {
  310. int err = 0, i;
  311. u8 addr[3];
  312. if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
  313. return -ENOMEDIUM;
  314. addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
  315. addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
  316. addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
  317. for (i = 0; i < 3; i++) {
  318. err = brcmf_sdiod_regrw_helper(sdiodev,
  319. SBSDIO_FUNC1_SBADDRLOW + i,
  320. sizeof(u8), &addr[i], true);
  321. if (err) {
  322. brcmf_err("failed at addr: 0x%0x\n",
  323. SBSDIO_FUNC1_SBADDRLOW + i);
  324. break;
  325. }
  326. }
  327. return err;
  328. }
  329. static int
  330. brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
  331. {
  332. uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
  333. int err = 0;
  334. if (bar0 != sdiodev->sbwad) {
  335. err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
  336. if (err)
  337. return err;
  338. sdiodev->sbwad = bar0;
  339. }
  340. *addr &= SBSDIO_SB_OFT_ADDR_MASK;
  341. if (width == 4)
  342. *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
  343. return 0;
  344. }
  345. u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  346. {
  347. u8 data;
  348. int retval;
  349. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  350. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  351. false);
  352. brcmf_dbg(SDIO, "data:0x%02x\n", data);
  353. if (ret)
  354. *ret = retval;
  355. return data;
  356. }
  357. u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  358. {
  359. u32 data = 0;
  360. int retval;
  361. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  362. retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
  363. if (retval)
  364. goto done;
  365. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  366. false);
  367. brcmf_dbg(SDIO, "data:0x%08x\n", data);
  368. done:
  369. if (ret)
  370. *ret = retval;
  371. return data;
  372. }
  373. void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
  374. u8 data, int *ret)
  375. {
  376. int retval;
  377. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
  378. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  379. true);
  380. if (ret)
  381. *ret = retval;
  382. }
  383. void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
  384. u32 data, int *ret)
  385. {
  386. int retval;
  387. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
  388. retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
  389. if (retval)
  390. goto done;
  391. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  392. true);
  393. done:
  394. if (ret)
  395. *ret = retval;
  396. }
  397. static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
  398. bool write, u32 addr, struct sk_buff *pkt)
  399. {
  400. unsigned int req_sz;
  401. int err;
  402. /* Single skb use the standard mmc interface */
  403. req_sz = pkt->len + 3;
  404. req_sz &= (uint)~3;
  405. if (write)
  406. err = sdio_memcpy_toio(sdiodev->func[fn], addr,
  407. ((u8 *)(pkt->data)), req_sz);
  408. else if (fn == 1)
  409. err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
  410. addr, req_sz);
  411. else
  412. /* function 2 read is FIFO operation */
  413. err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
  414. req_sz);
  415. if (err == -ENOMEDIUM)
  416. brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
  417. return err;
  418. }
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb queue holding the data buffers
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		/* build page-sized bounce buffers covering the total
		 * (block-aligned) length and transfer into those instead
		 */
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = sdiodev->func[fn]->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	/* build a raw CMD53 (IO_RW_EXTENDED) block-mode request */
	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1<<27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	/* issue one CMD53 per chunk, bounded by host request/segment limits */
	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			/* clamp each segment to the host limits */
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (fn == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	/* bounce-buffer read: copy the received data back into the
	 * caller's skbs, walking both lists in lockstep
	 */
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	/* reset the sg table for the next caller and drop bounce buffers */
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}
  575. int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
  576. {
  577. struct sk_buff *mypkt;
  578. int err;
  579. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  580. if (!mypkt) {
  581. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  582. nbytes);
  583. return -EIO;
  584. }
  585. err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
  586. if (!err)
  587. memcpy(buf, mypkt->data, nbytes);
  588. brcmu_pkt_buf_free_skb(mypkt);
  589. return err;
  590. }
  591. int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
  592. {
  593. u32 addr = sdiodev->sbwad;
  594. int err = 0;
  595. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
  596. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  597. if (err)
  598. goto done;
  599. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
  600. done:
  601. return err;
  602. }
  603. int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
  604. struct sk_buff_head *pktq, uint totlen)
  605. {
  606. struct sk_buff *glom_skb;
  607. struct sk_buff *skb;
  608. u32 addr = sdiodev->sbwad;
  609. int err = 0;
  610. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
  611. addr, pktq->qlen);
  612. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  613. if (err)
  614. goto done;
  615. if (pktq->qlen == 1)
  616. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
  617. pktq->next);
  618. else if (!sdiodev->sg_support) {
  619. glom_skb = brcmu_pkt_buf_get_skb(totlen);
  620. if (!glom_skb)
  621. return -ENOMEM;
  622. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
  623. glom_skb);
  624. if (err) {
  625. brcmu_pkt_buf_free_skb(glom_skb);
  626. goto done;
  627. }
  628. skb_queue_walk(pktq, skb) {
  629. memcpy(skb->data, glom_skb->data, skb->len);
  630. skb_pull(glom_skb, skb->len);
  631. }
  632. } else
  633. err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
  634. pktq);
  635. done:
  636. return err;
  637. }
  638. int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
  639. {
  640. struct sk_buff *mypkt;
  641. u32 addr = sdiodev->sbwad;
  642. int err;
  643. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  644. if (!mypkt) {
  645. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  646. nbytes);
  647. return -EIO;
  648. }
  649. memcpy(mypkt->data, buf, nbytes);
  650. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  651. if (!err)
  652. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
  653. mypkt);
  654. brcmu_pkt_buf_free_skb(mypkt);
  655. return err;
  656. }
  657. int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
  658. struct sk_buff_head *pktq)
  659. {
  660. struct sk_buff *skb;
  661. u32 addr = sdiodev->sbwad;
  662. int err;
  663. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
  664. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  665. if (err)
  666. return err;
  667. if (pktq->qlen == 1 || !sdiodev->sg_support)
  668. skb_queue_walk(pktq, skb) {
  669. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
  670. addr, skb);
  671. if (err)
  672. break;
  673. }
  674. else
  675. err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
  676. pktq);
  677. return err;
  678. }
/* brcmf_sdiod_ramrw - read from or write to dongle RAM over function 1.
 * @sdiodev: brcmfmac sdio device
 * @write: true to write @data to @address, false to read into @data
 * @address: backplane start address
 * @data: host buffer (source or destination, at least @size bytes)
 * @size: number of bytes to transfer
 *
 * Splits the transfer into backplane-window sized chunks, moving the
 * window as needed, and restores the previous window afterwards.
 * Returns 0 on success or a negative errno.
 */
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		/* first chunk stops at the window boundary */
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
					      sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		/* empty the skb so the same buffer serves the next chunk */
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}
  741. int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
  742. {
  743. char t_func = (char)fn;
  744. brcmf_dbg(SDIO, "Enter\n");
  745. /* issue abort cmd52 command through F0 */
  746. brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
  747. sizeof(t_func), &t_func, true);
  748. brcmf_dbg(SDIO, "Exit\n");
  749. return 0;
  750. }
/* Probe host controller limits and, when the host supports more than
 * one segment, pre-allocate the scatter-gather table used by
 * brcmf_sdiod_sglist_rw(). Falls back to non-sg operation when the
 * table allocation fails.
 */
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func[2];
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	/* CMD53 block count is limited to 9 bits (0x1FF), hence 511 */
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	/* extra entries to absorb packets split across segments */
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}
  781. #ifdef CONFIG_PM_SLEEP
  782. static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
  783. {
  784. sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
  785. if (!sdiodev->freezer)
  786. return -ENOMEM;
  787. atomic_set(&sdiodev->freezer->thread_count, 0);
  788. atomic_set(&sdiodev->freezer->freezing, 0);
  789. init_waitqueue_head(&sdiodev->freezer->thread_freeze);
  790. init_completion(&sdiodev->freezer->resumed);
  791. return 0;
  792. }
  793. static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
  794. {
  795. if (sdiodev->freezer) {
  796. WARN_ON(atomic_read(&sdiodev->freezer->freezing));
  797. kfree(sdiodev->freezer);
  798. }
  799. }
/* Freeze all bus worker threads prior to suspend, then put the dongle
 * to sleep. Returns the result of brcmf_sdio_sleep().
 */
static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	/* Reset per-freeze bookkeeping before raising the freezing flag. */
	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	/* Kick the DPC so bus threads notice the flag promptly. */
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	/* Wait until every registered thread has parked itself in
	 * brcmf_sdiod_try_freeze() (frozen_count catches up to thread_count).
	 */
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	/* All threads quiesced; request dongle sleep for suspend. */
	sdio_claim_host(sdiodev->func[1]);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func[1]);
	return res;
}
/* Resume-side counterpart of brcmf_sdiod_freezer_on(): wake the dongle,
 * clear the freezing flag and release all threads parked in
 * brcmf_sdiod_try_freeze(). Ordering here is deliberate: the chip is
 * awake before any thread is allowed to run again.
 */
static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func[1]);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func[1]);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}
  823. bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
  824. {
  825. return atomic_read(&sdiodev->freezer->freezing);
  826. }
/* Called by bus worker threads at safe points: if a freeze is pending,
 * report this thread as frozen, wake the waiter in
 * brcmf_sdiod_freezer_on(), and block until resume completes.
 */
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	/* Count first, then wake the suspender so it can re-check. */
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	/* Park here until brcmf_sdiod_freezer_off() signals resume. */
	wait_for_completion(&sdiodev->freezer->resumed);
}
/* Register the calling bus thread with the freezer accounting; the
 * suspend path waits for this many threads to park.
 */
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}
/* Deregister the calling bus thread from the freezer accounting. */
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}
  843. #else
/* No-op stub: freezer machinery is only needed with CONFIG_PM_SLEEP. */
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}
/* No-op stub counterpart of the attach stub above. */
static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
  851. #endif /* CONFIG_PM_SLEEP */
/* Tear down the SDIO device: detach the bus layer, disable both SDIO
 * functions, free the scatter-gather table and re-enable runtime PM.
 * Always returns 0.
 */
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func[2]);
	sdio_disable_func(sdiodev->func[2]);
	sdio_release_host(sdiodev->func[2]);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func[1]);
	sdio_disable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	/* Undo the pm_runtime_forbid() done in brcmf_sdiod_host_fixup(). */
	pm_runtime_allow(sdiodev->func[1]->card->host->parent);
	return 0;
}
/* Tweak the mmc host so the dongle stays powered and attached. */
static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}
/* Bring up the SDIO device: configure block sizes for F1/F2, enable F1,
 * attach the freezer and the higher bus layer. On any failure the
 * partially-initialized state is torn down via brcmf_sdiod_remove().
 * Returns 0 on success or a negative errno.
 */
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;

	sdiodev->num_funcs = 2;

	sdio_claim_host(sdiodev->func[1]);

	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}
/* Build one sdio_device_id entry for a Broadcom device id. */
#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
  943. static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
  944. int val)
  945. {
  946. #if IS_ENABLED(CONFIG_ACPI)
  947. struct acpi_device *adev;
  948. adev = ACPI_COMPANION(dev);
  949. if (adev)
  950. adev->flags.power_manageable = 0;
  951. #endif
  952. }
  953. static int brcmf_ops_sdio_probe(struct sdio_func *func,
  954. const struct sdio_device_id *id)
  955. {
  956. int err;
  957. struct brcmf_sdio_dev *sdiodev;
  958. struct brcmf_bus *bus_if;
  959. struct device *dev;
  960. brcmf_dbg(SDIO, "Enter\n");
  961. brcmf_dbg(SDIO, "Class=%x\n", func->class);
  962. brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
  963. brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
  964. brcmf_dbg(SDIO, "Function#: %d\n", func->num);
  965. dev = &func->dev;
  966. /* prohibit ACPI power management for this device */
  967. brcmf_sdiod_acpi_set_power_manageable(dev, 0);
  968. /* Consume func num 1 but dont do anything with it. */
  969. if (func->num == 1)
  970. return 0;
  971. /* Ignore anything but func 2 */
  972. if (func->num != 2)
  973. return -ENODEV;
  974. bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
  975. if (!bus_if)
  976. return -ENOMEM;
  977. sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
  978. if (!sdiodev) {
  979. kfree(bus_if);
  980. return -ENOMEM;
  981. }
  982. /* store refs to functions used. mmc_card does
  983. * not hold the F0 function pointer.
  984. */
  985. sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
  986. sdiodev->func[0]->num = 0;
  987. sdiodev->func[1] = func->card->sdio_func[0];
  988. sdiodev->func[2] = func;
  989. sdiodev->bus_if = bus_if;
  990. bus_if->bus_priv.sdio = sdiodev;
  991. bus_if->proto_type = BRCMF_PROTO_BCDC;
  992. dev_set_drvdata(&func->dev, bus_if);
  993. dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
  994. sdiodev->dev = &sdiodev->func[1]->dev;
  995. brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
  996. brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
  997. err = brcmf_sdiod_probe(sdiodev);
  998. if (err) {
  999. brcmf_err("F2 error, probe failed %d...\n", err);
  1000. goto fail;
  1001. }
  1002. brcmf_dbg(SDIO, "F2 init completed...\n");
  1003. return 0;
  1004. fail:
  1005. dev_set_drvdata(&func->dev, NULL);
  1006. dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
  1007. kfree(sdiodev->func[0]);
  1008. kfree(sdiodev);
  1009. kfree(bus_if);
  1010. return err;
  1011. }
/* sdio_driver remove callback, invoked once per SDIO function.
 * IRQs are unregistered for any function, but the full teardown
 * (bus removal, frees) runs only when F1 is being removed, so it
 * happens exactly once per card.
 */
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		brcmf_sdiod_intr_unregister(sdiodev);

		if (func->num != 1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		brcmf_sdiod_remove(sdiodev);

		/* clear both drvdata refs set at probe time */
		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev->func[0]);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}
  1037. void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
  1038. {
  1039. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1040. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  1041. brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
  1042. sdiodev->wowl_enabled = enabled;
  1043. }
  1044. #ifdef CONFIG_PM_SLEEP
/* System-suspend handler. Runs the real work only for F1: freezes the
 * bus threads, stops the watchdog and programs the host PM flags
 * (keep power; add SDIO-IRQ wakeup when WOWL is enabled without an
 * out-of-band IRQ). Always returns 0 so suspend proceeds.
 */
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != SDIO_FUNC_1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	/* Park all bus threads and sleep the dongle before cutting power. */
	brcmf_sdiod_freezer_on(sdiodev);
	brcmf_sdio_wd_timer(sdiodev->bus, 0);

	sdio_flags = MMC_PM_KEEP_POWER;
	if (sdiodev->wowl_enabled) {
		/* prefer the out-of-band wake IRQ when the platform has one */
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
	}
	if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
	return 0;
}
  1070. static int brcmf_ops_sdio_resume(struct device *dev)
  1071. {
  1072. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1073. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  1074. struct sdio_func *func = container_of(dev, struct sdio_func, dev);
  1075. brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
  1076. if (func->num != SDIO_FUNC_2)
  1077. return 0;
  1078. brcmf_sdiod_freezer_off(sdiodev);
  1079. return 0;
  1080. }
/* System sleep callbacks wired into the sdio_driver below. */
static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
  1085. #endif /* CONFIG_PM_SLEEP */
/* SDIO bus driver descriptor; PM ops are only present with
 * CONFIG_PM_SLEEP.
 */
static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
	},
};
  1098. void brcmf_sdio_register(void)
  1099. {
  1100. int ret;
  1101. ret = sdio_register_driver(&brcmf_sdmmc_driver);
  1102. if (ret)
  1103. brcmf_err("sdio_register_driver failed: %d\n", ret);
  1104. }
  1105. void brcmf_sdio_exit(void)
  1106. {
  1107. brcmf_dbg(SDIO, "Enter\n");
  1108. sdio_unregister_driver(&brcmf_sdmmc_driver);
  1109. }