/* bcmsdh.c - Broadcom fullmac SDIO card interface functions */
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/
  17. #include <linux/types.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/pci.h>
  20. #include <linux/pci_ids.h>
  21. #include <linux/sched.h>
  22. #include <linux/completion.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/scatterlist.h>
  25. #include <linux/mmc/sdio.h>
  26. #include <linux/mmc/core.h>
  27. #include <linux/mmc/sdio_func.h>
  28. #include <linux/mmc/card.h>
  29. #include <linux/mmc/host.h>
  30. #include <linux/pm_runtime.h>
  31. #include <linux/suspend.h>
  32. #include <linux/errno.h>
  33. #include <linux/module.h>
  34. #include <linux/acpi.h>
  35. #include <net/cfg80211.h>
  36. #include <defs.h>
  37. #include <brcm_hw_ids.h>
  38. #include <brcmu_utils.h>
  39. #include <brcmu_wifi.h>
  40. #include <chipcommon.h>
  41. #include <soc.h>
  42. #include "chip.h"
  43. #include "bus.h"
  44. #include "debug.h"
  45. #include "sdio.h"
  46. #include "core.h"
  47. #include "common.h"
  48. #define SDIOH_API_ACCESS_RETRY_LIMIT 2
  49. #define DMA_ALIGN_MASK 0x03
  50. #define SDIO_FUNC1_BLOCKSIZE 64
  51. #define SDIO_FUNC2_BLOCKSIZE 512
  52. /* Maximum milliseconds to wait for F2 to come up */
  53. #define SDIO_WAIT_F2RDY 3000
  54. #define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
/* State used to park the SDIO worker threads across system suspend
 * (see the CONFIG_PM_SLEEP freezer helpers below).
 */
struct brcmf_sdiod_freezer {
	atomic_t freezing;		/* nonzero while a freeze is requested */
	atomic_t thread_count;		/* threads registered with the freezer */
	u32 frozen_count;		/* threads currently parked */
	wait_queue_head_t thread_freeze; /* freezer_on() waits here */
	struct completion resumed;	/* completed when the freeze ends */
};
  62. static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
  63. {
  64. struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
  65. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  66. brcmf_dbg(INTR, "OOB intr triggered\n");
  67. /* out-of-band interrupt is level-triggered which won't
  68. * be cleared until dpc
  69. */
  70. if (sdiodev->irq_en) {
  71. disable_irq_nosync(irq);
  72. sdiodev->irq_en = false;
  73. }
  74. brcmf_sdio_isr(sdiodev->bus);
  75. return IRQ_HANDLED;
  76. }
  77. static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
  78. {
  79. struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
  80. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  81. brcmf_dbg(INTR, "IB intr triggered\n");
  82. brcmf_sdio_isr(sdiodev->bus);
  83. }
/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
	/* Intentionally empty: F2 events are serviced through the F1
	 * handler path; this only satisfies sdio_claim_irq() for F2.
	 */
}
/* Set up the interrupt path: either a platform-specified out-of-band
 * GPIO interrupt (with card-side redirection) or the regular in-band
 * SDIO interrupt for both functions.
 * Returns 0 on success or a negative errno from IRQ setup.
 */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		spin_lock_init(&sdiodev->irq_en_lock);
		sdiodev->irq_en = true;

		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;

		/* allow the OOB line to wake the system from suspend */
		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
			gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
					  &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		/* in-band: F1 gets the real handler, F2 a dummy one */
		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
		sdiodev->sd_irq_requested = true;
	}

	return 0;
}
  146. void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
  147. {
  148. brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
  149. sdiodev->oob_irq_requested,
  150. sdiodev->sd_irq_requested);
  151. if (sdiodev->oob_irq_requested) {
  152. struct brcmfmac_sdio_pd *pdata;
  153. pdata = &sdiodev->settings->bus.sdio;
  154. sdio_claim_host(sdiodev->func[1]);
  155. brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
  156. brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
  157. sdio_release_host(sdiodev->func[1]);
  158. sdiodev->oob_irq_requested = false;
  159. if (sdiodev->irq_wake) {
  160. disable_irq_wake(pdata->oob_irq_nr);
  161. sdiodev->irq_wake = false;
  162. }
  163. free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
  164. sdiodev->irq_en = false;
  165. sdiodev->oob_irq_requested = false;
  166. }
  167. if (sdiodev->sd_irq_requested) {
  168. sdio_claim_host(sdiodev->func[1]);
  169. sdio_release_irq(sdiodev->func[2]);
  170. sdio_release_irq(sdiodev->func[1]);
  171. sdio_release_host(sdiodev->func[1]);
  172. sdiodev->sd_irq_requested = false;
  173. }
  174. }
  175. void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
  176. enum brcmf_sdiod_state state)
  177. {
  178. if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
  179. state == sdiodev->state)
  180. return;
  181. brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
  182. switch (sdiodev->state) {
  183. case BRCMF_SDIOD_DATA:
  184. /* any other state means bus interface is down */
  185. brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
  186. break;
  187. case BRCMF_SDIOD_DOWN:
  188. /* transition from DOWN to DATA means bus interface is up */
  189. if (state == BRCMF_SDIOD_DATA)
  190. brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
  191. break;
  192. default:
  193. break;
  194. }
  195. sdiodev->state = state;
  196. }
  197. static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
  198. uint regaddr, u8 byte)
  199. {
  200. int err_ret;
  201. /*
  202. * Can only directly write to some F0 registers.
  203. * Handle CCCR_IENx and CCCR_ABORT command
  204. * as a special case.
  205. */
  206. if ((regaddr == SDIO_CCCR_ABORT) ||
  207. (regaddr == SDIO_CCCR_IENx))
  208. sdio_writeb(func, byte, regaddr, &err_ret);
  209. else
  210. sdio_f0_writeb(func, byte, regaddr, &err_ret);
  211. return err_ret;
  212. }
/* Perform a single 1-, 2- or 4-byte register access on function @fn at
 * @addr. @data points at the value to write or the buffer to fill on
 * read. Returns 0 on success or a negative errno from the MMC core.
 */
static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
				    u32 addr, u8 regsz, void *data, bool write)
{
	struct sdio_func *func;
	int ret = -EINVAL;

	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		  write, fn, addr, regsz);

	/* only allow byte access on F0 */
	if (WARN_ON(regsz > 1 && !fn))
		return -EINVAL;
	func = sdiodev->func[fn];

	switch (regsz) {
	case sizeof(u8):
		if (write) {
			if (fn)
				sdio_writeb(func, *(u8 *)data, addr, &ret);
			else
				/* F0 writes need special-case routing */
				ret = brcmf_sdiod_f0_writeb(func, addr,
							    *(u8 *)data);
		} else {
			if (fn)
				*(u8 *)data = sdio_readb(func, addr, &ret);
			else
				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
		}
		break;
	case sizeof(u16):
		if (write)
			sdio_writew(func, *(u16 *)data, addr, &ret);
		else
			*(u16 *)data = sdio_readw(func, addr, &ret);
		break;
	case sizeof(u32):
		if (write)
			sdio_writel(func, *(u32 *)data, addr, &ret);
		else
			*(u32 *)data = sdio_readl(func, addr, &ret);
		break;
	default:
		brcmf_err("invalid size: %d\n", regsz);
		break;
	}

	if (ret)
		brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
			  write ? "write" : "read", fn, addr, ret);

	return ret;
}
/* Register access wrapper: picks the SDIO function from the address
 * range and retries transient failures up to
 * SDIOH_API_ACCESS_RETRY_LIMIT times with a short back-off.
 */
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
				    u8 regsz, void *data, bool write)
{
	u8 func;
	s32 retry = 0;
	int ret;

	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
		return -ENOMEDIUM;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0)
		func = SDIO_FUNC_0;
	else
		func = SDIO_FUNC_1;

	do {
		/* start each read attempt from a zeroed buffer */
		if (!write)
			memset(data, 0, regsz);
		/* for retry wait for 1 ms till bus get settled down */
		if (retry)
			usleep_range(1000, 2000);
		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
					       data, write);
	} while (ret != 0 && ret != -ENOMEDIUM &&
		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

	if (ret == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	else if (ret != 0) {
		/*
		 * SleepCSR register access can fail when
		 * waking up the device so reduce this noise
		 * in the logs.
		 */
		if (addr != SBSDIO_FUNC1_SLEEPCSR)
			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
		else
			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
	}
	return ret;
}
  305. static int
  306. brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
  307. {
  308. int err = 0, i;
  309. u8 addr[3];
  310. if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
  311. return -ENOMEDIUM;
  312. addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
  313. addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
  314. addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
  315. for (i = 0; i < 3; i++) {
  316. err = brcmf_sdiod_regrw_helper(sdiodev,
  317. SBSDIO_FUNC1_SBADDRLOW + i,
  318. sizeof(u8), &addr[i], true);
  319. if (err) {
  320. brcmf_err("failed at addr: 0x%0x\n",
  321. SBSDIO_FUNC1_SBADDRLOW + i);
  322. break;
  323. }
  324. }
  325. return err;
  326. }
  327. static int
  328. brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
  329. {
  330. uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
  331. int err = 0;
  332. if (bar0 != sdiodev->sbwad) {
  333. err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
  334. if (err)
  335. return err;
  336. sdiodev->sbwad = bar0;
  337. }
  338. *addr &= SBSDIO_SB_OFT_ADDR_MASK;
  339. if (width == 4)
  340. *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
  341. return 0;
  342. }
  343. u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  344. {
  345. u8 data;
  346. int retval;
  347. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  348. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  349. false);
  350. brcmf_dbg(SDIO, "data:0x%02x\n", data);
  351. if (ret)
  352. *ret = retval;
  353. return data;
  354. }
  355. u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
  356. {
  357. u32 data = 0;
  358. int retval;
  359. brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
  360. retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
  361. if (retval)
  362. goto done;
  363. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  364. false);
  365. brcmf_dbg(SDIO, "data:0x%08x\n", data);
  366. done:
  367. if (ret)
  368. *ret = retval;
  369. return data;
  370. }
  371. void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
  372. u8 data, int *ret)
  373. {
  374. int retval;
  375. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
  376. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  377. true);
  378. if (ret)
  379. *ret = retval;
  380. }
  381. void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
  382. u32 data, int *ret)
  383. {
  384. int retval;
  385. brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
  386. retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
  387. if (retval)
  388. goto done;
  389. retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
  390. true);
  391. done:
  392. if (ret)
  393. *ret = retval;
  394. }
  395. static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
  396. bool write, u32 addr, struct sk_buff *pkt)
  397. {
  398. unsigned int req_sz;
  399. int err;
  400. /* Single skb use the standard mmc interface */
  401. req_sz = pkt->len + 3;
  402. req_sz &= (uint)~3;
  403. if (write)
  404. err = sdio_memcpy_toio(sdiodev->func[fn], addr,
  405. ((u8 *)(pkt->data)), req_sz);
  406. else if (fn == 1)
  407. err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
  408. addr, req_sz);
  409. else
  410. /* function 2 read is FIFO operation */
  411. err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
  412. req_sz);
  413. if (err == -ENOMEDIUM)
  414. brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
  415. return err;
  416. }
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb queue to transfer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		/* build a bounce list of PAGE_SIZE skbs covering the
		 * total (block-aligned) request length
		 */
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = sdiodev->func[fn]->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	/* hand-build a block-mode CMD53 request for the MMC core */
	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1<<27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	/* issue one CMD53 per scatterlist-full of segments */
	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			/* clamp to host segment and request limits */
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (fn == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	/* scatter the bounce buffers back into the caller's skbs */
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}

				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	/* restore the sg table for the next user and drop bounce skbs */
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}
  573. int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
  574. {
  575. struct sk_buff *mypkt;
  576. int err;
  577. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  578. if (!mypkt) {
  579. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  580. nbytes);
  581. return -EIO;
  582. }
  583. err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
  584. if (!err)
  585. memcpy(buf, mypkt->data, nbytes);
  586. brcmu_pkt_buf_free_skb(mypkt);
  587. return err;
  588. }
  589. int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
  590. {
  591. u32 addr = sdiodev->sbwad;
  592. int err = 0;
  593. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
  594. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  595. if (err)
  596. goto done;
  597. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
  598. done:
  599. return err;
  600. }
/* Receive a chain of glommed frames into @pktq (@totlen bytes total).
 * Uses a plain read for a single skb, a bounce buffer when the host
 * lacks scatter-gather support, or a chained SG transfer otherwise.
 */
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb = NULL;
	struct sk_buff *skb;
	u32 addr = sdiodev->sbwad;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		goto done;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 pktq->next);
	else if (!sdiodev->sg_support) {
		/* no SG: read into one bounce skb, then scatter the
		 * data across the queued skbs
		 */
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
					    pktq);

done:
	/* brcmu_pkt_buf_free_skb() tolerates NULL */
	brcmu_pkt_buf_free_skb(glom_skb);
	return err;
}
  635. int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
  636. {
  637. struct sk_buff *mypkt;
  638. u32 addr = sdiodev->sbwad;
  639. int err;
  640. mypkt = brcmu_pkt_buf_get_skb(nbytes);
  641. if (!mypkt) {
  642. brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
  643. nbytes);
  644. return -EIO;
  645. }
  646. memcpy(mypkt->data, buf, nbytes);
  647. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  648. if (!err)
  649. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
  650. mypkt);
  651. brcmu_pkt_buf_free_skb(mypkt);
  652. return err;
  653. }
  654. int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
  655. struct sk_buff_head *pktq)
  656. {
  657. struct sk_buff *skb;
  658. u32 addr = sdiodev->sbwad;
  659. int err;
  660. brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
  661. err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
  662. if (err)
  663. return err;
  664. if (pktq->qlen == 1 || !sdiodev->sg_support)
  665. skb_queue_walk(pktq, skb) {
  666. err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
  667. addr, skb);
  668. if (err)
  669. break;
  670. }
  671. else
  672. err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
  673. pktq);
  674. return err;
  675. }
/* Read or write @size bytes of dongle memory at backplane @address,
 * splitting the transfer wherever it crosses a backplane-window
 * boundary. Restores the window to the previous base before returning.
 * Returns 0 on success or a negative errno.
 */
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	/* clip the first chunk at the window boundary */
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
					      sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		/* reset the skb data area for the next chunk */
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}
  738. int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
  739. {
  740. char t_func = (char)fn;
  741. brcmf_dbg(SDIO, "Enter\n");
  742. /* issue abort cmd52 command through F0 */
  743. brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
  744. sizeof(t_func), &t_func, true);
  745. brcmf_dbg(SDIO, "Exit\n");
  746. return 0;
  747. }
  748. void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
  749. {
  750. struct sdio_func *func;
  751. struct mmc_host *host;
  752. uint max_blocks;
  753. uint nents;
  754. int err;
  755. func = sdiodev->func[2];
  756. host = func->card->host;
  757. sdiodev->sg_support = host->max_segs > 1;
  758. max_blocks = min_t(uint, host->max_blk_count, 511u);
  759. sdiodev->max_request_size = min_t(uint, host->max_req_size,
  760. max_blocks * func->cur_blksize);
  761. sdiodev->max_segment_count = min_t(uint, host->max_segs,
  762. SG_MAX_SINGLE_ALLOC);
  763. sdiodev->max_segment_size = host->max_seg_size;
  764. if (!sdiodev->sg_support)
  765. return;
  766. nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
  767. sdiodev->settings->bus.sdio.txglomsz);
  768. nents += (nents >> 4) + 1;
  769. WARN_ON(nents > sdiodev->max_segment_count);
  770. brcmf_dbg(TRACE, "nents=%d\n", nents);
  771. err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
  772. if (err < 0) {
  773. brcmf_err("allocation failed: disable scatter-gather");
  774. sdiodev->sg_support = false;
  775. }
  776. sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
  777. }
  778. #ifdef CONFIG_PM_SLEEP
  779. static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
  780. {
  781. sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
  782. if (!sdiodev->freezer)
  783. return -ENOMEM;
  784. atomic_set(&sdiodev->freezer->thread_count, 0);
  785. atomic_set(&sdiodev->freezer->freezing, 0);
  786. init_waitqueue_head(&sdiodev->freezer->thread_freeze);
  787. init_completion(&sdiodev->freezer->resumed);
  788. return 0;
  789. }
  790. static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
  791. {
  792. if (sdiodev->freezer) {
  793. WARN_ON(atomic_read(&sdiodev->freezer->freezing));
  794. kfree(sdiodev->freezer);
  795. }
  796. }
/* Freeze all bus worker threads ahead of suspend: raise the freezing
 * flag, kick the DPC so threads notice it, wait until every registered
 * thread has parked itself, then put the chip to sleep.
 * Returns the result of brcmf_sdio_sleep().
 */
static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	/* re-arm the completion that brcmf_sdiod_try_freeze() blocks on */
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	/* wait until every counted thread has reported frozen */
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func[1]);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func[1]);
	return res;
}
/* Undo brcmf_sdiod_freezer_on() after resume: wake the chip, clear the
 * freezing flag and release every thread parked in
 * brcmf_sdiod_try_freeze().
 */
static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func[1]);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func[1]);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}
/* Return true while a suspend-time freeze cycle is in progress. */
bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return atomic_read(&sdiodev->freezer->freezing);
}
/* Called by bus worker threads: if a freeze is in progress, count this
 * thread as frozen, wake the freezer waiter and block here until resume
 * signals the "resumed" completion.
 */
void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}
/* Register the calling thread with the freezer (it must then call
 * brcmf_sdiod_try_freeze() periodically).
 */
void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}
/* Unregister the calling thread from the freezer. */
void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}
  840. #else
/* !CONFIG_PM_SLEEP: freezer support compiles away to a no-op. */
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}
/* !CONFIG_PM_SLEEP: nothing to release. */
static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
  848. #endif /* CONFIG_PM_SLEEP */
/* Tear down the SDIO device: detach the higher bus layer, drop the
 * freezer, disable both SDIO functions, free the scatter-gather table
 * and re-allow runtime PM on the host controller. Always returns 0.
 */
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func[2]);
	sdio_disable_func(sdiodev->func[2]);
	sdio_release_host(sdiodev->func[2]);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func[1]);
	sdio_disable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	/* undo the pm_runtime_forbid() applied in brcmf_sdiod_host_fixup() */
	pm_runtime_allow(sdiodev->func[1]->card->host->parent);
	return 0;
}
/* Adjust MMC host behaviour for this device: keep runtime PM from
 * powering it off, and mark it non-removable so the core does not
 * re-detect the card on resume.
 */
static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}
/* Bring the SDIO device up: configure F1/F2 block sizes, enable F1,
 * attach the freezer and the higher bus layer, then fix up the MMC
 * host. On any failure the partial setup is unwound via
 * brcmf_sdiod_remove(). Returns 0 or a negative errno.
 */
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;

	sdiodev->num_funcs = 2;

	/* F1 host claim also covers the F2 block-size access below */
	sdio_claim_host(sdiodev->func[1]);

	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}
/* Shorthand for a Broadcom vendor-ID SDIO device table entry. */
#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
  941. static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
  942. int val)
  943. {
  944. #if IS_ENABLED(CONFIG_ACPI)
  945. struct acpi_device *adev;
  946. adev = ACPI_COMPANION(dev);
  947. if (adev)
  948. adev->flags.power_manageable = 0;
  949. #endif
  950. }
  951. static int brcmf_ops_sdio_probe(struct sdio_func *func,
  952. const struct sdio_device_id *id)
  953. {
  954. int err;
  955. struct brcmf_sdio_dev *sdiodev;
  956. struct brcmf_bus *bus_if;
  957. struct device *dev;
  958. brcmf_dbg(SDIO, "Enter\n");
  959. brcmf_dbg(SDIO, "Class=%x\n", func->class);
  960. brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
  961. brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
  962. brcmf_dbg(SDIO, "Function#: %d\n", func->num);
  963. dev = &func->dev;
  964. /* prohibit ACPI power management for this device */
  965. brcmf_sdiod_acpi_set_power_manageable(dev, 0);
  966. /* Consume func num 1 but dont do anything with it. */
  967. if (func->num == 1)
  968. return 0;
  969. /* Ignore anything but func 2 */
  970. if (func->num != 2)
  971. return -ENODEV;
  972. bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
  973. if (!bus_if)
  974. return -ENOMEM;
  975. sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
  976. if (!sdiodev) {
  977. kfree(bus_if);
  978. return -ENOMEM;
  979. }
  980. /* store refs to functions used. mmc_card does
  981. * not hold the F0 function pointer.
  982. */
  983. sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
  984. sdiodev->func[0]->num = 0;
  985. sdiodev->func[1] = func->card->sdio_func[0];
  986. sdiodev->func[2] = func;
  987. sdiodev->bus_if = bus_if;
  988. bus_if->bus_priv.sdio = sdiodev;
  989. bus_if->proto_type = BRCMF_PROTO_BCDC;
  990. dev_set_drvdata(&func->dev, bus_if);
  991. dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
  992. sdiodev->dev = &sdiodev->func[1]->dev;
  993. brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
  994. brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
  995. err = brcmf_sdiod_probe(sdiodev);
  996. if (err) {
  997. brcmf_err("F2 error, probe failed %d...\n", err);
  998. goto fail;
  999. }
  1000. brcmf_dbg(SDIO, "F2 init completed...\n");
  1001. return 0;
  1002. fail:
  1003. dev_set_drvdata(&func->dev, NULL);
  1004. dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
  1005. kfree(sdiodev->func[0]);
  1006. kfree(sdiodev);
  1007. kfree(bus_if);
  1008. return err;
  1009. }
/* SDIO core remove callback, invoked once per function. IRQs are
 * unregistered for whichever function is removed first; the full
 * teardown (bus detach, frees) only runs for function 1.
 */
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		brcmf_sdiod_intr_unregister(sdiodev);

		if (func->num != 1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev->func[0]);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}
  1035. void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
  1036. {
  1037. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  1038. struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
  1039. brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
  1040. sdiodev->wowl_enabled = enabled;
  1041. }
  1042. #ifdef CONFIG_PM_SLEEP
/* System-suspend handler. Only acts for F1 (the freezer is turned on
 * here and off in the F2 resume handler — presumably to bracket the
 * whole suspend/resume window; confirm against the MMC core's per-
 * function PM callback ordering). Freezes bus threads, stops the
 * watchdog and arms the configured wakeup path.
 */
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != SDIO_FUNC_1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	/* NOTE(review): the freezer_on() return value is ignored, so a
	 * failed chip-sleep does not abort the suspend — verify intent.
	 */
	brcmf_sdiod_freezer_on(sdiodev);
	brcmf_sdio_wd_timer(sdiodev->bus, 0);

	sdio_flags = MMC_PM_KEEP_POWER;
	if (sdiodev->wowl_enabled) {
		/* wake via OOB GPIO irq when available, else SDIO irq */
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
	}
	if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
	return 0;
}
/* System-resume handler. Only acts for F2, releasing the threads that
 * were frozen in the F1 suspend handler. Always returns 0.
 */
static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != SDIO_FUNC_2)
		return 0;

	brcmf_sdiod_freezer_off(sdiodev);
	return 0;
}
/* System sleep callbacks (CONFIG_PM_SLEEP only). */
static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
  1083. #endif /* CONFIG_PM_SLEEP */
/* SDIO function driver registration record. */
static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
	},
};
  1096. void brcmf_sdio_register(void)
  1097. {
  1098. int ret;
  1099. ret = sdio_register_driver(&brcmf_sdmmc_driver);
  1100. if (ret)
  1101. brcmf_err("sdio_register_driver failed: %d\n", ret);
  1102. }
/* Unregister the SDIO function driver at module exit. */
void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}