bcmsdh.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "core.h"
#include "common.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK	0x03

#define SDIO_FUNC1_BLOCKSIZE	64
#define SDIO_FUNC2_BLOCKSIZE	512

/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY	3000

#define BRCMF_DEFAULT_RXGLOM_SIZE	32	/* max rx frames in glom chain */
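
/* Bookkeeping used around system suspend/resume: bus threads register
 * themselves via thread_count, frozen_count tracks how many have parked,
 * and the waitqueue/completion pair implements the freeze/thaw handshake
 * (see brcmf_sdiod_freezer_on()/_off() below).
 */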
struct brcmf_sdiod_freezer {
	atomic_t freezing;
	atomic_t thread_count;
	u32 frozen_count;
	wait_queue_head_t thread_freeze;
	struct completion resumed;
};

static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus);

	return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
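
/* Register for dongle interrupts. When the platform supports a dedicated
 * out-of-band interrupt line, request that GPIO IRQ and redirect the card's
 * interrupt to the SEPINT pin; otherwise claim the regular in-band SDIO
 * interrupt on function 1 (plus a dummy handler on function 2).
 */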
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;
	unsigned long flags;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;
		spin_lock_init(&sdiodev->irq_en_lock);
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		sdiodev->irq_en = true;
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
			gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
					  &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}

int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;

	brcmf_dbg(SDIO, "Entering\n");

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		sdio_claim_host(sdiodev->func[1]);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func[1]);

		if (sdiodev->oob_irq_requested) {
			sdiodev->oob_irq_requested = false;
			if (sdiodev->irq_wake) {
				disable_irq_wake(pdata->oob_irq_nr);
				sdiodev->irq_wake = false;
			}
			free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
			sdiodev->irq_en = false;
		}
	} else {
		sdio_claim_host(sdiodev->func[1]);
		sdio_release_irq(sdiodev->func[2]);
		sdio_release_irq(sdiodev->func[1]);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}
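
/* Track the bus state machine; entering or leaving BRCMF_SDIOD_DATA is
 * propagated to the generic bus layer as BRCMF_BUS_UP/BRCMF_BUS_DOWN, and a
 * device marked NOMEDIUM never leaves that state.
 */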
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
			      enum brcmf_sdiod_state state)
{
	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
	    state == sdiodev->state)
		return;

	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
	switch (sdiodev->state) {
	case BRCMF_SDIOD_DATA:
		/* any other state means bus interface is down */
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
		break;
	case BRCMF_SDIOD_DOWN:
		/* transition from DOWN to DATA means bus interface is up */
		if (state == BRCMF_SDIOD_DATA)
			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
		break;
	default:
		break;
	}
	sdiodev->state = state;
}

static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
					uint regaddr, u8 byte)
{
	int err_ret;

	/*
	 * Can only directly write to some F0 registers.
	 * Handle CCCR_IENx and CCCR_ABORT command
	 * as a special case.
	 */
	if ((regaddr == SDIO_CCCR_ABORT) ||
	    (regaddr == SDIO_CCCR_IENx))
		sdio_writeb(func, byte, regaddr, &err_ret);
	else
		sdio_f0_writeb(func, byte, regaddr, &err_ret);

	return err_ret;
}

static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
				    u32 addr, u8 regsz, void *data, bool write)
{
	struct sdio_func *func;
	int ret;

	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
		  write, fn, addr, regsz);

	/* only allow byte access on F0 */
	if (WARN_ON(regsz > 1 && !fn))
		return -EINVAL;
	func = sdiodev->func[fn];

	switch (regsz) {
	case sizeof(u8):
		if (write) {
			if (fn)
				sdio_writeb(func, *(u8 *)data, addr, &ret);
			else
				ret = brcmf_sdiod_f0_writeb(func, addr,
							    *(u8 *)data);
		} else {
			if (fn)
				*(u8 *)data = sdio_readb(func, addr, &ret);
			else
				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
		}
		break;
	case sizeof(u16):
		if (write)
			sdio_writew(func, *(u16 *)data, addr, &ret);
		else
			*(u16 *)data = sdio_readw(func, addr, &ret);
		break;
	case sizeof(u32):
		if (write)
			sdio_writel(func, *(u32 *)data, addr, &ret);
		else
			*(u32 *)data = sdio_readl(func, addr, &ret);
		break;
	default:
		brcmf_err("invalid size: %d\n", regsz);
		break;
	}

	if (ret)
		brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
			  write ? "write" : "read", fn, addr, ret);

	return ret;
}
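
/* Byte/word register access with a small retry loop: transient errors are
 * retried up to SDIOH_API_ACCESS_RETRY_LIMIT times with a ~1 ms pause, and
 * -ENOMEDIUM transitions the device into the NOMEDIUM state.
 */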
static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
				    u8 regsz, void *data, bool write)
{
	u8 func;
	s32 retry = 0;
	int ret;

	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
		return -ENOMEDIUM;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0)
		func = SDIO_FUNC_0;
	else
		func = SDIO_FUNC_1;

	do {
		if (!write)
			memset(data, 0, regsz);
		/* on retry, wait 1 ms for the bus to settle down */
		if (retry)
			usleep_range(1000, 2000);
		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
					       data, write);
	} while (ret != 0 && ret != -ENOMEDIUM &&
		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

	if (ret == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	else if (ret != 0) {
		/*
		 * SleepCSR register access can fail when
		 * waking up the device so reduce this noise
		 * in the logs.
		 */
		if (addr != SBSDIO_FUNC1_SLEEPCSR)
			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
		else
			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
				  write ? "write" : "read", func, addr, ret);
	}
	return ret;
}
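
/* Program the function 1 backplane address window (SBADDRLOW/MID/HIGH) so
 * that subsequent function 1 accesses land in the window containing
 * @address.
 */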
static int
brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
	int err = 0, i;
	u8 addr[3];

	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
		return -ENOMEDIUM;

	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

	for (i = 0; i < 3; i++) {
		err = brcmf_sdiod_regrw_helper(sdiodev,
					       SBSDIO_FUNC1_SBADDRLOW + i,
					       sizeof(u8), &addr[i], true);
		if (err) {
			brcmf_err("failed at addr: 0x%0x\n",
				  SBSDIO_FUNC1_SBADDRLOW + i);
			break;
		}
	}

	return err;
}

static int
brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
	int err = 0;

	if (bar0 != sdiodev->sbwad) {
		err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
		if (err)
			return err;

		sdiodev->sbwad = bar0;
	}

	*addr &= SBSDIO_SB_OFT_ADDR_MASK;

	if (width == 4)
		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	return 0;
}
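
/* Register accessors used throughout the SDIO code: regrb/regwb move single
 * bytes, regrl/regwl move 32-bit words and first adjust the backplane window
 * via brcmf_sdiod_addrprep(). On error the optional @ret argument receives
 * the error code.
 */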
u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u8 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  false);
	brcmf_dbg(SDIO, "data:0x%02x\n", data);

	if (ret)
		*ret = retval;

	return data;
}

u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
	if (retval)
		goto done;
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  false);
	brcmf_dbg(SDIO, "data:0x%08x\n", data);

done:
	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
		       u8 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  true);
	if (ret)
		*ret = retval;
}

void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
		       u32 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
	if (retval)
		goto done;
	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
					  true);

done:
	if (ret)
		*ret = retval;
}
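
/* Single-skb transfer: the request length is padded up to a 4-byte boundary
 * and handed to the standard mmc block I/O helpers; function 2 reads use
 * sdio_readsb() because the F2 read is a FIFO operation (no address
 * increment).
 */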
static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
			      bool write, u32 addr, struct sk_buff *pkt)
{
	unsigned int req_sz;
	int err;

	/* Single skb use the standard mmc interface */
	req_sz = pkt->len + 3;
	req_sz &= (uint)~3;

	if (write)
		err = sdio_memcpy_toio(sdiodev->func[fn], addr,
				       ((u8 *)(pkt->data)), req_sz);
	else if (fn == 1)
		err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
					 addr, req_sz);
	else
		/* function 2 read is FIFO operation */
		err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
				  req_sz);

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}

/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: list of skbs to transfer
 *
 * This function serves as the interface to the MMC stack for block data
 * access. It assumes that the skbs passed down by the caller have already
 * been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = sdiodev->func[fn]->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
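
	/* Build the CMD53 (SD_IO_RW_EXTENDED) argument by hand: R/W flag in
	 * bit 31, function number in bits 28-30, block mode in bit 27, OP
	 * code (address increment) in bit 26; the register address and block
	 * count are filled in per request in the loop below.
	 */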
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1<<27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (fn == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}

int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->sbwad;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		goto done;

	err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);

done:
	return err;
}
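
/* Receive a chain of glommed frames on function 2. With scatter-gather
 * support the whole queue goes out as one sg request; otherwise a single
 * bounce buffer of @totlen bytes is read and copied back into the individual
 * skbs.
 */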
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb;
	struct sk_buff *skb;
	u32 addr = sdiodev->sbwad;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		goto done;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 pktq->next);
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
					 glom_skb);
		if (err) {
			brcmu_pkt_buf_free_skb(glom_skb);
			goto done;
		}

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
		brcmu_pkt_buf_free_skb(glom_skb);
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
					    pktq);

done:
	return err;
}

int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->sbwad;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);

	if (!err)
		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
					 mypkt);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->sbwad;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
	if (err)
		return err;

	if (pktq->qlen == 1 || !sdiodev->sg_support)
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
						 addr, skb);
			if (err)
				break;
		}
	else
		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
					    pktq);

	return err;
}
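
/* Read or write a region of dongle RAM over function 1, splitting the
 * transfer at backplane window boundaries and moving the window as needed;
 * the window is restored to sdiodev->sbwad when done.
 */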
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
					      sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}
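
/* Issue an I/O abort for the given function by writing its number to the
 * CCCR abort register via a function 0 CMD52.
 */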
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
	char t_func = (char)fn;

	brcmf_dbg(SDIO, "Enter\n");

	/* issue abort cmd52 command through F0 */
	brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
				 sizeof(t_func), &t_func, true);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}
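
/* Size the scatter-gather table from the host controller limits (max
 * segments, max request size, max block count) and the configured tx glom
 * size; if the allocation fails, scatter-gather is simply disabled.
 */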
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func[2];
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}

#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
	}
}
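
/* Suspend path: mark the bus as freezing, kick the DPC so the bus threads
 * notice, wait until every registered thread has parked in
 * brcmf_sdiod_try_freeze(), then put the dongle to sleep. The resume path
 * reverses this and releases the threads via the 'resumed' completion.
 */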
static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func[1]);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func[1]);
	return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func[1]);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func[1]);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}
#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */

static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func[2]);
	sdio_disable_func(sdiodev->func[2]);
	sdio_release_host(sdiodev->func[2]);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func[1]);
	sdio_disable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	pm_runtime_allow(sdiodev->func[1]->card->host->parent);
	return 0;
}

static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}
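
/* Bring the two SDIO functions up: set the F1/F2 block sizes, stretch the F2
 * enable timeout so the firmware has time to signal ready, enable F1, attach
 * the freezer and finally hand over to the common SDIO bus layer.
 */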
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;

	sdiodev->num_funcs = 2;

	sdio_claim_host(sdiodev->func[1]);

	ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func[1]);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func[1]);
	sdio_release_host(sdiodev->func[1]);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}

#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
						  int val)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(dev);
	if (adev)
		adev->flags.power_manageable = 0;
#endif
}
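
/* sdio_driver probe callback. The MMC core probes each SDIO function
 * separately; function 1 is accepted silently and all driver state is set up
 * when function 2 appears, since both functions of the Broadcom card are
 * needed.
 */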
static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;
	struct device *dev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	dev = &func->dev;
	/* prohibit ACPI power management for this device */
	brcmf_sdiod_acpi_set_power_manageable(dev, 0);

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
	sdiodev->func[0]->num = 0;
	sdiodev->func[1] = func->card->sdio_func[0];
	sdiodev->func[2] = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
	sdiodev->dev = &sdiodev->func[1]->dev;

	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
	kfree(sdiodev->func[0]);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}

static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	if (func->num != 1)
		return;

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
		dev_set_drvdata(&sdiodev->func[2]->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev->func[0]);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}

void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
	sdiodev->wowl_enabled = enabled;
}

#ifdef CONFIG_PM_SLEEP
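/* Suspend is handled when the core suspends function 1 and resume when it
 * resumes function 2, which means the freezer is only released once both
 * functions are back. MMC_PM_KEEP_POWER keeps the card powered across
 * suspend; with wowl enabled either the OOB IRQ or the in-band SDIO IRQ is
 * armed as the wake source.
 */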
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != SDIO_FUNC_1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	brcmf_sdiod_freezer_on(sdiodev);
	brcmf_sdio_wd_timer(sdiodev->bus, 0);

	sdio_flags = MMC_PM_KEEP_POWER;
	if (sdiodev->wowl_enabled) {
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
	}
	if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
	return 0;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != SDIO_FUNC_2)
		return 0;

	brcmf_sdiod_freezer_off(sdiodev);
	return 0;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
#endif	/* CONFIG_PM_SLEEP */

static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
	},
};

void brcmf_sdio_register(void)
{
	int ret;

	ret = sdio_register_driver(&brcmf_sdmmc_driver);
	if (ret)
		brcmf_err("sdio_register_driver failed: %d\n", ret);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}