/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "bus.h"
#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
#include "cfg80211.h"
#include "fwil.h"
#include "fwsignal.h"
#include "feature.h"
#include "proto.h"
#include "pcie.h"

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET	0
#define BRCMF_RXREORDER_MAXIDX_OFFSET	2
#define BRCMF_RXREORDER_FLAGS_OFFSET	4
#define BRCMF_RXREORDER_CURIDX_OFFSET	6
#define BRCMF_RXREORDER_EXPIDX_OFFSET	8

#define BRCMF_RXREORDER_DEL_FLOW	0x01
#define BRCMF_RXREORDER_FLUSH_ALL	0x02
#define BRCMF_RXREORDER_CURIDX_VALID	0x04
#define BRCMF_RXREORDER_EXPIDX_VALID	0x08
#define BRCMF_RXREORDER_NEW_HOLE	0x10

/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif

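/* Return a printable name for the interface at @ifidx, falling back to a
 * placeholder string when the index or interface is invalid.
 */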
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
		brcmf_err("ifidx %d out of range\n", ifidx);
		return "<if_bad>";
	}

	if (drvr->iflist[ifidx] == NULL) {
		brcmf_err("null i/f %d\n", ifidx);
		return "<if_null>";
	}

	if (drvr->iflist[ifidx]->ndev)
		return drvr->iflist[ifidx]->ndev->name;

	return "<if_none>";
}

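/* Work handler that pushes the current multicast address list, the allmulti
 * flag and the promiscuous flag from the net_device down to the firmware.
 */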
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/* Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}

static void
_brcmf_set_mac_address(struct work_struct *work)
{
	struct brcmf_if *ifp;
	s32 err;

	ifp = container_of(work, struct brcmf_if, setmacaddr_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
				       ETH_ALEN);
	if (err < 0) {
		brcmf_err("Setting cur_etheraddr failed, %d\n", err);
	} else {
		brcmf_dbg(TRACE, "MAC address updated to %pM\n",
			  ifp->mac_addr);
		memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
	}
}

static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct sockaddr *sa = (struct sockaddr *)addr;

	memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
	schedule_work(&ifp->setmacaddr_work);
	return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}

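/* ndo_start_xmit handler: validates bus state and headroom, then hands the
 * frame to the firmware-signalling layer. The skb is always consumed, so
 * NETDEV_TX_OK is returned even on error.
 */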
static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *ndev)
{
	int ret;
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);

	brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);

	/* Can the device send data? */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	if (!drvr->iflist[ifp->bssidx]) {
		brcmf_err("bad ifidx %d\n", ifp->bssidx);
		netif_stop_queue(ndev);
		dev_kfree_skb(skb);
		ret = -ENODEV;
		goto done;
	}

	/* Make sure there's enough room for any header */
	if (skb_headroom(skb) < drvr->hdrlen) {
		struct sk_buff *skb2;

		brcmf_dbg(INFO, "%s: insufficient headroom\n",
			  brcmf_ifname(drvr, ifp->bssidx));
		drvr->bus_if->tx_realloc++;
		skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
		dev_kfree_skb(skb);
		skb = skb2;
		if (skb == NULL) {
			brcmf_err("%s: skb_realloc_headroom failed\n",
				  brcmf_ifname(drvr, ifp->bssidx));
			ret = -ENOMEM;
			goto done;
		}
	}

	/* validate length for ether packet */
	if (skb->len < sizeof(*eh)) {
		ret = -EINVAL;
		dev_kfree_skb(skb);
		goto done;
	}

	if (eh->h_proto == htons(ETH_P_PAE))
		atomic_inc(&ifp->pend_8021x_cnt);

	ret = brcmf_fws_process_skb(ifp, skb);

done:
	if (ret) {
		ifp->stats.tx_dropped++;
	} else {
		ifp->stats.tx_packets++;
		ifp->stats.tx_bytes += skb->len;
	}

	/* Return ok: we always eat the packet */
	return NETDEV_TX_OK;
}

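/* Stop or wake the netif queue of a single interface. @reason is tracked as
 * a bitmask so the queue is only woken once all stop reasons are cleared.
 */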
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

void brcmf_txflowblock(struct device *dev, bool state)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	brcmf_fws_bus_blocked(drvr, state);
}

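/* Deliver a received frame to the network stack, after letting the firmware
 * event handler inspect it and dropping it if the interface is not up.
 */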
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}

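/* Move the packets buffered in slots [start, end) of a reorder flow onto
 * @skb_list, wrapping at max_idx, and update the pending packet count.
 */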
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}

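/* Interpret the AMPDU reorder metadata attached to a received packet: create
 * or delete the per-flow reorder state, slot the packet into the reorder
 * buffer, and flush any in-order packets to brcmf_netif_rx().
 */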
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi + 1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicit window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}

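/* Bus receive entry point: strip the protocol header, look up the receiving
 * interface and route the packet through AMPDU reordering when requested.
 */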
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_skb_reorder_data *rd;
	u8 ifidx;
	int ret;

	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

	/* process and remove protocol-specific header */
	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
	ifp = drvr->iflist[ifidx];

	if (ret || !ifp || !ifp->ndev) {
		if ((ret != -ENODATA) && ifp)
			ifp->stats.rx_errors++;
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder)
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	else
		brcmf_netif_rx(ifp, skb);
}

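/* Account for a completed transmit: drop the pending-802.1X count for EAPOL
 * frames, update error statistics and free the packet.
 */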
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
		      bool success)
{
	struct brcmf_if *ifp;
	struct ethhdr *eh;
	u16 type;

	ifp = drvr->iflist[ifidx];
	if (!ifp)
		goto done;

	eh = (struct ethhdr *)(txp->data);
	type = ntohs(eh->h_proto);

	if (type == ETH_P_PAE) {
		atomic_dec(&ifp->pend_8021x_cnt);
		if (waitqueue_active(&ifp->pend_8021x_wait))
			wake_up(&ifp->pend_8021x_wait);
	}

	if (!success)
		ifp->stats.tx_errors++;
done:
	brcmu_pkt_buf_free_skb(txp);
}

void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	u8 ifidx;

	/* await txstatus signal for firmware if active */
	if (brcmf_fws_fc_active(drvr->fws)) {
		if (!success)
			brcmf_fws_bustxfail(drvr->fws, txp);
	} else {
		if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
			brcmu_pkt_buf_free_skb(txp);
		else
			brcmf_txfinalize(drvr, txp, ifidx, success);
	}
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	return &ifp->stats;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				      struct ethtool_drvinfo *info)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	char drev[BRCMU_DOTREV_LEN] = "n/a";

	if (drvr->revinfo.result == 0)
		brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, drev, sizeof(info->version));
	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};

static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}

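/* ndo_open handler: checks that the bus is up, reads the dongle's checksum
 * offload setting ("toe_ol") to decide on NETIF_F_IP_CSUM, and brings up
 * cfg80211 before enabling the transmit queue.
 */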
static int brcmf_netdev_open(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_bus *bus_if = drvr->bus_if;
	u32 toe_ol;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_UP) {
		brcmf_err("failed bus is not ready\n");
		return -EAGAIN;
	}

	atomic_set(&ifp->pend_8021x_cnt, 0);

	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	if (brcmf_cfg80211_up(ndev)) {
		brcmf_err("failed to bring up cfg80211\n");
		return -EIO;
	}

	/* Allow transmit calls */
	netif_start_queue(ndev);
	return 0;
}

static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};

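/* Finish setting up the net_device for an interface and register it with the
 * network stack; on failure the interface slot and net_device are released.
 */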
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
		     drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	ndev->destructor = brcmf_cfg80211_free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	ndev->netdev_ops = &brcmf_netdev_ops_p2p;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	if (register_netdev(ndev) != 0) {
		brcmf_err("couldn't register the p2p net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	return 0;

fail:
	ifp->drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}

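/* Allocate the driver state for a (bsscfg, interface) pair. A net_device is
 * allocated as well unless this is the non-netdev P2P device interface.
 */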
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
				    ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}

static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
	}
}

void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx)
{
	if (drvr->iflist[bssidx]) {
		brcmf_fws_del_interface(drvr->iflist[bssidx]);
		brcmf_del_if(drvr, bssidx);
	}
}

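/* Pick an unused bsscfg index for a new virtual interface, starting the
 * search at 2; returns -ENOMEM when no interface slot is available.
 */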
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
{
	int ifidx;
	int bsscfgidx;
	bool available;
	int highest;

	available = false;
	bsscfgidx = 2;
	highest = 2;
	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
		if (drvr->iflist[ifidx]) {
			if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
				bsscfgidx = highest + 1;
			else if (drvr->iflist[ifidx]->bssidx > highest)
				highest = drvr->iflist[ifidx]->bssidx;
		} else {
			available = true;
		}
	}

	return available ? bsscfgidx : -ENOMEM;
}

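/* Allocate the common driver structure and attach the protocol layer and
 * firmware event handling for a newly probed bus device.
 */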
int brcmf_attach(struct device *dev)
{
	struct brcmf_pub *drvr = NULL;
	int ret = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate primary brcmf_info */
	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
	if (!drvr)
		return -ENOMEM;

	mutex_init(&drvr->proto_block);

	/* Link to bus module */
	drvr->hdrlen = 0;
	drvr->bus_if = dev_get_drvdata(dev);
	drvr->bus_if->drvr = drvr;

	/* create device debugfs folder */
	brcmf_debugfs_attach(drvr);

	/* Attach and link in the protocol */
	ret = brcmf_proto_attach(drvr);
	if (ret != 0) {
		brcmf_err("brcmf_prot_attach failed\n");
		goto fail;
	}

	/* attach firmware event handler */
	brcmf_fweh_attach(drvr);

	return ret;

fail:
	brcmf_detach(dev);

	return ret;
}

static int brcmf_revinfo_read(struct seq_file *s, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(s->private);
	struct brcmf_rev_info *ri = &bus_if->drvr->revinfo;
	char drev[BRCMU_DOTREV_LEN];
	char brev[BRCMU_BOARDREV_LEN];

	seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid);
	seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid);
	seq_printf(s, "radiorev: %s\n", brcmu_dotrev_str(ri->radiorev, drev));
	seq_printf(s, "chipnum: %u (%x)\n", ri->chipnum, ri->chipnum);
	seq_printf(s, "chiprev: %u\n", ri->chiprev);
	seq_printf(s, "chippkg: %u\n", ri->chippkg);
	seq_printf(s, "corerev: %u\n", ri->corerev);
	seq_printf(s, "boardid: 0x%04x\n", ri->boardid);
	seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor);
	seq_printf(s, "boardrev: %s\n", brcmu_boardrev_str(ri->boardrev, brev));
	seq_printf(s, "driverrev: %s\n", brcmu_dotrev_str(ri->driverrev, drev));
	seq_printf(s, "ucoderev: %u\n", ri->ucoderev);
	seq_printf(s, "bus: %u\n", ri->bus);
	seq_printf(s, "phytype: %u\n", ri->phytype);
	seq_printf(s, "phyrev: %u\n", ri->phyrev);
	seq_printf(s, "anarev: %u\n", ri->anarev);
	seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);

	return 0;
}

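/* Called by the bus layer once the firmware is running: create the primary
 * (and optional P2P) interface, run the preinit commands, and attach the
 * feature, firmware-signalling and cfg80211 layers.
 */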
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);

	/* assure we have chipid before feature attach */
	if (!bus_if->chip) {
		bus_if->chip = drvr->revinfo.chipnum;
		bus_if->chiprev = drvr->revinfo.chiprev;
		brcmf_dbg(INFO, "firmware revinfo: chip %x (%d) rev %d\n",
			  bus_if->chip, bus_if->chip, bus_if->chiprev);
	}
	brcmf_feat_attach(drvr);

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}

void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr) {
		drvr->hdrlen += len;
	}
}

static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (drvr) {
		/* Stop the bus module */
		brcmf_bus_stop(drvr->bus_if);
	}
}

void brcmf_dev_reset(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	if (drvr == NULL)
		return;

	if (drvr->iflist[0])
		brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}

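/* Detach the driver from the bus device: stop event handling, remove all
 * interfaces (primary last), then tear down cfg80211, firmware signalling,
 * the bus and the protocol layer.
 */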
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);
	if (drvr->config)
		brcmf_p2p_detach(&drvr->config->p2p);

	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS - 1; i > -1; i--)
		brcmf_remove_interface(drvr, i);

	brcmf_cfg80211_detach(drvr->config);

	brcmf_fws_deinit(drvr);

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}

s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_if *ifp = bus_if->drvr->iflist[0];

	return brcmf_fil_iovar_data_set(ifp, name, data, len);
}

static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}

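/* Wait up to MAX_WAIT_FOR_8021X_TX ms for all pending 802.1X (EAPOL) frames
 * to be transmitted; returns non-zero if the wait timed out.
 */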
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
{
	int err;

	err = wait_event_timeout(ifp->pend_8021x_wait,
				 !brcmf_get_pend_8021x_cnt(ifp),
				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

	WARN_ON(!err);

	return !err;
}

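/* Record the new bus state and, when the bus comes up, wake any interface
 * transmit queues that were stopped while it was down.
 */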
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
{
	struct brcmf_pub *drvr = bus->drvr;
	struct net_device *ndev;
	int ifidx;

	brcmf_dbg(TRACE, "%d -> %d\n", bus->state, state);
	bus->state = state;

	if (state == BRCMF_BUS_UP) {
		for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
			if ((drvr->iflist[ifidx]) &&
			    (drvr->iflist[ifidx]->ndev)) {
				ndev = drvr->iflist[ifidx]->ndev;

				if (netif_queue_stopped(ndev))
					netif_wake_queue(ndev);
			}
		}
	}
}

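/* Register the supported bus drivers (SDIO, USB, PCIe) from a workqueue item
 * that is scheduled at module init.
 */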
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}

static void __exit brcmfmac_module_exit(void)
{
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_exit();
#endif
	brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);