/* dhd_linux.c */
  1. /*
  2. * Copyright (c) 2010 Broadcom Corporation
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/module.h>
  19. #include <net/cfg80211.h>
  20. #include <net/rtnetlink.h>
  21. #include <brcmu_utils.h>
  22. #include <brcmu_wifi.h>
  23. #include "dhd.h"
  24. #include "dhd_bus.h"
  25. #include "dhd_dbg.h"
  26. #include "fwil_types.h"
  27. #include "p2p.h"
  28. #include "wl_cfg80211.h"
  29. #include "fwil.h"
  30. #include "fwsignal.h"
  31. #include "proto.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

/* Max time to wait for pending 802.1x frames to drain (see
 * brcmf_netdev_wait_pend8021x()).
 */
#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions: byte offsets into the firmware-supplied
 * reorder metadata blob, plus the flag bits found at the FLAGS offset.
 */
#define BRCMF_RXREORDER_FLOWID_OFFSET	0
#define BRCMF_RXREORDER_MAXIDX_OFFSET	2
#define BRCMF_RXREORDER_FLAGS_OFFSET	4
#define BRCMF_RXREORDER_CURIDX_OFFSET	6
#define BRCMF_RXREORDER_EXPIDX_OFFSET	8

#define BRCMF_RXREORDER_DEL_FLOW	0x01
#define BRCMF_RXREORDER_FLUSH_ALL	0x02
#define BRCMF_RXREORDER_CURIDX_VALID	0x04
#define BRCMF_RXREORDER_EXPIDX_VALID	0x08
#define BRCMF_RXREORDER_NEW_HOLE	0x10

/* Debug message level, settable at runtime via the 'debug' module param */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
  57. char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
  58. {
  59. if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
  60. brcmf_err("ifidx %d out of range\n", ifidx);
  61. return "<if_bad>";
  62. }
  63. if (drvr->iflist[ifidx] == NULL) {
  64. brcmf_err("null i/f %d\n", ifidx);
  65. return "<if_null>";
  66. }
  67. if (drvr->iflist[ifidx]->ndev)
  68. return drvr->iflist[ifidx]->ndev->name;
  69. return "<if_none>";
  70. }
  71. static void _brcmf_set_multicast_list(struct work_struct *work)
  72. {
  73. struct brcmf_if *ifp;
  74. struct net_device *ndev;
  75. struct netdev_hw_addr *ha;
  76. u32 cmd_value, cnt;
  77. __le32 cnt_le;
  78. char *buf, *bufp;
  79. u32 buflen;
  80. s32 err;
  81. ifp = container_of(work, struct brcmf_if, multicast_work);
  82. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  83. ndev = ifp->ndev;
  84. /* Determine initial value of allmulti flag */
  85. cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
  86. /* Send down the multicast list first. */
  87. cnt = netdev_mc_count(ndev);
  88. buflen = sizeof(cnt) + (cnt * ETH_ALEN);
  89. buf = kmalloc(buflen, GFP_ATOMIC);
  90. if (!buf)
  91. return;
  92. bufp = buf;
  93. cnt_le = cpu_to_le32(cnt);
  94. memcpy(bufp, &cnt_le, sizeof(cnt_le));
  95. bufp += sizeof(cnt_le);
  96. netdev_for_each_mc_addr(ha, ndev) {
  97. if (!cnt)
  98. break;
  99. memcpy(bufp, ha->addr, ETH_ALEN);
  100. bufp += ETH_ALEN;
  101. cnt--;
  102. }
  103. err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
  104. if (err < 0) {
  105. brcmf_err("Setting mcast_list failed, %d\n", err);
  106. cmd_value = cnt ? true : cmd_value;
  107. }
  108. kfree(buf);
  109. /*
  110. * Now send the allmulti setting. This is based on the setting in the
  111. * net_device flags, but might be modified above to be turned on if we
  112. * were trying to set some addresses and dongle rejected it...
  113. */
  114. err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
  115. if (err < 0)
  116. brcmf_err("Setting allmulti failed, %d\n", err);
  117. /*Finally, pick up the PROMISC flag */
  118. cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
  119. err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
  120. if (err < 0)
  121. brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
  122. err);
  123. }
  124. static void
  125. _brcmf_set_mac_address(struct work_struct *work)
  126. {
  127. struct brcmf_if *ifp;
  128. s32 err;
  129. ifp = container_of(work, struct brcmf_if, setmacaddr_work);
  130. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  131. err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
  132. ETH_ALEN);
  133. if (err < 0) {
  134. brcmf_err("Setting cur_etheraddr failed, %d\n", err);
  135. } else {
  136. brcmf_dbg(TRACE, "MAC address updated to %pM\n",
  137. ifp->mac_addr);
  138. memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  139. }
  140. }
  141. static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
  142. {
  143. struct brcmf_if *ifp = netdev_priv(ndev);
  144. struct sockaddr *sa = (struct sockaddr *)addr;
  145. memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
  146. schedule_work(&ifp->setmacaddr_work);
  147. return 0;
  148. }
/* ndo_set_rx_mode: defer the firmware update to the multicast work item. */
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	schedule_work(&ifp->multicast_work);
}
  154. static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
  155. struct net_device *ndev)
  156. {
  157. int ret;
  158. struct brcmf_if *ifp = netdev_priv(ndev);
  159. struct brcmf_pub *drvr = ifp->drvr;
  160. struct ethhdr *eh = (struct ethhdr *)(skb->data);
  161. brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
  162. /* Can the device send data? */
  163. if (drvr->bus_if->state != BRCMF_BUS_DATA) {
  164. brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
  165. netif_stop_queue(ndev);
  166. dev_kfree_skb(skb);
  167. ret = -ENODEV;
  168. goto done;
  169. }
  170. if (!drvr->iflist[ifp->bssidx]) {
  171. brcmf_err("bad ifidx %d\n", ifp->bssidx);
  172. netif_stop_queue(ndev);
  173. dev_kfree_skb(skb);
  174. ret = -ENODEV;
  175. goto done;
  176. }
  177. /* Make sure there's enough room for any header */
  178. if (skb_headroom(skb) < drvr->hdrlen) {
  179. struct sk_buff *skb2;
  180. brcmf_dbg(INFO, "%s: insufficient headroom\n",
  181. brcmf_ifname(drvr, ifp->bssidx));
  182. drvr->bus_if->tx_realloc++;
  183. skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
  184. dev_kfree_skb(skb);
  185. skb = skb2;
  186. if (skb == NULL) {
  187. brcmf_err("%s: skb_realloc_headroom failed\n",
  188. brcmf_ifname(drvr, ifp->bssidx));
  189. ret = -ENOMEM;
  190. goto done;
  191. }
  192. }
  193. /* validate length for ether packet */
  194. if (skb->len < sizeof(*eh)) {
  195. ret = -EINVAL;
  196. dev_kfree_skb(skb);
  197. goto done;
  198. }
  199. if (eh->h_proto == htons(ETH_P_PAE))
  200. atomic_inc(&ifp->pend_8021x_cnt);
  201. ret = brcmf_fws_process_skb(ifp, skb);
  202. done:
  203. if (ret) {
  204. ifp->stats.tx_dropped++;
  205. } else {
  206. ifp->stats.tx_packets++;
  207. ifp->stats.tx_bytes += skb->len;
  208. }
  209. /* Return ok: we always eat the packet */
  210. return NETDEV_TX_OK;
  211. }
/* Set or clear a tx-queue stop @reason for @ifp.
 *
 * ifp->netif_stop is a bitmask of active stop reasons: the queue is
 * stopped when the first reason bit is set and woken again only once
 * every reason has been cleared. Mask and queue state are updated
 * together under netif_stop_lock.
 */
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		/* first stop reason halts the queue */
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		/* wake only when no stop reasons remain */
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
  232. void brcmf_txflowblock(struct device *dev, bool state)
  233. {
  234. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  235. struct brcmf_pub *drvr = bus_if->drvr;
  236. brcmf_dbg(TRACE, "Enter\n");
  237. brcmf_fws_bus_blocked(drvr, state);
  238. }
  239. static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
  240. {
  241. skb->dev = ifp->ndev;
  242. skb->protocol = eth_type_trans(skb, skb->dev);
  243. if (skb->pkt_type == PACKET_MULTICAST)
  244. ifp->stats.multicast++;
  245. /* Process special event packets */
  246. brcmf_fweh_process_skb(ifp->drvr, skb);
  247. if (!(ifp->ndev->flags & IFF_UP)) {
  248. brcmu_pkt_buf_free_skb(skb);
  249. return;
  250. }
  251. ifp->stats.rx_bytes += skb->len;
  252. ifp->stats.rx_packets++;
  253. brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
  254. if (in_interrupt())
  255. netif_rx(skb);
  256. else
  257. /* If the receive is not processed inside an ISR,
  258. * the softirqd must be woken explicitly to service
  259. * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
  260. */
  261. netif_rx_ni(skb);
  262. }
  263. static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
  264. u8 start, u8 end,
  265. struct sk_buff_head *skb_list)
  266. {
  267. /* initialize return list */
  268. __skb_queue_head_init(skb_list);
  269. if (rfi->pend_pkts == 0) {
  270. brcmf_dbg(INFO, "no packets in reorder queue\n");
  271. return;
  272. }
  273. do {
  274. if (rfi->pktslots[start]) {
  275. __skb_queue_tail(skb_list, rfi->pktslots[start]);
  276. rfi->pktslots[start] = NULL;
  277. }
  278. start++;
  279. if (start > rfi->max_idx)
  280. start = 0;
  281. } while (start != end);
  282. rfi->pend_pkts -= skb_queue_len(skb_list);
  283. }
/* Handle the AMPDU rx reorder metadata the firmware attached to @pkt.
 *
 * @reorder_data layout is given by the BRCMF_RXREORDER_*_OFFSET defines:
 * flow id, ring size (max index), flag bits, and the current/expected
 * sequence indices. Out-of-order packets are parked in rfi->pktslots[]
 * (a ring of max_idx+1 slots) until the sequence hole closes; released
 * packets are collected into reorder_list and passed to brcmf_netif_rx()
 * in order at the end.
 */
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		/* start == end flushes every occupied slot in the ring */
		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		/* one slot pointer per sequence index */
		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		/* slot array lives directly behind the struct */
		rfi->pktslots = (struct sk_buff **)(rfi+1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		/* a new hole opens: flush whatever was still parked, then
		 * park this packet at the new current index
		 */
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			/* if the new packet is the one right after the flushed
			 * window it can be released immediately, else park it
			 */
			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicity window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	/* release the collected in-order packets to the stack */
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}
  436. void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
  437. {
  438. struct brcmf_if *ifp;
  439. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  440. struct brcmf_pub *drvr = bus_if->drvr;
  441. struct brcmf_skb_reorder_data *rd;
  442. u8 ifidx;
  443. int ret;
  444. brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
  445. /* process and remove protocol-specific header */
  446. ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
  447. ifp = drvr->iflist[ifidx];
  448. if (ret || !ifp || !ifp->ndev) {
  449. if ((ret != -ENODATA) && ifp)
  450. ifp->stats.rx_errors++;
  451. brcmu_pkt_buf_free_skb(skb);
  452. return;
  453. }
  454. rd = (struct brcmf_skb_reorder_data *)skb->cb;
  455. if (rd->reorder)
  456. brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
  457. else
  458. brcmf_netif_rx(ifp, skb);
  459. }
  460. void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
  461. bool success)
  462. {
  463. struct brcmf_if *ifp;
  464. struct ethhdr *eh;
  465. u16 type;
  466. ifp = drvr->iflist[ifidx];
  467. if (!ifp)
  468. goto done;
  469. eh = (struct ethhdr *)(txp->data);
  470. type = ntohs(eh->h_proto);
  471. if (type == ETH_P_PAE) {
  472. atomic_dec(&ifp->pend_8021x_cnt);
  473. if (waitqueue_active(&ifp->pend_8021x_wait))
  474. wake_up(&ifp->pend_8021x_wait);
  475. }
  476. if (!success)
  477. ifp->stats.tx_errors++;
  478. done:
  479. brcmu_pkt_buf_free_skb(txp);
  480. }
  481. void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
  482. {
  483. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  484. struct brcmf_pub *drvr = bus_if->drvr;
  485. u8 ifidx;
  486. /* await txstatus signal for firmware if active */
  487. if (brcmf_fws_fc_active(drvr->fws)) {
  488. if (!success)
  489. brcmf_fws_bustxfail(drvr->fws, txp);
  490. } else {
  491. if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
  492. brcmu_pkt_buf_free_skb(txp);
  493. else
  494. brcmf_txfinalize(drvr, txp, ifidx, success);
  495. }
  496. }
/* ndo_get_stats: return the driver-maintained counters for this interface. */
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	return &ifp->stats;
}
/* ethtool get_drvinfo: report driver name, firmware version and bus id.
 * The driver version field is not maintained, hence "n/a".
 */
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
				      struct ethtool_drvinfo *info)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	snprintf(info->version, sizeof(info->version), "n/a");
	strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
/* ndo_stop: bring the cfg80211 layer down and halt OS transmissions. */
static int brcmf_netdev_stop(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	brcmf_cfg80211_down(ndev);

	/* Set state and stop OS transmissions */
	netif_stop_queue(ndev);

	return 0;
}
/* ndo_open: verify the bus is up, sync the TOE (tx checksum offload)
 * capability from the dongle into the netdev features, bring cfg80211 up
 * and start the tx queue.
 */
static int brcmf_netdev_open(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_bus *bus_if = drvr->bus_if;
	u32 toe_ol;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_DATA) {
		brcmf_err("failed bus is not ready\n");
		return -EAGAIN;
	}

	/* no 802.1x frames can be pending on a fresh open */
	atomic_set(&ifp->pend_8021x_cnt, 0);

	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	if (brcmf_cfg80211_up(ndev)) {
		brcmf_err("failed to bring up cfg80211\n");
		return -EIO;
	}

	/* Allow transmit calls */
	netif_start_queue(ndev);
	return 0;
}
/* Netdevice operations for regular (non-P2P-device) interfaces */
static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
  561. int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
  562. {
  563. struct brcmf_pub *drvr = ifp->drvr;
  564. struct net_device *ndev;
  565. s32 err;
  566. brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
  567. ifp->mac_addr);
  568. ndev = ifp->ndev;
  569. /* set appropriate operations */
  570. ndev->netdev_ops = &brcmf_netdev_ops_pri;
  571. ndev->hard_header_len += drvr->hdrlen;
  572. ndev->ethtool_ops = &brcmf_ethtool_ops;
  573. drvr->rxsz = ndev->mtu + ndev->hard_header_len +
  574. drvr->hdrlen;
  575. /* set the mac address */
  576. memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  577. INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
  578. INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
  579. if (rtnl_locked)
  580. err = register_netdevice(ndev);
  581. else
  582. err = register_netdev(ndev);
  583. if (err != 0) {
  584. brcmf_err("couldn't register the net device\n");
  585. goto fail;
  586. }
  587. brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
  588. ndev->destructor = brcmf_cfg80211_free_netdev;
  589. return 0;
  590. fail:
  591. drvr->iflist[ifp->bssidx] = NULL;
  592. ndev->netdev_ops = NULL;
  593. free_netdev(ndev);
  594. return -EBADE;
  595. }
/* P2P device netdev callbacks: open/stop map straight onto cfg80211
 * up/down, and any frame handed to xmit is silently discarded.
 */
static int brcmf_net_p2p_open(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
	brcmf_dbg(TRACE, "Enter\n");

	return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	/* no data path on the P2P device interface: drop and report OK */
	if (skb)
		dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
  618. static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
  619. {
  620. struct net_device *ndev;
  621. brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
  622. ifp->mac_addr);
  623. ndev = ifp->ndev;
  624. ndev->netdev_ops = &brcmf_netdev_ops_p2p;
  625. /* set the mac address */
  626. memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  627. if (register_netdev(ndev) != 0) {
  628. brcmf_err("couldn't register the p2p net device\n");
  629. goto fail;
  630. }
  631. brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
  632. return 0;
  633. fail:
  634. ifp->drvr->iflist[ifp->bssidx] = NULL;
  635. ndev->netdev_ops = NULL;
  636. free_netdev(ndev);
  637. return -EBADE;
  638. }
  639. struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
  640. char *name, u8 *mac_addr)
  641. {
  642. struct brcmf_if *ifp;
  643. struct net_device *ndev;
  644. brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);
  645. ifp = drvr->iflist[bssidx];
  646. /*
  647. * Delete the existing interface before overwriting it
  648. * in case we missed the BRCMF_E_IF_DEL event.
  649. */
  650. if (ifp) {
  651. brcmf_err("ERROR: netdev:%s already exists\n",
  652. ifp->ndev->name);
  653. if (ifidx) {
  654. netif_stop_queue(ifp->ndev);
  655. unregister_netdev(ifp->ndev);
  656. free_netdev(ifp->ndev);
  657. drvr->iflist[bssidx] = NULL;
  658. } else {
  659. brcmf_err("ignore IF event\n");
  660. return ERR_PTR(-EINVAL);
  661. }
  662. }
  663. if (!brcmf_p2p_enable && bssidx == 1) {
  664. /* this is P2P_DEVICE interface */
  665. brcmf_dbg(INFO, "allocate non-netdev interface\n");
  666. ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
  667. if (!ifp)
  668. return ERR_PTR(-ENOMEM);
  669. } else {
  670. brcmf_dbg(INFO, "allocate netdev interface\n");
  671. /* Allocate netdev, including space for private structure */
  672. ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
  673. if (!ndev)
  674. return ERR_PTR(-ENOMEM);
  675. ifp = netdev_priv(ndev);
  676. ifp->ndev = ndev;
  677. }
  678. ifp->drvr = drvr;
  679. drvr->iflist[bssidx] = ifp;
  680. ifp->ifidx = ifidx;
  681. ifp->bssidx = bssidx;
  682. init_waitqueue_head(&ifp->pend_8021x_wait);
  683. spin_lock_init(&ifp->netif_stop_lock);
  684. if (mac_addr != NULL)
  685. memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
  686. brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
  687. current->pid, name, ifp->mac_addr);
  688. return ifp;
  689. }
/* Remove the interface at @bssidx and release its resources.
 *
 * The iflist slot is cleared first so later lookups no longer find the
 * interface. For interfaces with a registered netdev, pending work items
 * are cancelled before unregistering; unregister_netdev() then triggers
 * freeing of the netdev (and the embedded brcmf_if). A bare brcmf_if
 * without a netdev is freed directly.
 */
void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				/* primary interface: stop it under RTNL */
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			/* flush deferred MAC/multicast updates first */
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
	} else {
		kfree(ifp);
	}
}
  720. int brcmf_attach(struct device *dev)
  721. {
  722. struct brcmf_pub *drvr = NULL;
  723. int ret = 0;
  724. brcmf_dbg(TRACE, "Enter\n");
  725. /* Allocate primary brcmf_info */
  726. drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
  727. if (!drvr)
  728. return -ENOMEM;
  729. mutex_init(&drvr->proto_block);
  730. /* Link to bus module */
  731. drvr->hdrlen = 0;
  732. drvr->bus_if = dev_get_drvdata(dev);
  733. drvr->bus_if->drvr = drvr;
  734. /* create device debugfs folder */
  735. brcmf_debugfs_attach(drvr);
  736. /* Attach and link in the protocol */
  737. ret = brcmf_proto_attach(drvr);
  738. if (ret != 0) {
  739. brcmf_err("brcmf_prot_attach failed\n");
  740. goto fail;
  741. }
  742. /* attach firmware event handler */
  743. brcmf_fweh_attach(drvr);
  744. return ret;
  745. fail:
  746. brcmf_detach(dev);
  747. return ret;
  748. }
/* Bring the driver up once the bus reports ready: create the primary
 * (and optional P2P) interface, run firmware pre-init commands, start
 * firmware signalling, attach cfg80211, activate firmware events and
 * register the netdev. On any failure everything set up so far is
 * unwound, in reverse, under the fail label.
 */
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	/* P2P interface is optional; failure to create it is not fatal */
	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;
	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		brcmf_err("failed: %d\n", ret);
		brcmf_cfg80211_detach(drvr->config);
		/* drvr->fws non-NULL means brcmf_fws_init() succeeded */
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	/* P2P attach failure just disables P2P, the driver stays up */
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}
  809. void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
  810. {
  811. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  812. struct brcmf_pub *drvr = bus_if->drvr;
  813. if (drvr) {
  814. drvr->hdrlen += len;
  815. }
  816. }
/* Stop the underlying bus for @drvr (NULL-safe). */
static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (drvr) {
		/* Stop the bus module */
		brcmf_bus_stop(drvr->bus_if);
	}
}
  825. void brcmf_dev_reset(struct device *dev)
  826. {
  827. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  828. struct brcmf_pub *drvr = bus_if->drvr;
  829. if (drvr == NULL)
  830. return;
  831. if (drvr->iflist[0])
  832. brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
  833. }
/* Tear down the driver instance attached to @dev (NULL-safe).
 *
 * Teardown order matters: firmware event handling is stopped first, the
 * bus marked down, then interfaces are removed highest-index first so
 * the primary interface goes last; only then are cfg80211, fwsignal,
 * the bus and the protocol layer detached and the instance freed.
 */
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
		if (drvr->iflist[i]) {
			brcmf_fws_del_interface(drvr->iflist[i]);
			brcmf_del_if(drvr, i);
		}

	brcmf_cfg80211_detach(drvr->config);

	brcmf_fws_deinit(drvr);

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}
  859. s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
  860. {
  861. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  862. struct brcmf_if *ifp = bus_if->drvr->iflist[0];
  863. return brcmf_fil_iovar_data_set(ifp, name, data, len);
  864. }
/* Number of 802.1x (EAPOL) frames queued but not yet tx-completed. */
static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}

/* Wait up to MAX_WAIT_FOR_8021X_TX ms for all pending 802.1x frames to
 * complete (counter driven by start_xmit/txfinalize). Returns 0 when
 * they drained, 1 on timeout (with a WARN).
 */
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	int err;

	err = wait_event_timeout(ifp->pend_8021x_wait,
				 !brcmf_get_pend_8021x_cnt(ifp),
				 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

	WARN_ON(!err);

	return !err;
}
  879. /*
  880. * return chip id and rev of the device encoded in u32.
  881. */
  882. u32 brcmf_get_chip_info(struct brcmf_if *ifp)
  883. {
  884. struct brcmf_bus *bus = ifp->drvr->bus_if;
  885. return bus->chip << 4 | bus->chiprev;
  886. }
/* Register the configured bus drivers. Deferred to a work item so that
 * module init returns promptly.
 */
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
	brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_init();
#endif
	/* schedule_work() returns false only if the item is already queued */
	if (!schedule_work(&brcmf_driver_work))
		return -EBUSY;

	return 0;
}

static void __exit brcmfmac_module_exit(void)
{
	/* make sure the deferred registration is not still in flight */
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
	brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);