core.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153
  1. /*
  2. * Copyright (c) 2010 Broadcom Corporation
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/module.h>
  19. #include <net/cfg80211.h>
  20. #include <net/rtnetlink.h>
  21. #include <brcmu_utils.h>
  22. #include <brcmu_wifi.h>
  23. #include "core.h"
  24. #include "bus.h"
  25. #include "debug.h"
  26. #include "fwil_types.h"
  27. #include "p2p.h"
  28. #include "cfg80211.h"
  29. #include "fwil.h"
  30. #include "fwsignal.h"
  31. #include "feature.h"
  32. #include "proto.h"
  33. #include "pcie.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

/* msecs; wait budget for draining pending 802.1X tx frames
 * (consumer of this constant is outside this chunk — confirm)
 */
#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */

/* AMPDU rx reordering definitions */
/* byte offsets into the firmware-supplied reorder metadata blob */
#define BRCMF_RXREORDER_FLOWID_OFFSET 0
#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
#define BRCMF_RXREORDER_FLAGS_OFFSET 4
#define BRCMF_RXREORDER_CURIDX_OFFSET 6
#define BRCMF_RXREORDER_EXPIDX_OFFSET 8

/* flag bits carried at BRCMF_RXREORDER_FLAGS_OFFSET */
#define BRCMF_RXREORDER_DEL_FLOW 0x01
#define BRCMF_RXREORDER_FLUSH_ALL 0x02
#define BRCMF_RXREORDER_CURIDX_VALID 0x04
#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
#define BRCMF_RXREORDER_NEW_HOLE 0x10

/* Error bits */
/* debug message level; writable at runtime through the "debug" param */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
/* the p2pon knob is only exposed on debug builds */
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
  59. char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
  60. {
  61. if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
  62. brcmf_err("ifidx %d out of range\n", ifidx);
  63. return "<if_bad>";
  64. }
  65. if (drvr->iflist[ifidx] == NULL) {
  66. brcmf_err("null i/f %d\n", ifidx);
  67. return "<if_null>";
  68. }
  69. if (drvr->iflist[ifidx]->ndev)
  70. return drvr->iflist[ifidx]->ndev->name;
  71. return "<if_none>";
  72. }
/* Deferred handler for brcmf_netdev_set_multicast_list(): pushes the
 * netdev's multicast address list, the allmulti flag and the promisc
 * flag down to the firmware.  Runs from the system workqueue, so the
 * (possibly sleeping) firmware interface calls are allowed here.
 */
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	cnt = netdev_mc_count(ndev);
	/* firmware buffer layout: le32 count followed by cnt MAC addresses */
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	netdev_for_each_mc_addr(ha, ndev) {
		/* the mc list may have grown since cnt was sampled;
		 * never write past the buffer sized for cnt entries
		 */
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/*Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}
  126. static void
  127. _brcmf_set_mac_address(struct work_struct *work)
  128. {
  129. struct brcmf_if *ifp;
  130. s32 err;
  131. ifp = container_of(work, struct brcmf_if, setmacaddr_work);
  132. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  133. err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
  134. ETH_ALEN);
  135. if (err < 0) {
  136. brcmf_err("Setting cur_etheraddr failed, %d\n", err);
  137. } else {
  138. brcmf_dbg(TRACE, "MAC address updated to %pM\n",
  139. ifp->mac_addr);
  140. memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  141. }
  142. }
  143. static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
  144. {
  145. struct brcmf_if *ifp = netdev_priv(ndev);
  146. struct sockaddr *sa = (struct sockaddr *)addr;
  147. memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
  148. schedule_work(&ifp->setmacaddr_work);
  149. return 0;
  150. }
  151. static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
  152. {
  153. struct brcmf_if *ifp = netdev_priv(ndev);
  154. schedule_work(&ifp->multicast_work);
  155. }
  156. static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
  157. struct net_device *ndev)
  158. {
  159. int ret;
  160. struct brcmf_if *ifp = netdev_priv(ndev);
  161. struct brcmf_pub *drvr = ifp->drvr;
  162. struct ethhdr *eh = (struct ethhdr *)(skb->data);
  163. brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
  164. /* Can the device send data? */
  165. if (drvr->bus_if->state != BRCMF_BUS_DATA) {
  166. brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
  167. netif_stop_queue(ndev);
  168. dev_kfree_skb(skb);
  169. ret = -ENODEV;
  170. goto done;
  171. }
  172. if (!drvr->iflist[ifp->bssidx]) {
  173. brcmf_err("bad ifidx %d\n", ifp->bssidx);
  174. netif_stop_queue(ndev);
  175. dev_kfree_skb(skb);
  176. ret = -ENODEV;
  177. goto done;
  178. }
  179. /* Make sure there's enough room for any header */
  180. if (skb_headroom(skb) < drvr->hdrlen) {
  181. struct sk_buff *skb2;
  182. brcmf_dbg(INFO, "%s: insufficient headroom\n",
  183. brcmf_ifname(drvr, ifp->bssidx));
  184. drvr->bus_if->tx_realloc++;
  185. skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
  186. dev_kfree_skb(skb);
  187. skb = skb2;
  188. if (skb == NULL) {
  189. brcmf_err("%s: skb_realloc_headroom failed\n",
  190. brcmf_ifname(drvr, ifp->bssidx));
  191. ret = -ENOMEM;
  192. goto done;
  193. }
  194. }
  195. /* validate length for ether packet */
  196. if (skb->len < sizeof(*eh)) {
  197. ret = -EINVAL;
  198. dev_kfree_skb(skb);
  199. goto done;
  200. }
  201. if (eh->h_proto == htons(ETH_P_PAE))
  202. atomic_inc(&ifp->pend_8021x_cnt);
  203. ret = brcmf_fws_process_skb(ifp, skb);
  204. done:
  205. if (ret) {
  206. ifp->stats.tx_dropped++;
  207. } else {
  208. ifp->stats.tx_packets++;
  209. ifp->stats.tx_bytes += skb->len;
  210. }
  211. /* Return ok: we always eat the packet */
  212. return NETDEV_TX_OK;
  213. }
/* Stop or wake the tx queue of @ifp's netdev for @reason.  Reasons
 * are OR-ed into ifp->netif_stop, so the queue stops on the first
 * reason set and only wakes once every reason has been cleared.
 * State is protected by netif_stop_lock with IRQs disabled, so this
 * is callable from any context.
 */
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		/* only the first reason actually stops the queue */
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
  234. void brcmf_txflowblock(struct device *dev, bool state)
  235. {
  236. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  237. struct brcmf_pub *drvr = bus_if->drvr;
  238. brcmf_dbg(TRACE, "Enter\n");
  239. brcmf_fws_bus_blocked(drvr, state);
  240. }
/* Deliver a received frame for @ifp to the network stack.  Consumes
 * @skb on every path.  Firmware event packets are examined by the
 * event handler before normal delivery.
 */
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	/* drop frames arriving while the interface is down */
	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}
/* Collect the packets stored in reorder slots [start, end) of @rfi
 * onto @skb_list, walking the slot ring with wrap-around at max_idx
 * and clearing each slot taken.  Note the do/while: when start == end
 * on entry the walk covers the entire ring, which is how callers
 * flush every pending slot.
 */
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	/* account for everything just moved off the ring */
	rfi->pend_pkts -= skb_queue_len(skb_list);
}
/* AMPDU rx reorder state machine.  @reorder_data is the metadata blob
 * the firmware attached to @pkt (offsets/flags defined above).  The
 * packet is either buffered in the per-flow slot ring or released to
 * the stack together with any packets that became in-order.  Consumes
 * @pkt on every path.
 */
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];

	/* flow teardown: flush everything pending plus this packet */
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		/* start == end flushes the whole ring (see helper) */
		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		/* the slot array is carved out of the same allocation,
		 * directly behind the struct (see pktslots below)
		 */
		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi+1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		/* a new hole opens: flush anything still pending from
		 * the previous window, then park this packet
		 */
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			/* a slot should never be occupied here; free a
			 * stale packet rather than leak it
			 */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			/* NOTE(review): shadows the function-scope
			 * end_idx declared above; harmless but easy to
			 * misread
			 */
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicity window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	/* hand everything that became in-order to the stack */
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}
/* Bus receive entry point: strip the protocol header, map the frame
 * to its interface, then route it through AMPDU reordering or
 * straight to the stack.  Consumes @skb on every path.
 */
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
	struct brcmf_if *ifp;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_skb_reorder_data *rd;
	u8 ifidx;
	int ret;

	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

	/* process and remove protocol-specific header */
	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
	ifp = drvr->iflist[ifidx];

	if (ret || !ifp || !ifp->ndev) {
		/* -ENODATA is deliberately not counted as an rx error —
		 * presumably a frame fully consumed by the protocol
		 * layer; confirm against brcmf_proto_hdrpull()
		 */
		if ((ret != -ENODATA) && ifp)
			ifp->stats.rx_errors++;
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	/* hdrpull may have left reorder metadata in the skb control block */
	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder)
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	else
		brcmf_netif_rx(ifp, skb);
}
  462. void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
  463. bool success)
  464. {
  465. struct brcmf_if *ifp;
  466. struct ethhdr *eh;
  467. u16 type;
  468. ifp = drvr->iflist[ifidx];
  469. if (!ifp)
  470. goto done;
  471. eh = (struct ethhdr *)(txp->data);
  472. type = ntohs(eh->h_proto);
  473. if (type == ETH_P_PAE) {
  474. atomic_dec(&ifp->pend_8021x_cnt);
  475. if (waitqueue_active(&ifp->pend_8021x_wait))
  476. wake_up(&ifp->pend_8021x_wait);
  477. }
  478. if (!success)
  479. ifp->stats.tx_errors++;
  480. done:
  481. brcmu_pkt_buf_free_skb(txp);
  482. }
  483. void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
  484. {
  485. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  486. struct brcmf_pub *drvr = bus_if->drvr;
  487. u8 ifidx;
  488. /* await txstatus signal for firmware if active */
  489. if (brcmf_fws_fc_active(drvr->fws)) {
  490. if (!success)
  491. brcmf_fws_bustxfail(drvr->fws, txp);
  492. } else {
  493. if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
  494. brcmu_pkt_buf_free_skb(txp);
  495. else
  496. brcmf_txfinalize(drvr, txp, ifidx, success);
  497. }
  498. }
  499. static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
  500. {
  501. struct brcmf_if *ifp = netdev_priv(ndev);
  502. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  503. return &ifp->stats;
  504. }
  505. static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
  506. struct ethtool_drvinfo *info)
  507. {
  508. struct brcmf_if *ifp = netdev_priv(ndev);
  509. struct brcmf_pub *drvr = ifp->drvr;
  510. strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
  511. snprintf(info->version, sizeof(info->version), "n/a");
  512. strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
  513. strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
  514. sizeof(info->bus_info));
  515. }
/* only drvinfo is implemented; other ethtool queries use kernel defaults */
static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
  519. static int brcmf_netdev_stop(struct net_device *ndev)
  520. {
  521. struct brcmf_if *ifp = netdev_priv(ndev);
  522. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  523. brcmf_cfg80211_down(ndev);
  524. /* Set state and stop OS transmissions */
  525. netif_stop_queue(ndev);
  526. return 0;
  527. }
/* .ndo_open for the primary interface: verify the bus is up, refresh
 * the TX checksum-offload capability from the dongle, bring cfg80211
 * up, then enable the tx queue.  Returns 0 or a negative errno.
 */
static int brcmf_netdev_open(struct net_device *ndev)
{
	struct brcmf_if *ifp = netdev_priv(ndev);
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_bus *bus_if = drvr->bus_if;
	u32 toe_ol;

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	/* If bus is not ready, can't continue */
	if (bus_if->state != BRCMF_BUS_DATA) {
		brcmf_err("failed bus is not ready\n");
		return -EAGAIN;
	}

	/* no 802.1X exchange can be pending on a fresh open */
	atomic_set(&ifp->pend_8021x_cnt, 0);

	/* Get current TOE mode from dongle */
	if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
	    && (toe_ol & TOE_TX_CSUM_OL) != 0)
		ndev->features |= NETIF_F_IP_CSUM;
	else
		ndev->features &= ~NETIF_F_IP_CSUM;

	if (brcmf_cfg80211_up(ndev)) {
		brcmf_err("failed to bring up cfg80211\n");
		return -EIO;
	}

	/* Allow transmit calls */
	netif_start_queue(ndev);
	return 0;
}
/* netdev ops for the primary (fullmac data) interface */
static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
/* Register @ifp's netdev with the kernel: install ops, reserve bus
 * header room, hook up ethtool and set the MAC address, then call
 * register_netdev[ice]() depending on @rtnl_locked.  On failure the
 * netdev is freed and the iflist slot cleared (returns -EBADE); on
 * success the cfg80211 destructor takes over freeing (returns 0).
 */
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	/* reserve room for the bus/protocol header on every tx */
	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
		     drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	/* callers already holding RTNL must use register_netdevice() */
	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	/* from now on the netdev is freed via its destructor */
	ndev->destructor = brcmf_cfg80211_free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}
  598. static int brcmf_net_p2p_open(struct net_device *ndev)
  599. {
  600. brcmf_dbg(TRACE, "Enter\n");
  601. return brcmf_cfg80211_up(ndev);
  602. }
  603. static int brcmf_net_p2p_stop(struct net_device *ndev)
  604. {
  605. brcmf_dbg(TRACE, "Enter\n");
  606. return brcmf_cfg80211_down(ndev);
  607. }
  608. static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
  609. struct net_device *ndev)
  610. {
  611. if (skb)
  612. dev_kfree_skb_any(skb);
  613. return NETDEV_TX_OK;
  614. }
/* netdev ops for the p2p management interface (no data path) */
static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
  620. static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
  621. {
  622. struct net_device *ndev;
  623. brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
  624. ifp->mac_addr);
  625. ndev = ifp->ndev;
  626. ndev->netdev_ops = &brcmf_netdev_ops_p2p;
  627. /* set the mac address */
  628. memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  629. if (register_netdev(ndev) != 0) {
  630. brcmf_err("couldn't register the p2p net device\n");
  631. goto fail;
  632. }
  633. brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
  634. return 0;
  635. fail:
  636. ifp->drvr->iflist[ifp->bssidx] = NULL;
  637. ndev->netdev_ops = NULL;
  638. free_netdev(ndev);
  639. return -EBADE;
  640. }
/* Allocate an interface object for (@bssidx, @ifidx).  For bssidx 1
 * with p2p disabled a bare brcmf_if (no netdev) is allocated for the
 * P2P_DEVICE; otherwise a netdev with the brcmf_if embedded as its
 * private area is created.  A stale entry at @bssidx is torn down
 * first.  Returns the new brcmf_if or an ERR_PTR on failure.
 */
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			/* never replace the primary interface */
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
				    ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}
/* Tear down the interface at @bssidx: stop its queue (primary gets a
 * full netdev_stop under RTNL), flush pending work, then either
 * unregister the netdev (which frees it via its destructor) or, for
 * the non-netdev P2P_DEVICE case, free the bare brcmf_if.
 */
static void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	/* clear the slot first so no new users can find the interface */
	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		/* only the primary ops install these work items */
		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
	} else {
		kfree(ifp);
	}
}
  723. void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx)
  724. {
  725. if (drvr->iflist[bssidx]) {
  726. brcmf_fws_del_interface(drvr->iflist[bssidx]);
  727. brcmf_del_if(drvr, bssidx);
  728. }
  729. }
/* Pick a bsscfg index for a new virtual interface.  Candidates start
 * at 2 (indices 0 and 1 are used by the primary and p2p interfaces).
 * The single pass bumps the candidate to highest+1 whenever an entry
 * collides with it; since highest+1 exceeds every index seen so far,
 * the result is never in use, though lower free indices may be
 * skipped.  Returns the index, or -ENOMEM if all iflist slots are
 * occupied.
 */
int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
{
	int ifidx;
	int bsscfgidx;
	bool available;
	int highest;

	available = false;
	bsscfgidx = 2;
	highest = 2;
	for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
		if (drvr->iflist[ifidx]) {
			if (drvr->iflist[ifidx]->bssidx == bsscfgidx)
				bsscfgidx = highest + 1;
			else if (drvr->iflist[ifidx]->bssidx > highest)
				highest = drvr->iflist[ifidx]->bssidx;
		} else {
			/* at least one free slot exists */
			available = true;
		}
	}
	return available ? bsscfgidx : -ENOMEM;
}
/* Allocate the driver instance for the bus at @dev, link it to the
 * bus, and attach the protocol and firmware-event layers.  Returns 0
 * or a negative errno; on failure everything set up so far is undone
 * via brcmf_detach().
 */
int brcmf_attach(struct device *dev)
{
	struct brcmf_pub *drvr = NULL;
	int ret = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate primary brcmf_info */
	drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
	if (!drvr)
		return -ENOMEM;

	mutex_init(&drvr->proto_block);

	/* Link to bus module */
	drvr->hdrlen = 0;
	drvr->bus_if = dev_get_drvdata(dev);
	drvr->bus_if->drvr = drvr;

	/* create device debugfs folder */
	brcmf_debugfs_attach(drvr);

	/* Attach and link in the protocol */
	ret = brcmf_proto_attach(drvr);
	if (ret != 0) {
		brcmf_err("brcmf_prot_attach failed\n");
		goto fail;
	}

	/* attach firmware event handler */
	brcmf_fweh_attach(drvr);

	return ret;

fail:
	/* brcmf_detach() unwinds partial initialization */
	brcmf_detach(dev);

	return ret;
}
/* Called by the bus layer once the device is ready: create the
 * primary (and optional p2p) interface, mark the bus up, run the
 * firmware preinit commands, bring up firmware signalling, cfg80211
 * and event handling, and register the netdevs.  On any failure the
 * pieces set up so far are unwound.  Returns 0 or a negative errno.
 */
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	/* the p2p interface is optional; failure to create it is not fatal */
	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	/* query firmware feature set before dependent layers attach */
	brcmf_feat_attach(drvr);

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		/* unwind in reverse order of setup */
		brcmf_err("failed: %d\n", ret);
		brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	/* a p2p registration failure just disables the feature */
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}
  841. void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
  842. {
  843. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  844. struct brcmf_pub *drvr = bus_if->drvr;
  845. if (drvr) {
  846. drvr->hdrlen += len;
  847. }
  848. }
  849. static void brcmf_bus_detach(struct brcmf_pub *drvr)
  850. {
  851. brcmf_dbg(TRACE, "Enter\n");
  852. if (drvr) {
  853. /* Stop the bus module */
  854. brcmf_bus_stop(drvr->bus_if);
  855. }
  856. }
  857. void brcmf_dev_reset(struct device *dev)
  858. {
  859. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  860. struct brcmf_pub *drvr = bus_if->drvr;
  861. if (drvr == NULL)
  862. return;
  863. if (drvr->iflist[0])
  864. brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
  865. }
/*
 * brcmf_detach() - tear down a driver instance created by brcmf_attach().
 *
 * @dev: bus device whose drvdata holds the struct brcmf_bus.
 *
 * Also invoked from brcmf_attach()'s failure path, so every step must
 * tolerate partially initialised state.  The teardown order is
 * significant: events first, then interfaces (primary last), then
 * cfg80211/fws/bus/proto/debugfs.
 */
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
		brcmf_remove_interface(drvr, i);

	brcmf_cfg80211_detach(drvr->config);

	brcmf_fws_deinit(drvr);

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_debugfs_detach(drvr);
	/* clear the back-pointer before freeing so no caller can
	 * reach the freed brcmf_pub through the bus interface
	 */
	bus_if->drvr = NULL;
	kfree(drvr);
}
  888. s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
  889. {
  890. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  891. struct brcmf_if *ifp = bus_if->drvr->iflist[0];
  892. return brcmf_fil_iovar_data_set(ifp, name, data, len);
  893. }
/* Number of 802.1x frames still pending transmission on @ifp. */
static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
	return atomic_read(&ifp->pend_8021x_cnt);
}
  898. int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
  899. {
  900. struct brcmf_if *ifp = netdev_priv(ndev);
  901. int err;
  902. err = wait_event_timeout(ifp->pend_8021x_wait,
  903. !brcmf_get_pend_8021x_cnt(ifp),
  904. msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
  905. WARN_ON(!err);
  906. return !err;
  907. }
/*
 * brcmf_driver_register() - register the compiled-in bus drivers.
 *
 * Runs from brcmf_driver_work (scheduled by brcmfmac_module_init) so
 * that bus registration happens outside module_init context.
 */
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_register();
#endif
}
/* deferred bus-driver registration, queued from module init */
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
  921. static int __init brcmfmac_module_init(void)
  922. {
  923. brcmf_debugfs_init();
  924. #ifdef CONFIG_BRCMFMAC_SDIO
  925. brcmf_sdio_init();
  926. #endif
  927. if (!schedule_work(&brcmf_driver_work))
  928. return -EBUSY;
  929. return 0;
  930. }
/*
 * brcmfmac_module_exit() - module exit point.
 *
 * Waits for any in-flight deferred registration to finish, then
 * unregisters the configured bus drivers and tears down debugfs,
 * mirroring brcmfmac_module_init() in reverse.
 */
static void __exit brcmfmac_module_exit(void)
{
	/* ensure brcmf_driver_register() is not still running */
	cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_exit();
#endif
#ifdef CONFIG_BRCMFMAC_PCIE
	brcmf_pcie_exit();
#endif
	brcmf_debugfs_exit();
}
/* register module entry/exit points with the kernel */
module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);