dhd_linux.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116
  1. /*
  2. * Copyright (c) 2010 Broadcom Corporation
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/etherdevice.h>
  18. #include <linux/module.h>
  19. #include <net/cfg80211.h>
  20. #include <net/rtnetlink.h>
  21. #include <brcmu_utils.h>
  22. #include <brcmu_wifi.h>
  23. #include "dhd.h"
  24. #include "dhd_bus.h"
  25. #include "dhd_dbg.h"
  26. #include "fwil_types.h"
  27. #include "p2p.h"
  28. #include "wl_cfg80211.h"
  29. #include "fwil.h"
  30. #include "fwsignal.h"
  31. #include "feature.h"
  32. #include "proto.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

/* maximum time to wait for completion of pending 802.1x tx frames */
#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */

/* AMPDU rx reordering definitions */
/* byte offsets into the firmware-supplied reorder metadata */
#define BRCMF_RXREORDER_FLOWID_OFFSET 0
#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
#define BRCMF_RXREORDER_FLAGS_OFFSET 4
#define BRCMF_RXREORDER_CURIDX_OFFSET 6
#define BRCMF_RXREORDER_EXPIDX_OFFSET 8

/* flag bits carried at BRCMF_RXREORDER_FLAGS_OFFSET */
#define BRCMF_RXREORDER_DEL_FLOW 0x01
#define BRCMF_RXREORDER_FLUSH_ALL 0x02
#define BRCMF_RXREORDER_CURIDX_VALID 0x04
#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
#define BRCMF_RXREORDER_NEW_HOLE 0x10

/* Error bits */
/* debug output level bitmap; writable via the "debug" module param */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

/* P2P0 enable */
static int brcmf_p2p_enable;
#ifdef CONFIG_BRCMDBG
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
#endif
  58. char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
  59. {
  60. if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
  61. brcmf_err("ifidx %d out of range\n", ifidx);
  62. return "<if_bad>";
  63. }
  64. if (drvr->iflist[ifidx] == NULL) {
  65. brcmf_err("null i/f %d\n", ifidx);
  66. return "<if_null>";
  67. }
  68. if (drvr->iflist[ifidx]->ndev)
  69. return drvr->iflist[ifidx]->ndev->name;
  70. return "<if_none>";
  71. }
/* Worker for ndo_set_rx_mode: push the multicast list, the allmulti
 * flag and the promiscuous flag down to the firmware. Runs in process
 * context so the firmware calls may sleep.
 */
static void _brcmf_set_multicast_list(struct work_struct *work)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;
	struct netdev_hw_addr *ha;
	u32 cmd_value, cnt;
	__le32 cnt_le;
	char *buf, *bufp;
	u32 buflen;
	s32 err;

	ifp = container_of(work, struct brcmf_if, multicast_work);

	brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

	ndev = ifp->ndev;

	/* Determine initial value of allmulti flag */
	cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

	/* Send down the multicast list first. */
	/* firmware buffer layout: __le32 count followed by the addresses */
	cnt = netdev_mc_count(ndev);
	buflen = sizeof(cnt) + (cnt * ETH_ALEN);
	buf = kmalloc(buflen, GFP_ATOMIC);
	if (!buf)
		return;
	bufp = buf;

	cnt_le = cpu_to_le32(cnt);
	memcpy(bufp, &cnt_le, sizeof(cnt_le));
	bufp += sizeof(cnt_le);

	/* the list may have changed since cnt was sampled; never copy
	 * more than cnt addresses into the buffer
	 */
	netdev_for_each_mc_addr(ha, ndev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETH_ALEN);
		bufp += ETH_ALEN;
		cnt--;
	}

	err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
	if (err < 0) {
		brcmf_err("Setting mcast_list failed, %d\n", err);
		/* list rejected: fall back to receiving all multicast */
		cmd_value = cnt ? true : cmd_value;
	}

	kfree(buf);

	/*
	 * Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
	if (err < 0)
		brcmf_err("Setting allmulti failed, %d\n", err);

	/*Finally, pick up the PROMISC flag */
	cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
	err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
	if (err < 0)
		brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
			  err);
}
  125. static void
  126. _brcmf_set_mac_address(struct work_struct *work)
  127. {
  128. struct brcmf_if *ifp;
  129. s32 err;
  130. ifp = container_of(work, struct brcmf_if, setmacaddr_work);
  131. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  132. err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
  133. ETH_ALEN);
  134. if (err < 0) {
  135. brcmf_err("Setting cur_etheraddr failed, %d\n", err);
  136. } else {
  137. brcmf_dbg(TRACE, "MAC address updated to %pM\n",
  138. ifp->mac_addr);
  139. memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  140. }
  141. }
  142. static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
  143. {
  144. struct brcmf_if *ifp = netdev_priv(ndev);
  145. struct sockaddr *sa = (struct sockaddr *)addr;
  146. memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
  147. schedule_work(&ifp->setmacaddr_work);
  148. return 0;
  149. }
  150. static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
  151. {
  152. struct brcmf_if *ifp = netdev_priv(ndev);
  153. schedule_work(&ifp->multicast_work);
  154. }
  155. static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
  156. struct net_device *ndev)
  157. {
  158. int ret;
  159. struct brcmf_if *ifp = netdev_priv(ndev);
  160. struct brcmf_pub *drvr = ifp->drvr;
  161. struct ethhdr *eh = (struct ethhdr *)(skb->data);
  162. brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
  163. /* Can the device send data? */
  164. if (drvr->bus_if->state != BRCMF_BUS_DATA) {
  165. brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
  166. netif_stop_queue(ndev);
  167. dev_kfree_skb(skb);
  168. ret = -ENODEV;
  169. goto done;
  170. }
  171. if (!drvr->iflist[ifp->bssidx]) {
  172. brcmf_err("bad ifidx %d\n", ifp->bssidx);
  173. netif_stop_queue(ndev);
  174. dev_kfree_skb(skb);
  175. ret = -ENODEV;
  176. goto done;
  177. }
  178. /* Make sure there's enough room for any header */
  179. if (skb_headroom(skb) < drvr->hdrlen) {
  180. struct sk_buff *skb2;
  181. brcmf_dbg(INFO, "%s: insufficient headroom\n",
  182. brcmf_ifname(drvr, ifp->bssidx));
  183. drvr->bus_if->tx_realloc++;
  184. skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
  185. dev_kfree_skb(skb);
  186. skb = skb2;
  187. if (skb == NULL) {
  188. brcmf_err("%s: skb_realloc_headroom failed\n",
  189. brcmf_ifname(drvr, ifp->bssidx));
  190. ret = -ENOMEM;
  191. goto done;
  192. }
  193. }
  194. /* validate length for ether packet */
  195. if (skb->len < sizeof(*eh)) {
  196. ret = -EINVAL;
  197. dev_kfree_skb(skb);
  198. goto done;
  199. }
  200. if (eh->h_proto == htons(ETH_P_PAE))
  201. atomic_inc(&ifp->pend_8021x_cnt);
  202. ret = brcmf_fws_process_skb(ifp, skb);
  203. done:
  204. if (ret) {
  205. ifp->stats.tx_dropped++;
  206. } else {
  207. ifp->stats.tx_packets++;
  208. ifp->stats.tx_bytes += skb->len;
  209. }
  210. /* Return ok: we always eat the packet */
  211. return NETDEV_TX_OK;
  212. }
/* Update the per-interface stop-reason bitmask and start/stop the
 * netdev tx queue accordingly. @reason identifies the flow-control
 * source; @state is true to assert the stop, false to release it.
 */
void brcmf_txflowblock_if(struct brcmf_if *ifp,
			  enum brcmf_netif_stop_reason reason, bool state)
{
	unsigned long flags;

	if (!ifp || !ifp->ndev)
		return;

	brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
		  ifp->bssidx, ifp->netif_stop, reason, state);

	/* irqsave: flow-control callers may run in interrupt context */
	spin_lock_irqsave(&ifp->netif_stop_lock, flags);
	if (state) {
		/* stop the queue only on the first asserted reason */
		if (!ifp->netif_stop)
			netif_stop_queue(ifp->ndev);
		ifp->netif_stop |= reason;
	} else {
		ifp->netif_stop &= ~reason;
		/* wake only once every stop reason has been cleared */
		if (!ifp->netif_stop)
			netif_wake_queue(ifp->ndev);
	}
	spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
  233. void brcmf_txflowblock(struct device *dev, bool state)
  234. {
  235. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  236. struct brcmf_pub *drvr = bus_if->drvr;
  237. brcmf_dbg(TRACE, "Enter\n");
  238. brcmf_fws_bus_blocked(drvr, state);
  239. }
/* Deliver a received frame to the Linux network stack, accounting it
 * against @ifp. Consumes @skb in all cases (dropped when the netdev
 * is down).
 */
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
	skb->dev = ifp->ndev;
	/* determine protocol and strip the ethernet header */
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (skb->pkt_type == PACKET_MULTICAST)
		ifp->stats.multicast++;

	/* Process special event packets */
	brcmf_fweh_process_skb(ifp->drvr, skb);

	/* interface down: count nothing, just drop */
	if (!(ifp->ndev->flags & IFF_UP)) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	ifp->stats.rx_bytes += skb->len;
	ifp->stats.rx_packets++;

	brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
	if (in_interrupt())
		netif_rx(skb);
	else
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
		 */
		netif_rx_ni(skb);
}
/* Collect queued packets from the reorder slot ring of @rfi into
 * @skb_list, walking from slot @start up to (but excluding) @end and
 * wrapping at max_idx. Empty slots (holes) are skipped. Passing
 * start == end drains the entire ring (the do/while makes a full lap).
 */
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
					 u8 start, u8 end,
					 struct sk_buff_head *skb_list)
{
	/* initialize return list */
	__skb_queue_head_init(skb_list);

	if (rfi->pend_pkts == 0) {
		brcmf_dbg(INFO, "no packets in reorder queue\n");
		return;
	}

	do {
		if (rfi->pktslots[start]) {
			__skb_queue_tail(skb_list, rfi->pktslots[start]);
			rfi->pktslots[start] = NULL;
		}
		start++;
		/* circular slot array: wrap past max_idx */
		if (start > rfi->max_idx)
			start = 0;
	} while (start != end);
	rfi->pend_pkts -= skb_queue_len(skb_list);
}
/* Handle the AMPDU rx-reorder metadata the firmware attached to @pkt.
 * Out-of-order packets are parked in a per-flow slot ring until the
 * expected sequence position arrives, then flushed in order through
 * brcmf_netif_rx(). Consumes @pkt in all cases.
 */
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		/* flow teardown: flush everything pending, append the
		 * current packet and release the flow state
		 */
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		/* start == end drains the complete slot ring */
		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		/* slot ring lives in the same allocation, directly
		 * behind the brcmf_ampdu_rx_reorder struct
		 */
		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi+1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		/* firmware reports a new gap: flush anything still
		 * pending, then park this packet at its slot
		 */
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			/* NOTE(review): this inner declaration shadows the
			 * end_idx declared at function scope
			 */
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			/* the new packet can be released too when it is
			 * the direct successor of cur_idx
			 */
			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicity window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	/* deliver the flushed packets, in order, to the stack */
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt);
	}
}
  437. void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
  438. {
  439. struct brcmf_if *ifp;
  440. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  441. struct brcmf_pub *drvr = bus_if->drvr;
  442. struct brcmf_skb_reorder_data *rd;
  443. u8 ifidx;
  444. int ret;
  445. brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
  446. /* process and remove protocol-specific header */
  447. ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
  448. ifp = drvr->iflist[ifidx];
  449. if (ret || !ifp || !ifp->ndev) {
  450. if ((ret != -ENODATA) && ifp)
  451. ifp->stats.rx_errors++;
  452. brcmu_pkt_buf_free_skb(skb);
  453. return;
  454. }
  455. rd = (struct brcmf_skb_reorder_data *)skb->cb;
  456. if (rd->reorder)
  457. brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
  458. else
  459. brcmf_netif_rx(ifp, skb);
  460. }
  461. void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
  462. bool success)
  463. {
  464. struct brcmf_if *ifp;
  465. struct ethhdr *eh;
  466. u16 type;
  467. ifp = drvr->iflist[ifidx];
  468. if (!ifp)
  469. goto done;
  470. eh = (struct ethhdr *)(txp->data);
  471. type = ntohs(eh->h_proto);
  472. if (type == ETH_P_PAE) {
  473. atomic_dec(&ifp->pend_8021x_cnt);
  474. if (waitqueue_active(&ifp->pend_8021x_wait))
  475. wake_up(&ifp->pend_8021x_wait);
  476. }
  477. if (!success)
  478. ifp->stats.tx_errors++;
  479. done:
  480. brcmu_pkt_buf_free_skb(txp);
  481. }
  482. void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
  483. {
  484. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  485. struct brcmf_pub *drvr = bus_if->drvr;
  486. u8 ifidx;
  487. /* await txstatus signal for firmware if active */
  488. if (brcmf_fws_fc_active(drvr->fws)) {
  489. if (!success)
  490. brcmf_fws_bustxfail(drvr->fws, txp);
  491. } else {
  492. if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
  493. brcmu_pkt_buf_free_skb(txp);
  494. else
  495. brcmf_txfinalize(drvr, txp, ifidx, success);
  496. }
  497. }
  498. static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
  499. {
  500. struct brcmf_if *ifp = netdev_priv(ndev);
  501. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  502. return &ifp->stats;
  503. }
  504. static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
  505. struct ethtool_drvinfo *info)
  506. {
  507. struct brcmf_if *ifp = netdev_priv(ndev);
  508. struct brcmf_pub *drvr = ifp->drvr;
  509. strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
  510. snprintf(info->version, sizeof(info->version), "n/a");
  511. strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
  512. strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
  513. sizeof(info->bus_info));
  514. }
/* ethtool support is limited to drvinfo reporting */
static const struct ethtool_ops brcmf_ethtool_ops = {
	.get_drvinfo = brcmf_ethtool_get_drvinfo,
};
  518. static int brcmf_netdev_stop(struct net_device *ndev)
  519. {
  520. struct brcmf_if *ifp = netdev_priv(ndev);
  521. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  522. brcmf_cfg80211_down(ndev);
  523. /* Set state and stop OS transmissions */
  524. netif_stop_queue(ndev);
  525. return 0;
  526. }
  527. static int brcmf_netdev_open(struct net_device *ndev)
  528. {
  529. struct brcmf_if *ifp = netdev_priv(ndev);
  530. struct brcmf_pub *drvr = ifp->drvr;
  531. struct brcmf_bus *bus_if = drvr->bus_if;
  532. u32 toe_ol;
  533. brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
  534. /* If bus is not ready, can't continue */
  535. if (bus_if->state != BRCMF_BUS_DATA) {
  536. brcmf_err("failed bus is not ready\n");
  537. return -EAGAIN;
  538. }
  539. atomic_set(&ifp->pend_8021x_cnt, 0);
  540. /* Get current TOE mode from dongle */
  541. if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
  542. && (toe_ol & TOE_TX_CSUM_OL) != 0)
  543. ndev->features |= NETIF_F_IP_CSUM;
  544. else
  545. ndev->features &= ~NETIF_F_IP_CSUM;
  546. if (brcmf_cfg80211_up(ndev)) {
  547. brcmf_err("failed to bring up cfg80211\n");
  548. return -EIO;
  549. }
  550. /* Allow transmit calls */
  551. netif_start_queue(ndev);
  552. return 0;
  553. }
/* netdev callbacks for regular (non-p2p-device) interfaces */
static const struct net_device_ops brcmf_netdev_ops_pri = {
	.ndo_open = brcmf_netdev_open,
	.ndo_stop = brcmf_netdev_stop,
	.ndo_get_stats = brcmf_netdev_get_stats,
	.ndo_start_xmit = brcmf_netdev_start_xmit,
	.ndo_set_mac_address = brcmf_netdev_set_mac_address,
	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};
/* Register the net_device of @ifp with the kernel: install netdev and
 * ethtool ops, reserve bus header room, set the MAC address and the
 * deferred-work handlers. @rtnl_locked tells whether the caller already
 * holds the rtnl lock. On failure the net_device is freed and the
 * interface slot cleared; returns 0 or -EBADE.
 */
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct net_device *ndev;
	s32 err;

	brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
		  ifp->mac_addr);
	ndev = ifp->ndev;

	/* set appropriate operations */
	ndev->netdev_ops = &brcmf_netdev_ops_pri;

	/* room for bus/protocol headers prepended on transmit */
	ndev->hard_header_len += drvr->hdrlen;
	ndev->ethtool_ops = &brcmf_ethtool_ops;

	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
			      drvr->hdrlen;

	/* set the mac address */
	memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

	INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
	INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

	/* pick the registration variant matching the caller's locking */
	if (rtnl_locked)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err != 0) {
		brcmf_err("couldn't register the net device\n");
		goto fail;
	}

	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

	/* unregister_netdev() will free the device via this destructor */
	ndev->destructor = brcmf_cfg80211_free_netdev;
	return 0;

fail:
	drvr->iflist[ifp->bssidx] = NULL;
	ndev->netdev_ops = NULL;
	free_netdev(ndev);
	return -EBADE;
}
  597. static int brcmf_net_p2p_open(struct net_device *ndev)
  598. {
  599. brcmf_dbg(TRACE, "Enter\n");
  600. return brcmf_cfg80211_up(ndev);
  601. }
  602. static int brcmf_net_p2p_stop(struct net_device *ndev)
  603. {
  604. brcmf_dbg(TRACE, "Enter\n");
  605. return brcmf_cfg80211_down(ndev);
  606. }
  607. static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
  608. struct net_device *ndev)
  609. {
  610. if (skb)
  611. dev_kfree_skb_any(skb);
  612. return NETDEV_TX_OK;
  613. }
/* netdev callbacks for the p2p management interface (no data path) */
static const struct net_device_ops brcmf_netdev_ops_p2p = {
	.ndo_open = brcmf_net_p2p_open,
	.ndo_stop = brcmf_net_p2p_stop,
	.ndo_start_xmit = brcmf_net_p2p_start_xmit
};
  619. static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
  620. {
  621. struct net_device *ndev;
  622. brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
  623. ifp->mac_addr);
  624. ndev = ifp->ndev;
  625. ndev->netdev_ops = &brcmf_netdev_ops_p2p;
  626. /* set the mac address */
  627. memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
  628. if (register_netdev(ndev) != 0) {
  629. brcmf_err("couldn't register the p2p net device\n");
  630. goto fail;
  631. }
  632. brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
  633. return 0;
  634. fail:
  635. ifp->drvr->iflist[ifp->bssidx] = NULL;
  636. ndev->netdev_ops = NULL;
  637. free_netdev(ndev);
  638. return -EBADE;
  639. }
/* Create interface state for slot @bssidx / firmware index @ifidx.
 * For the P2P_DEVICE slot (bssidx 1 with p2pon disabled) a bare
 * brcmf_if is allocated; otherwise a net_device with embedded brcmf_if.
 * A stale non-primary interface occupying the slot is torn down first.
 * Returns the new brcmf_if or an ERR_PTR.
 */
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
			      char *name, u8 *mac_addr)
{
	struct brcmf_if *ifp;
	struct net_device *ndev;

	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

	ifp = drvr->iflist[bssidx];
	/*
	 * Delete the existing interface before overwriting it
	 * in case we missed the BRCMF_E_IF_DEL event.
	 */
	if (ifp) {
		brcmf_err("ERROR: netdev:%s already exists\n",
			  ifp->ndev->name);
		if (ifidx) {
			netif_stop_queue(ifp->ndev);
			unregister_netdev(ifp->ndev);
			free_netdev(ifp->ndev);
			drvr->iflist[bssidx] = NULL;
		} else {
			/* never replace the primary interface */
			brcmf_err("ignore IF event\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (!brcmf_p2p_enable && bssidx == 1) {
		/* this is P2P_DEVICE interface */
		brcmf_dbg(INFO, "allocate non-netdev interface\n");
		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
		if (!ifp)
			return ERR_PTR(-ENOMEM);
	} else {
		brcmf_dbg(INFO, "allocate netdev interface\n");
		/* Allocate netdev, including space for private structure */
		ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
		if (!ndev)
			return ERR_PTR(-ENOMEM);

		ifp = netdev_priv(ndev);
		ifp->ndev = ndev;
	}

	ifp->drvr = drvr;
	drvr->iflist[bssidx] = ifp;
	ifp->ifidx = ifidx;
	ifp->bssidx = bssidx;

	init_waitqueue_head(&ifp->pend_8021x_wait);
	spin_lock_init(&ifp->netif_stop_lock);

	if (mac_addr != NULL)
		memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

	brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
		  current->pid, name, ifp->mac_addr);

	return ifp;
}
/* Remove the interface in slot @bssidx. Netdev-backed interfaces have
 * their queue stopped (the primary via a full netdev stop under rtnl),
 * pending work cancelled and the device unregistered (which frees it
 * through the destructor); a bare brcmf_if is simply kfree'd.
 */
void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
	struct brcmf_if *ifp;

	ifp = drvr->iflist[bssidx];
	drvr->iflist[bssidx] = NULL;
	if (!ifp) {
		brcmf_err("Null interface, idx=%d\n", bssidx);
		return;
	}
	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
	if (ifp->ndev) {
		if (bssidx == 0) {
			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
				/* bring primary interface down first */
				rtnl_lock();
				brcmf_netdev_stop(ifp->ndev);
				rtnl_unlock();
			}
		} else {
			netif_stop_queue(ifp->ndev);
		}

		if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
			/* flush deferred mac/multicast work before unregister */
			cancel_work_sync(&ifp->setmacaddr_work);
			cancel_work_sync(&ifp->multicast_work);
		}
		/* unregister will take care of freeing it */
		unregister_netdev(ifp->ndev);
	} else {
		kfree(ifp);
	}
}
  721. int brcmf_attach(struct device *dev)
  722. {
  723. struct brcmf_pub *drvr = NULL;
  724. int ret = 0;
  725. brcmf_dbg(TRACE, "Enter\n");
  726. /* Allocate primary brcmf_info */
  727. drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
  728. if (!drvr)
  729. return -ENOMEM;
  730. mutex_init(&drvr->proto_block);
  731. /* Link to bus module */
  732. drvr->hdrlen = 0;
  733. drvr->bus_if = dev_get_drvdata(dev);
  734. drvr->bus_if->drvr = drvr;
  735. /* create device debugfs folder */
  736. brcmf_debugfs_attach(drvr);
  737. /* Attach and link in the protocol */
  738. ret = brcmf_proto_attach(drvr);
  739. if (ret != 0) {
  740. brcmf_err("brcmf_prot_attach failed\n");
  741. goto fail;
  742. }
  743. /* attach firmware event handler */
  744. brcmf_fweh_attach(drvr);
  745. return ret;
  746. fail:
  747. brcmf_detach(dev);
  748. return ret;
  749. }
/* Called by the bus layer once the dongle is ready: create the primary
 * (and optional p2p) interface, run firmware preinit, attach features,
 * firmware signalling and cfg80211, and register the netdevs. On
 * failure all partially initialised state is unwound.
 */
int brcmf_bus_start(struct device *dev)
{
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_if *ifp;
	struct brcmf_if *p2p_ifp;

	brcmf_dbg(TRACE, "\n");

	/* add primary networking interface */
	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
	if (IS_ERR(ifp))
		return PTR_ERR(ifp);

	/* p2p interface is optional; failure merely disables p2p */
	if (brcmf_p2p_enable)
		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
	else
		p2p_ifp = NULL;
	if (IS_ERR(p2p_ifp))
		p2p_ifp = NULL;

	/* signal bus ready */
	brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);

	/* Bus is ready, do any initialization */
	ret = brcmf_c_preinit_dcmds(ifp);
	if (ret < 0)
		goto fail;

	brcmf_feat_attach(drvr);

	ret = brcmf_fws_init(drvr);
	if (ret < 0)
		goto fail;

	brcmf_fws_add_interface(ifp);

	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
	if (drvr->config == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = brcmf_fweh_activate_events(ifp);
	if (ret < 0)
		goto fail;

	ret = brcmf_net_attach(ifp, false);
fail:
	if (ret < 0) {
		/* unwind in reverse order of construction */
		brcmf_err("failed: %d\n", ret);
		brcmf_cfg80211_detach(drvr->config);
		if (drvr->fws) {
			brcmf_fws_del_interface(ifp);
			brcmf_fws_deinit(drvr);
		}
		if (drvr->iflist[0]) {
			free_netdev(ifp->ndev);
			drvr->iflist[0] = NULL;
		}
		if (p2p_ifp) {
			free_netdev(p2p_ifp->ndev);
			drvr->iflist[1] = NULL;
		}
		return ret;
	}
	if ((brcmf_p2p_enable) && (p2p_ifp))
		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
			brcmf_p2p_enable = 0;

	return 0;
}
  811. void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
  812. {
  813. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  814. struct brcmf_pub *drvr = bus_if->drvr;
  815. if (drvr) {
  816. drvr->hdrlen += len;
  817. }
  818. }
  819. static void brcmf_bus_detach(struct brcmf_pub *drvr)
  820. {
  821. brcmf_dbg(TRACE, "Enter\n");
  822. if (drvr) {
  823. /* Stop the bus module */
  824. brcmf_bus_stop(drvr->bus_if);
  825. }
  826. }
  827. void brcmf_dev_reset(struct device *dev)
  828. {
  829. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  830. struct brcmf_pub *drvr = bus_if->drvr;
  831. if (drvr == NULL)
  832. return;
  833. if (drvr->iflist[0])
  834. brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
  835. }
/* Tear down the driver instance bound to @dev: stop firmware event
 * handling, remove all interfaces (primary removed last), then detach
 * cfg80211, firmware signalling, the bus, the protocol layer and
 * debugfs, and free the driver structure.
 */
void brcmf_detach(struct device *dev)
{
	s32 i;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;

	brcmf_dbg(TRACE, "Enter\n");

	if (drvr == NULL)
		return;

	/* stop firmware event handling */
	brcmf_fweh_detach(drvr);

	brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

	/* make sure primary interface removed last */
	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
		if (drvr->iflist[i]) {
			brcmf_fws_del_interface(drvr->iflist[i]);
			brcmf_del_if(drvr, i);
		}

	brcmf_cfg80211_detach(drvr->config);

	brcmf_fws_deinit(drvr);

	brcmf_bus_detach(drvr);

	brcmf_proto_detach(drvr);

	brcmf_debugfs_detach(drvr);
	bus_if->drvr = NULL;
	kfree(drvr);
}
  861. s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
  862. {
  863. struct brcmf_bus *bus_if = dev_get_drvdata(dev);
  864. struct brcmf_if *ifp = bus_if->drvr->iflist[0];
  865. return brcmf_fil_iovar_data_set(ifp, name, data, len);
  866. }
  867. static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
  868. {
  869. return atomic_read(&ifp->pend_8021x_cnt);
  870. }
  871. int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
  872. {
  873. struct brcmf_if *ifp = netdev_priv(ndev);
  874. int err;
  875. err = wait_event_timeout(ifp->pend_8021x_wait,
  876. !brcmf_get_pend_8021x_cnt(ifp),
  877. msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
  878. WARN_ON(!err);
  879. return !err;
  880. }
/* Deferred registration of the configured bus drivers; runs from
 * brcmf_driver_work so module init is not blocked by bus probing.
 */
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
	brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
	brcmf_usb_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
  891. static int __init brcmfmac_module_init(void)
  892. {
  893. brcmf_debugfs_init();
  894. #ifdef CONFIG_BRCMFMAC_SDIO
  895. brcmf_sdio_init();
  896. #endif
  897. if (!schedule_work(&brcmf_driver_work))
  898. return -EBUSY;
  899. return 0;
  900. }
  901. static void __exit brcmfmac_module_exit(void)
  902. {
  903. cancel_work_sync(&brcmf_driver_work);
  904. #ifdef CONFIG_BRCMFMAC_SDIO
  905. brcmf_sdio_exit();
  906. #endif
  907. #ifdef CONFIG_BRCMFMAC_USB
  908. brcmf_usb_exit();
  909. #endif
  910. brcmf_debugfs_exit();
  911. }
  912. module_init(brcmfmac_module_init);
  913. module_exit(brcmfmac_module_exit);