txrx.c 55 KB

  1. /*
  2. * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/etherdevice.h>
  17. #include <net/ieee80211_radiotap.h>
  18. #include <linux/if_arp.h>
  19. #include <linux/moduleparam.h>
  20. #include <linux/ip.h>
  21. #include <linux/ipv6.h>
  22. #include <net/ipv6.h>
  23. #include <linux/prefetch.h>
  24. #include "wil6210.h"
  25. #include "wmi.h"
  26. #include "txrx.h"
  27. #include "trace.h"
  28. static bool rtap_include_phy_info;
  29. module_param(rtap_include_phy_info, bool, 0444);
  30. MODULE_PARM_DESC(rtap_include_phy_info,
  31. " Include PHY info in the radiotap header, default - no");
  32. bool rx_align_2;
  33. module_param(rx_align_2, bool, 0444);
  34. MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
  35. static inline uint wil_rx_snaplen(void)
  36. {
  37. return rx_align_2 ? 6 : 0;
  38. }
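/* With rx_align_2 set, received frames carry an extra 6-byte SNAP between
 * the addresses and the ethertype (it is stripped in wil_vring_reap_rx);
 * pulling those 6 bytes presumably shifts the 14-byte Ethernet header to a
 * 4*n+2 offset so the IP header ends up 4-byte aligned, which is what the
 * module parameter description above refers to.
 */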
  39. static inline int wil_vring_is_empty(struct vring *vring)
  40. {
  41. return vring->swhead == vring->swtail;
  42. }
  43. static inline u32 wil_vring_next_tail(struct vring *vring)
  44. {
  45. return (vring->swtail + 1) % vring->size;
  46. }
  47. static inline void wil_vring_advance_head(struct vring *vring, int n)
  48. {
  49. vring->swhead = (vring->swhead + n) % vring->size;
  50. }
  51. static inline int wil_vring_is_full(struct vring *vring)
  52. {
  53. return wil_vring_next_tail(vring) == vring->swhead;
  54. }
  55. /* Used space in Tx Vring */
  56. static inline int wil_vring_used_tx(struct vring *vring)
  57. {
  58. u32 swhead = vring->swhead;
  59. u32 swtail = vring->swtail;
  60. return (vring->size + swhead - swtail) % vring->size;
  61. }
  62. /* Available space in Tx Vring */
  63. static inline int wil_vring_avail_tx(struct vring *vring)
  64. {
  65. return vring->size - wil_vring_used_tx(vring) - 1;
  66. }
  67. /* wil_vring_wmark_low - low watermark for available descriptor space */
  68. static inline int wil_vring_wmark_low(struct vring *vring)
  69. {
  70. return vring->size/8;
  71. }
  72. /* wil_vring_wmark_high - high watermark for available descriptor space */
  73. static inline int wil_vring_wmark_high(struct vring *vring)
  74. {
  75. return vring->size/4;
  76. }
  77. /* returns true if num avail descriptors is lower than wmark_low */
  78. static inline int wil_vring_avail_low(struct vring *vring)
  79. {
  80. return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
  81. }
  82. /* returns true if num avail descriptors is higher than wmark_high */
  83. static inline int wil_vring_avail_high(struct vring *vring)
  84. {
  85. return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
  86. }
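/* Worked example of the ring accounting above: for a ring of size 128 with
 * swhead = 5 and swtail = 120, wil_vring_used_tx() = (128 + 5 - 120) % 128
 * = 13 and wil_vring_avail_tx() = 128 - 13 - 1 = 114; the watermarks are
 * 128 / 8 = 16 (low) and 128 / 4 = 32 (high), so wil_vring_avail_low() is
 * false and wil_vring_avail_high() is true.
 */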
  87. /* wil_val_in_range - check if value in [min,max) */
  88. static inline bool wil_val_in_range(int val, int min, int max)
  89. {
  90. return val >= min && val < max;
  91. }
  92. static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
  93. {
  94. struct device *dev = wil_to_dev(wil);
  95. size_t sz = vring->size * sizeof(vring->va[0]);
  96. uint i;
  97. wil_dbg_misc(wil, "vring_alloc:\n");
  98. BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
  99. vring->swhead = 0;
  100. vring->swtail = 0;
  101. vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
  102. if (!vring->ctx) {
  103. vring->va = NULL;
  104. return -ENOMEM;
  105. }
  106. /* vring->va should be aligned on its size rounded up to power of 2
  107. * This is granted by the dma_alloc_coherent
  108. */
  109. vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
  110. if (!vring->va) {
  111. kfree(vring->ctx);
  112. vring->ctx = NULL;
  113. return -ENOMEM;
  114. }
  115. /* initially, all descriptors are SW owned
  116. * For Tx and Rx, ownership bit is at the same location, thus
  117. * we can use any
  118. */
  119. for (i = 0; i < vring->size; i++) {
  120. volatile struct vring_tx_desc *_d = &vring->va[i].tx;
  121. _d->dma.status = TX_DMA_STATUS_DU;
  122. }
  123. wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
  124. vring->va, &vring->pa, vring->ctx);
  125. return 0;
  126. }
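/* Sizing example: the BUILD_BUG_ON above pins each descriptor at 32 bytes,
 * so e.g. a 1024-entry vring occupies 1024 * 32 = 32 KiB of coherent DMA
 * memory, plus 1024 wil_ctx entries for the driver-side shadow context.
 */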
  127. static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
  128. struct wil_ctx *ctx)
  129. {
  130. dma_addr_t pa = wil_desc_addr(&d->dma.addr);
  131. u16 dmalen = le16_to_cpu(d->dma.length);
  132. switch (ctx->mapped_as) {
  133. case wil_mapped_as_single:
  134. dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
  135. break;
  136. case wil_mapped_as_page:
  137. dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
  138. break;
  139. default:
  140. break;
  141. }
  142. }
  143. static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
  144. int tx)
  145. {
  146. struct device *dev = wil_to_dev(wil);
  147. size_t sz = vring->size * sizeof(vring->va[0]);
  148. lockdep_assert_held(&wil->mutex);
  149. if (tx) {
  150. int vring_index = vring - wil->vring_tx;
  151. wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
  152. vring_index, vring->size, vring->va,
  153. &vring->pa, vring->ctx);
  154. } else {
  155. wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
  156. vring->size, vring->va,
  157. &vring->pa, vring->ctx);
  158. }
  159. while (!wil_vring_is_empty(vring)) {
  160. dma_addr_t pa;
  161. u16 dmalen;
  162. struct wil_ctx *ctx;
  163. if (tx) {
  164. struct vring_tx_desc dd, *d = &dd;
  165. volatile struct vring_tx_desc *_d =
  166. &vring->va[vring->swtail].tx;
  167. ctx = &vring->ctx[vring->swtail];
  168. if (!ctx) {
  169. wil_dbg_txrx(wil,
  170. "ctx(%d) was already completed\n",
  171. vring->swtail);
  172. vring->swtail = wil_vring_next_tail(vring);
  173. continue;
  174. }
  175. *d = *_d;
  176. wil_txdesc_unmap(dev, d, ctx);
  177. if (ctx->skb)
  178. dev_kfree_skb_any(ctx->skb);
  179. vring->swtail = wil_vring_next_tail(vring);
  180. } else { /* rx */
  181. struct vring_rx_desc dd, *d = &dd;
  182. volatile struct vring_rx_desc *_d =
  183. &vring->va[vring->swhead].rx;
  184. ctx = &vring->ctx[vring->swhead];
  185. *d = *_d;
  186. pa = wil_desc_addr(&d->dma.addr);
  187. dmalen = le16_to_cpu(d->dma.length);
  188. dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
  189. kfree_skb(ctx->skb);
  190. wil_vring_advance_head(vring, 1);
  191. }
  192. }
  193. dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
  194. kfree(vring->ctx);
  195. vring->pa = 0;
  196. vring->va = NULL;
  197. vring->ctx = NULL;
  198. }
  199. /**
  200. * Allocate one skb for Rx VRING
  201. *
  202. * Safe to call from IRQ
  203. */
  204. static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
  205. u32 i, int headroom)
  206. {
  207. struct device *dev = wil_to_dev(wil);
  208. unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
  209. struct vring_rx_desc dd, *d = &dd;
  210. volatile struct vring_rx_desc *_d = &vring->va[i].rx;
  211. dma_addr_t pa;
  212. struct sk_buff *skb = dev_alloc_skb(sz + headroom);
  213. if (unlikely(!skb))
  214. return -ENOMEM;
  215. skb_reserve(skb, headroom);
  216. skb_put(skb, sz);
  217. pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
  218. if (unlikely(dma_mapping_error(dev, pa))) {
  219. kfree_skb(skb);
  220. return -ENOMEM;
  221. }
  222. d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
  223. wil_desc_addr_set(&d->dma.addr, pa);
  224. /* ip_length don't care */
  225. /* b11 don't care */
  226. /* error don't care */
  227. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  228. d->dma.length = cpu_to_le16(sz);
  229. *_d = *d;
  230. vring->ctx[i].skb = skb;
  231. return 0;
  232. }
  233. /**
  234. * Adds radiotap header
  235. *
  236. * Any error indicated as "Bad FCS"
  237. *
  238. * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
  239. * - Rx descriptor: 32 bytes
  240. * - Phy info
  241. */
  242. static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
  243. struct sk_buff *skb)
  244. {
  245. struct wireless_dev *wdev = wil->wdev;
  246. struct wil6210_rtap {
  247. struct ieee80211_radiotap_header rthdr;
  248. /* fields should be in the order of bits in rthdr.it_present */
  249. /* flags */
  250. u8 flags;
  251. /* channel */
  252. __le16 chnl_freq __aligned(2);
  253. __le16 chnl_flags;
  254. /* MCS */
  255. u8 mcs_present;
  256. u8 mcs_flags;
  257. u8 mcs_index;
  258. } __packed;
  259. struct wil6210_rtap_vendor {
  260. struct wil6210_rtap rtap;
  261. /* vendor */
  262. u8 vendor_oui[3] __aligned(2);
  263. u8 vendor_ns;
  264. __le16 vendor_skip;
  265. u8 vendor_data[0];
  266. } __packed;
  267. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  268. struct wil6210_rtap_vendor *rtap_vendor;
  269. int rtap_len = sizeof(struct wil6210_rtap);
  270. int phy_length = 0; /* phy info header size, bytes */
  271. static char phy_data[128];
  272. struct ieee80211_channel *ch = wdev->preset_chandef.chan;
  273. if (rtap_include_phy_info) {
  274. rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
  275. /* calculate additional length */
  276. if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
  277. /*
  278. * PHY info starts at an 8-byte boundary and consists of
  279. * 8-byte lines. The last line may be partially written
  280. * (HW bug), so the FW configures one excessive line;
  281. * the driver skips this last line.
  282. */
  283. int len = min_t(int, 8 + sizeof(phy_data),
  284. wil_rxdesc_phy_length(d));
  285. if (len > 8) {
  286. void *p = skb_tail_pointer(skb);
  287. void *pa = PTR_ALIGN(p, 8);
  288. if (skb_tailroom(skb) >= len + (pa - p)) {
  289. phy_length = len - 8;
  290. memcpy(phy_data, pa, phy_length);
  291. }
  292. }
  293. }
  294. rtap_len += phy_length;
  295. }
  296. if (skb_headroom(skb) < rtap_len &&
  297. pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
  298. wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
  299. return;
  300. }
  301. rtap_vendor = (void *)skb_push(skb, rtap_len);
  302. memset(rtap_vendor, 0, rtap_len);
  303. rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
  304. rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
  305. rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
  306. (1 << IEEE80211_RADIOTAP_FLAGS) |
  307. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  308. (1 << IEEE80211_RADIOTAP_MCS));
  309. if (d->dma.status & RX_DMA_STATUS_ERROR)
  310. rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
  311. rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
  312. rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
  313. rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
  314. rtap_vendor->rtap.mcs_flags = 0;
  315. rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
  316. if (rtap_include_phy_info) {
  317. rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
  318. IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
  319. /* OUI for Wilocity 04:ce:14 */
  320. rtap_vendor->vendor_oui[0] = 0x04;
  321. rtap_vendor->vendor_oui[1] = 0xce;
  322. rtap_vendor->vendor_oui[2] = 0x14;
  323. rtap_vendor->vendor_ns = 1;
  324. /* Rx descriptor + PHY data */
  325. rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
  326. phy_length);
  327. memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
  328. memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
  329. phy_length);
  330. }
  331. }
  332. /* similar to the ieee80211_ version, but FC contains only the first byte */
  333. static inline int wil_is_back_req(u8 fc)
  334. {
  335. return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
  336. (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
  337. }
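/* Example: a Block Ack Request carries 0x84 in its first frame-control
 * byte (type = control, subtype = BlockAckReq), so wil_is_back_req(0x84)
 * is true, while e.g. a plain data frame (0x08) does not match.
 */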
  338. /**
  339. * reap 1 frame from @swhead
  340. *
  341. * Rx descriptor copied to skb->cb
  342. *
  343. * Safe to call from IRQ
  344. */
  345. static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
  346. struct vring *vring)
  347. {
  348. struct device *dev = wil_to_dev(wil);
  349. struct net_device *ndev = wil_to_ndev(wil);
  350. volatile struct vring_rx_desc *_d;
  351. struct vring_rx_desc *d;
  352. struct sk_buff *skb;
  353. dma_addr_t pa;
  354. unsigned int snaplen = wil_rx_snaplen();
  355. unsigned int sz = mtu_max + ETH_HLEN + snaplen;
  356. u16 dmalen;
  357. u8 ftype;
  358. int cid;
  359. int i;
  360. struct wil_net_stats *stats;
  361. BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
  362. again:
  363. if (unlikely(wil_vring_is_empty(vring)))
  364. return NULL;
  365. i = (int)vring->swhead;
  366. _d = &vring->va[i].rx;
  367. if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
  368. /* it is not error, we just reached end of Rx done area */
  369. return NULL;
  370. }
  371. skb = vring->ctx[i].skb;
  372. vring->ctx[i].skb = NULL;
  373. wil_vring_advance_head(vring, 1);
  374. if (!skb) {
  375. wil_err(wil, "No Rx skb at [%d]\n", i);
  376. goto again;
  377. }
  378. d = wil_skb_rxdesc(skb);
  379. *d = *_d;
  380. pa = wil_desc_addr(&d->dma.addr);
  381. dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
  382. dmalen = le16_to_cpu(d->dma.length);
  383. trace_wil6210_rx(i, d);
  384. wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
  385. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  386. (const void *)d, sizeof(*d), false);
  387. cid = wil_rxdesc_cid(d);
  388. stats = &wil->sta[cid].stats;
  389. if (unlikely(dmalen > sz)) {
  390. wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
  391. stats->rx_large_frame++;
  392. kfree_skb(skb);
  393. goto again;
  394. }
  395. skb_trim(skb, dmalen);
  396. prefetch(skb->data);
  397. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  398. skb->data, skb_headlen(skb), false);
  399. stats->last_mcs_rx = wil_rxdesc_mcs(d);
  400. if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
  401. stats->rx_per_mcs[stats->last_mcs_rx]++;
  402. /* use radiotap header only if required */
  403. if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
  404. wil_rx_add_radiotap_header(wil, skb);
  405. /* no extra checks if in sniffer mode */
  406. if (ndev->type != ARPHRD_ETHER)
  407. return skb;
  408. /* Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
  409. * The driver recognizes them by the frame type found in the Rx
  410. * descriptor. If the type is not data, the frame is an 802.11 frame as is.
  411. */
  412. ftype = wil_rxdesc_ftype(d) << 2;
  413. if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
  414. u8 fc1 = wil_rxdesc_fc1(d);
  415. int mid = wil_rxdesc_mid(d);
  416. int tid = wil_rxdesc_tid(d);
  417. u16 seq = wil_rxdesc_seq(d);
  418. wil_dbg_txrx(wil,
  419. "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  420. fc1, mid, cid, tid, seq);
  421. stats->rx_non_data_frame++;
  422. if (wil_is_back_req(fc1)) {
  423. wil_dbg_txrx(wil,
  424. "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
  425. mid, cid, tid, seq);
  426. wil_rx_bar(wil, cid, tid, seq);
  427. } else {
  428. /* print again all info. One can enable only this
  429. * without overhead for printing every Rx frame
  430. */
  431. wil_dbg_txrx(wil,
  432. "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  433. fc1, mid, cid, tid, seq);
  434. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  435. (const void *)d, sizeof(*d), false);
  436. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  437. skb->data, skb_headlen(skb), false);
  438. }
  439. kfree_skb(skb);
  440. goto again;
  441. }
  442. if (unlikely(skb->len < ETH_HLEN + snaplen)) {
  443. wil_err(wil, "Short frame, len = %d\n", skb->len);
  444. stats->rx_short_frame++;
  445. kfree_skb(skb);
  446. goto again;
  447. }
  448. /* L4 IDENT is set when the HW calculated the checksum; check the status
  449. * and, on error, leave the checksum unverified so that higher stack
  450. * layers can re-check it and handle retransmission (if required)
  451. */
  452. if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
  453. /* L4 protocol identified, csum calculated */
  454. if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
  455. skb->ip_summed = CHECKSUM_UNNECESSARY;
  456. /* If HW reports a bad checksum, let the IP stack re-check it.
  457. * For example, HW doesn't understand the Microsoft IP stack, which
  458. * mis-calculates the TCP checksum - when it should be 0x0,
  459. * it writes 0xffff in violation of RFC 1624
  460. */
  461. }
  462. if (snaplen) {
  463. /* Packet layout
  464. * +-------+-------+---------+------------+------+
  465. * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
  466. * +-------+-------+---------+------------+------+
  467. * Need to remove SNAP, shifting SA and DA forward
  468. */
  469. memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
  470. skb_pull(skb, snaplen);
  471. }
  472. return skb;
  473. }
  474. /**
  475. * allocate and fill up to @count buffers in rx ring
  476. * buffers posted at @swtail
  477. */
  478. static int wil_rx_refill(struct wil6210_priv *wil, int count)
  479. {
  480. struct net_device *ndev = wil_to_ndev(wil);
  481. struct vring *v = &wil->vring_rx;
  482. u32 next_tail;
  483. int rc = 0;
  484. int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
  485. WIL6210_RTAP_SIZE : 0;
  486. for (; next_tail = wil_vring_next_tail(v),
  487. (next_tail != v->swhead) && (count-- > 0);
  488. v->swtail = next_tail) {
  489. rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
  490. if (unlikely(rc)) {
  491. wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
  492. rc, v->swtail);
  493. break;
  494. }
  495. }
  496. /* make sure all writes to descriptors (shared memory) are done before
  497. * committing them to HW
  498. */
  499. wmb();
  500. wil_w(wil, v->hwtail, v->swtail);
  501. return rc;
  502. }
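/* Note on the refill loop above: it stops one slot short of swhead
 * (next_tail != v->swhead), keeping one descriptor permanently unused so
 * that swhead == swtail unambiguously means "empty" - the same full/empty
 * convention implemented by wil_vring_is_empty()/wil_vring_is_full().
 */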
  503. /**
  504. * reverse_memcmp - Compare two areas of memory, in reverse order
  505. * @cs: One area of memory
  506. * @ct: Another area of memory
  507. * @count: The size of the area.
  508. *
  509. * Cut'n'paste from original memcmp (see lib/string.c)
  510. * with minimal modifications
  511. */
  512. static int reverse_memcmp(const void *cs, const void *ct, size_t count)
  513. {
  514. const unsigned char *su1, *su2;
  515. int res = 0;
  516. for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
  517. --su1, --su2, count--) {
  518. res = *su1 - *su2;
  519. if (res)
  520. break;
  521. }
  522. return res;
  523. }
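/* reverse_memcmp() is used below for the GCMP PN replay check: the PN bytes
 * taken from the Rx descriptor (starting at mac.pn_15_0) are stored
 * least-significant byte first, so comparing from the last byte downwards
 * amounts to an arithmetic comparison of the two 48-bit packet numbers.
 */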
  524. static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
  525. {
  526. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  527. int cid = wil_rxdesc_cid(d);
  528. int tid = wil_rxdesc_tid(d);
  529. int key_id = wil_rxdesc_key_id(d);
  530. int mc = wil_rxdesc_mcast(d);
  531. struct wil_sta_info *s = &wil->sta[cid];
  532. struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
  533. &s->tid_crypto_rx[tid];
  534. struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
  535. const u8 *pn = (u8 *)&d->mac.pn_15_0;
  536. if (!cc->key_set) {
  537. wil_err_ratelimited(wil,
  538. "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
  539. cid, tid, mc, key_id);
  540. return -EINVAL;
  541. }
  542. if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
  543. wil_err_ratelimited(wil,
  544. "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
  545. cid, tid, mc, key_id, pn, cc->pn);
  546. return -EINVAL;
  547. }
  548. memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
  549. return 0;
  550. }
  551. /*
  552. * Pass Rx packet to the netif. Update statistics.
  553. * Called in softirq context (NAPI poll).
  554. */
  555. void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
  556. {
  557. gro_result_t rc = GRO_NORMAL;
  558. struct wil6210_priv *wil = ndev_to_wil(ndev);
  559. struct wireless_dev *wdev = wil_to_wdev(wil);
  560. unsigned int len = skb->len;
  561. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  562. int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
  563. int security = wil_rxdesc_security(d);
  564. struct ethhdr *eth = (void *)skb->data;
  565. /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
  566. * is not suitable, need to look at data
  567. */
  568. int mcast = is_multicast_ether_addr(eth->h_dest);
  569. struct wil_net_stats *stats = &wil->sta[cid].stats;
  570. struct sk_buff *xmit_skb = NULL;
  571. static const char * const gro_res_str[] = {
  572. [GRO_MERGED] = "GRO_MERGED",
  573. [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
  574. [GRO_HELD] = "GRO_HELD",
  575. [GRO_NORMAL] = "GRO_NORMAL",
  576. [GRO_DROP] = "GRO_DROP",
  577. };
  578. if (ndev->features & NETIF_F_RXHASH)
  579. /* fake an L4 hash to ensure it won't be re-calculated later;
  580. * set the hash to any non-zero value to activate the RPS
  581. * mechanism - the core will be chosen according
  582. * to the user-level RPS configuration.
  583. */
  584. skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
  585. skb_orphan(skb);
  586. if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
  587. rc = GRO_DROP;
  588. dev_kfree_skb(skb);
  589. stats->rx_replay++;
  590. goto stats;
  591. }
  592. if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
  593. if (mcast) {
  594. /* send multicast frames both to higher layers in
  595. * local net stack and back to the wireless medium
  596. */
  597. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  598. } else {
  599. int xmit_cid = wil_find_cid(wil, eth->h_dest);
  600. if (xmit_cid >= 0) {
  601. /* The destination station is associated to
  602. * this AP (in this VLAN), so send the frame
  603. * directly to it and do not pass it to local
  604. * net stack.
  605. */
  606. xmit_skb = skb;
  607. skb = NULL;
  608. }
  609. }
  610. }
  611. if (xmit_skb) {
  612. /* Send to wireless media and increase priority by 256 to
  613. * keep the received priority instead of reclassifying
  614. * the frame (see cfg80211_classify8021d).
  615. */
  616. xmit_skb->dev = ndev;
  617. xmit_skb->priority += 256;
  618. xmit_skb->protocol = htons(ETH_P_802_3);
  619. skb_reset_network_header(xmit_skb);
  620. skb_reset_mac_header(xmit_skb);
  621. wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
  622. dev_queue_xmit(xmit_skb);
  623. }
  624. if (skb) { /* deliver to local stack */
  625. skb->protocol = eth_type_trans(skb, ndev);
  626. rc = napi_gro_receive(&wil->napi_rx, skb);
  627. wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
  628. len, gro_res_str[rc]);
  629. }
  630. stats:
  631. /* statistics. rc set to GRO_NORMAL for AP bridging */
  632. if (unlikely(rc == GRO_DROP)) {
  633. ndev->stats.rx_dropped++;
  634. stats->rx_dropped++;
  635. wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
  636. } else {
  637. ndev->stats.rx_packets++;
  638. stats->rx_packets++;
  639. ndev->stats.rx_bytes += len;
  640. stats->rx_bytes += len;
  641. if (mcast)
  642. ndev->stats.multicast++;
  643. }
  644. }
  645. /**
  646. * Process all completed skbs from the Rx VRING
  647. *
  648. * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
  649. */
  650. void wil_rx_handle(struct wil6210_priv *wil, int *quota)
  651. {
  652. struct net_device *ndev = wil_to_ndev(wil);
  653. struct vring *v = &wil->vring_rx;
  654. struct sk_buff *skb;
  655. if (unlikely(!v->va)) {
  656. wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
  657. return;
  658. }
  659. wil_dbg_txrx(wil, "rx_handle\n");
  660. while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
  661. (*quota)--;
  662. if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
  663. skb->dev = ndev;
  664. skb_reset_mac_header(skb);
  665. skb->ip_summed = CHECKSUM_UNNECESSARY;
  666. skb->pkt_type = PACKET_OTHERHOST;
  667. skb->protocol = htons(ETH_P_802_2);
  668. wil_netif_rx_any(skb, ndev);
  669. } else {
  670. wil_rx_reorder(wil, skb);
  671. }
  672. }
  673. wil_rx_refill(wil, v->size);
  674. }
  675. int wil_rx_init(struct wil6210_priv *wil, u16 size)
  676. {
  677. struct vring *vring = &wil->vring_rx;
  678. int rc;
  679. wil_dbg_misc(wil, "rx_init\n");
  680. if (vring->va) {
  681. wil_err(wil, "Rx ring already allocated\n");
  682. return -EINVAL;
  683. }
  684. vring->size = size;
  685. rc = wil_vring_alloc(wil, vring);
  686. if (rc)
  687. return rc;
  688. rc = wmi_rx_chain_add(wil, vring);
  689. if (rc)
  690. goto err_free;
  691. rc = wil_rx_refill(wil, vring->size);
  692. if (rc)
  693. goto err_free;
  694. return 0;
  695. err_free:
  696. wil_vring_free(wil, vring, 0);
  697. return rc;
  698. }
  699. void wil_rx_fini(struct wil6210_priv *wil)
  700. {
  701. struct vring *vring = &wil->vring_rx;
  702. wil_dbg_misc(wil, "rx_fini\n");
  703. if (vring->va)
  704. wil_vring_free(wil, vring, 0);
  705. }
  706. static inline void wil_tx_data_init(struct vring_tx_data *txdata)
  707. {
  708. spin_lock_bh(&txdata->lock);
  709. txdata->dot1x_open = 0;
  710. txdata->enabled = 0;
  711. txdata->idle = 0;
  712. txdata->last_idle = 0;
  713. txdata->begin = 0;
  714. txdata->agg_wsize = 0;
  715. txdata->agg_timeout = 0;
  716. txdata->agg_amsdu = 0;
  717. txdata->addba_in_progress = false;
  718. spin_unlock_bh(&txdata->lock);
  719. }
  720. int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
  721. int cid, int tid)
  722. {
  723. int rc;
  724. struct wmi_vring_cfg_cmd cmd = {
  725. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  726. .vring_cfg = {
  727. .tx_sw_ring = {
  728. .max_mpdu_size =
  729. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  730. .ring_size = cpu_to_le16(size),
  731. },
  732. .ringid = id,
  733. .cidxtid = mk_cidxtid(cid, tid),
  734. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  735. .mac_ctrl = 0,
  736. .to_resolution = 0,
  737. .agg_max_wsize = 0,
  738. .schd_params = {
  739. .priority = cpu_to_le16(0),
  740. .timeslot_us = cpu_to_le16(0xfff),
  741. },
  742. },
  743. };
  744. struct {
  745. struct wmi_cmd_hdr wmi;
  746. struct wmi_vring_cfg_done_event cmd;
  747. } __packed reply;
  748. struct vring *vring = &wil->vring_tx[id];
  749. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  750. wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
  751. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  752. lockdep_assert_held(&wil->mutex);
  753. if (vring->va) {
  754. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  755. rc = -EINVAL;
  756. goto out;
  757. }
  758. wil_tx_data_init(txdata);
  759. vring->size = size;
  760. rc = wil_vring_alloc(wil, vring);
  761. if (rc)
  762. goto out;
  763. wil->vring2cid_tid[id][0] = cid;
  764. wil->vring2cid_tid[id][1] = tid;
  765. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  766. if (!wil->privacy)
  767. txdata->dot1x_open = true;
  768. rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  769. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  770. if (rc)
  771. goto out_free;
  772. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  773. wil_err(wil, "Tx config failed, status 0x%02x\n",
  774. reply.cmd.status);
  775. rc = -EINVAL;
  776. goto out_free;
  777. }
  778. spin_lock_bh(&txdata->lock);
  779. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  780. txdata->enabled = 1;
  781. spin_unlock_bh(&txdata->lock);
  782. if (txdata->dot1x_open && (agg_wsize >= 0))
  783. wil_addba_tx_request(wil, id, agg_wsize);
  784. return 0;
  785. out_free:
  786. spin_lock_bh(&txdata->lock);
  787. txdata->dot1x_open = false;
  788. txdata->enabled = 0;
  789. spin_unlock_bh(&txdata->lock);
  790. wil_vring_free(wil, vring, 1);
  791. wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
  792. wil->vring2cid_tid[id][1] = 0;
  793. out:
  794. return rc;
  795. }
  796. int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
  797. {
  798. int rc;
  799. struct wmi_bcast_vring_cfg_cmd cmd = {
  800. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  801. .vring_cfg = {
  802. .tx_sw_ring = {
  803. .max_mpdu_size =
  804. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  805. .ring_size = cpu_to_le16(size),
  806. },
  807. .ringid = id,
  808. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  809. },
  810. };
  811. struct {
  812. struct wmi_cmd_hdr wmi;
  813. struct wmi_vring_cfg_done_event cmd;
  814. } __packed reply;
  815. struct vring *vring = &wil->vring_tx[id];
  816. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  817. wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
  818. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  819. lockdep_assert_held(&wil->mutex);
  820. if (vring->va) {
  821. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  822. rc = -EINVAL;
  823. goto out;
  824. }
  825. wil_tx_data_init(txdata);
  826. vring->size = size;
  827. rc = wil_vring_alloc(wil, vring);
  828. if (rc)
  829. goto out;
  830. wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
  831. wil->vring2cid_tid[id][1] = 0; /* TID */
  832. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  833. if (!wil->privacy)
  834. txdata->dot1x_open = true;
  835. rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  836. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  837. if (rc)
  838. goto out_free;
  839. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  840. wil_err(wil, "Tx config failed, status 0x%02x\n",
  841. reply.cmd.status);
  842. rc = -EINVAL;
  843. goto out_free;
  844. }
  845. spin_lock_bh(&txdata->lock);
  846. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  847. txdata->enabled = 1;
  848. spin_unlock_bh(&txdata->lock);
  849. return 0;
  850. out_free:
  851. spin_lock_bh(&txdata->lock);
  852. txdata->enabled = 0;
  853. txdata->dot1x_open = false;
  854. spin_unlock_bh(&txdata->lock);
  855. wil_vring_free(wil, vring, 1);
  856. out:
  857. return rc;
  858. }
  859. void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
  860. {
  861. struct vring *vring = &wil->vring_tx[id];
  862. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  863. lockdep_assert_held(&wil->mutex);
  864. if (!vring->va)
  865. return;
  866. wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
  867. spin_lock_bh(&txdata->lock);
  868. txdata->dot1x_open = false;
  869. txdata->enabled = 0; /* no Tx can be in progress or start anew */
  870. spin_unlock_bh(&txdata->lock);
  871. /* napi_synchronize waits for completion of the current NAPI but will
  872. * not prevent the next NAPI run.
  873. * Add a memory barrier to guarantee that txdata->enabled is zeroed
  874. * before napi_synchronize so that the next scheduled NAPI will not
  875. * handle this vring
  876. */
  877. wmb();
  878. /* make sure NAPI won't touch this vring */
  879. if (test_bit(wil_status_napi_en, wil->status))
  880. napi_synchronize(&wil->napi_tx);
  881. wil_vring_free(wil, vring, 1);
  882. }
  883. static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
  884. struct sk_buff *skb)
  885. {
  886. int i;
  887. struct ethhdr *eth = (void *)skb->data;
  888. int cid = wil_find_cid(wil, eth->h_dest);
  889. if (cid < 0)
  890. return NULL;
  891. /* TODO: fix for multiple TID */
  892. for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
  893. if (!wil->vring_tx_data[i].dot1x_open &&
  894. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  895. continue;
  896. if (wil->vring2cid_tid[i][0] == cid) {
  897. struct vring *v = &wil->vring_tx[i];
  898. struct vring_tx_data *txdata = &wil->vring_tx_data[i];
  899. wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
  900. eth->h_dest, i);
  901. if (v->va && txdata->enabled) {
  902. return v;
  903. } else {
  904. wil_dbg_txrx(wil,
  905. "find_tx_ucast: vring[%d] not valid\n",
  906. i);
  907. return NULL;
  908. }
  909. }
  910. }
  911. return NULL;
  912. }
  913. static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
  914. struct sk_buff *skb);
  915. static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
  916. struct sk_buff *skb)
  917. {
  918. struct vring *v;
  919. int i;
  920. u8 cid;
  921. struct vring_tx_data *txdata;
  922. /* In STA mode only one VRING is expected,
  923. * for the AP we are connected to.
  924. * Find the first vring eligible for this skb and use it.
  925. */
  926. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  927. v = &wil->vring_tx[i];
  928. txdata = &wil->vring_tx_data[i];
  929. if (!v->va || !txdata->enabled)
  930. continue;
  931. cid = wil->vring2cid_tid[i][0];
  932. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  933. continue;
  934. if (!wil->vring_tx_data[i].dot1x_open &&
  935. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  936. continue;
  937. wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
  938. return v;
  939. }
  940. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  941. return NULL;
  942. }
  943. /* Use one of 2 strategies:
  944. *
  945. * 1. New (real broadcast):
  946. * use dedicated broadcast vring
  947. * 2. Old (pseudo-DMS):
  948. * Find 1-st vring and return it;
  949. * duplicate skb and send it to other active vrings;
  950. * in all cases override dest address to unicast peer's address
  951. * Use old strategy when new is not supported yet:
  952. * - for PBSS
  953. */
  954. static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
  955. struct sk_buff *skb)
  956. {
  957. struct vring *v;
  958. struct vring_tx_data *txdata;
  959. int i = wil->bcast_vring;
  960. if (i < 0)
  961. return NULL;
  962. v = &wil->vring_tx[i];
  963. txdata = &wil->vring_tx_data[i];
  964. if (!v->va || !txdata->enabled)
  965. return NULL;
  966. if (!wil->vring_tx_data[i].dot1x_open &&
  967. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  968. return NULL;
  969. return v;
  970. }
  971. static void wil_set_da_for_vring(struct wil6210_priv *wil,
  972. struct sk_buff *skb, int vring_index)
  973. {
  974. struct ethhdr *eth = (void *)skb->data;
  975. int cid = wil->vring2cid_tid[vring_index][0];
  976. ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
  977. }
  978. static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
  979. struct sk_buff *skb)
  980. {
  981. struct vring *v, *v2;
  982. struct sk_buff *skb2;
  983. int i;
  984. u8 cid;
  985. struct ethhdr *eth = (void *)skb->data;
  986. char *src = eth->h_source;
  987. struct vring_tx_data *txdata;
  988. /* find 1-st vring eligible for data */
  989. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  990. v = &wil->vring_tx[i];
  991. txdata = &wil->vring_tx_data[i];
  992. if (!v->va || !txdata->enabled)
  993. continue;
  994. cid = wil->vring2cid_tid[i][0];
  995. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  996. continue;
  997. if (!wil->vring_tx_data[i].dot1x_open &&
  998. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  999. continue;
  1000. /* don't Tx back to source when re-routing Rx->Tx at the AP */
  1001. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  1002. continue;
  1003. goto found;
  1004. }
  1005. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  1006. return NULL;
  1007. found:
  1008. wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
  1009. wil_set_da_for_vring(wil, skb, i);
  1010. /* find other active vrings and duplicate skb for each */
  1011. for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
  1012. v2 = &wil->vring_tx[i];
  1013. if (!v2->va)
  1014. continue;
  1015. cid = wil->vring2cid_tid[i][0];
  1016. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  1017. continue;
  1018. if (!wil->vring_tx_data[i].dot1x_open &&
  1019. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  1020. continue;
  1021. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  1022. continue;
  1023. skb2 = skb_copy(skb, GFP_ATOMIC);
  1024. if (skb2) {
  1025. wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
  1026. wil_set_da_for_vring(wil, skb2, i);
  1027. wil_tx_vring(wil, v2, skb2);
  1028. } else {
  1029. wil_err(wil, "skb_copy failed\n");
  1030. }
  1031. }
  1032. return v;
  1033. }
  1034. static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
  1035. int vring_index)
  1036. {
  1037. wil_desc_addr_set(&d->dma.addr, pa);
  1038. d->dma.ip_length = 0;
  1039. /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
  1040. d->dma.b11 = 0/*14 | BIT(7)*/;
  1041. d->dma.error = 0;
  1042. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  1043. d->dma.length = cpu_to_le16((u16)len);
  1044. d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
  1045. d->mac.d[0] = 0;
  1046. d->mac.d[1] = 0;
  1047. d->mac.d[2] = 0;
  1048. d->mac.ucode_cmd = 0;
  1049. /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
  1050. d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
  1051. (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
  1052. return 0;
  1053. }
  1054. static inline
  1055. void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
  1056. {
  1057. d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
  1058. }
  1059. /**
  1060. * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
  1061. * @skb is used to obtain the protocol and headers length.
  1062. * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
  1063. * 2 - middle, 3 - last descriptor.
  1064. */
  1065. static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
  1066. struct sk_buff *skb,
  1067. int tso_desc_type, bool is_ipv4,
  1068. int tcp_hdr_len, int skb_net_hdr_len)
  1069. {
  1070. d->dma.b11 = ETH_HLEN; /* MAC header length */
  1071. d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
  1072. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  1073. /* L4 header len: TCP header length */
  1074. d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1075. /* Setup TSO: bit and desc type */
  1076. d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
  1077. (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
  1078. d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
  1079. d->dma.ip_length = skb_net_hdr_len;
  1080. /* Enable TCP/UDP checksum */
  1081. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  1082. /* Calculate pseudo-header */
  1083. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  1084. }
  1085. /**
  1086. * Sets the descriptor @d up for csum. The corresponding
  1087. * @skb is used to obtain the protocol and headers length.
  1088. * Returns 0 on success (including skbs that do not request checksum
  1089. * offload) or -EINVAL when the L3/L4 protocol is not supported.
  1090. *
  1091. * It is very similar to previous wil_tx_desc_offload_setup_tso. This
  1092. * is "if unrolling" to optimize the critical path.
  1093. */
  1094. static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
  1095. struct sk_buff *skb)
{
  1096. int protocol;
  1097. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1098. return 0;
  1099. d->dma.b11 = ETH_HLEN; /* MAC header length */
  1100. switch (skb->protocol) {
  1101. case cpu_to_be16(ETH_P_IP):
  1102. protocol = ip_hdr(skb)->protocol;
  1103. d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
  1104. break;
  1105. case cpu_to_be16(ETH_P_IPV6):
  1106. protocol = ipv6_hdr(skb)->nexthdr;
  1107. break;
  1108. default:
  1109. return -EINVAL;
  1110. }
  1111. switch (protocol) {
  1112. case IPPROTO_TCP:
  1113. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  1114. /* L4 header len: TCP header length */
  1115. d->dma.d0 |=
  1116. (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1117. break;
  1118. case IPPROTO_UDP:
  1119. /* L4 header len: UDP header length */
  1120. d->dma.d0 |=
  1121. (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1122. break;
  1123. default:
  1124. return -EINVAL;
  1125. }
  1126. d->dma.ip_length = skb_network_header_len(skb);
  1127. /* Enable TCP/UDP checksum */
  1128. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  1129. /* Calculate pseudo-header */
  1130. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  1131. return 0;
  1132. }
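/* Worked example for the offload setup above: a CHECKSUM_PARTIAL TCP/IPv4
 * skb with no IP options gets b11 = ETH_HLEN with the L3T_IPV4 bit set,
 * L4_TYPE = 2 (TCP), L4_LENGTH = tcp_hdrlen(skb), ip_length = 20, and the
 * TCP_UDP_CHECKSUM_EN and PSEUDO_HEADER_CALC_EN bits set in dma.d0.
 */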
  1133. static inline void wil_tx_last_desc(struct vring_tx_desc *d)
  1134. {
  1135. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
  1136. BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
  1137. BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
  1138. }
  1139. static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
  1140. {
  1141. d->dma.d0 |= wil_tso_type_lst <<
  1142. DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
  1143. }
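/* __wil_tx_vring_tso() below builds the chain as
 * [header desc][first data desc][mid ...][last data desc], using the
 * wil_tso_type_{hdr,first,mid,lst} codes for SEGMENT_BUF_DETAILS. The
 * header descriptor carries the total descriptor count in its nr_frags
 * field, and each segment's first descriptor carries that segment's count
 * (the first segment also accounts for the header descriptor).
 */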
  1144. static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
  1145. struct sk_buff *skb)
  1146. {
  1147. struct device *dev = wil_to_dev(wil);
  1148. /* point to descriptors in shared memory */
  1149. volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
  1150. *_first_desc = NULL;
  1151. /* pointers to shadow descriptors */
  1152. struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
  1153. *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
  1154. *first_desc = &first_desc_mem;
  1155. /* pointer to shadow descriptors' context */
  1156. struct wil_ctx *hdr_ctx, *first_ctx = NULL;
  1157. int descs_used = 0; /* total number of used descriptors */
  1158. int sg_desc_cnt = 0; /* number of descriptors for current mss*/
  1159. u32 swhead = vring->swhead;
  1160. int used, avail = wil_vring_avail_tx(vring);
  1161. int nr_frags = skb_shinfo(skb)->nr_frags;
  1162. int min_desc_required = nr_frags + 1;
  1163. int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
  1164. int f, len, hdrlen, headlen;
  1165. int vring_index = vring - wil->vring_tx;
  1166. struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
  1167. uint i = swhead;
  1168. dma_addr_t pa;
  1169. const skb_frag_t *frag = NULL;
  1170. int rem_data = mss;
  1171. int lenmss;
  1172. int hdr_compensation_need = true;
  1173. int desc_tso_type = wil_tso_type_first;
  1174. bool is_ipv4;
  1175. int tcp_hdr_len;
  1176. int skb_net_hdr_len;
  1177. int gso_type;
  1178. int rc = -EINVAL;
  1179. wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
  1180. vring_index);
  1181. if (unlikely(!txdata->enabled))
  1182. return -EINVAL;
  1183. /* A typical 4K page holds 3-4 payloads; we assume each fragment
  1184. * is a full payload, which is how min_desc_required has been
  1185. * calculated. In reality we might need more or fewer descriptors;
  1186. * this is only the initial check.
  1187. */
  1188. if (unlikely(avail < min_desc_required)) {
  1189. wil_err_ratelimited(wil,
  1190. "TSO: Tx ring[%2d] full. No space for %d fragments\n",
  1191. vring_index, min_desc_required);
  1192. return -ENOMEM;
  1193. }
  1194. /* Header Length = MAC header len + IP header len + TCP header len*/
  1195. hdrlen = ETH_HLEN +
  1196. (int)skb_network_header_len(skb) +
  1197. tcp_hdrlen(skb);
  1198. gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
  1199. switch (gso_type) {
  1200. case SKB_GSO_TCPV4:
  1201. /* TCP v4, zero out the IP length and IPv4 checksum fields
  1202. * as required by the offloading doc
  1203. */
  1204. ip_hdr(skb)->tot_len = 0;
  1205. ip_hdr(skb)->check = 0;
  1206. is_ipv4 = true;
  1207. break;
  1208. case SKB_GSO_TCPV6:
  1209. /* TCP v6, zero out the payload length */
  1210. ipv6_hdr(skb)->payload_len = 0;
  1211. is_ipv4 = false;
  1212. break;
  1213. default:
  1214. /* other than TCPv4 or TCPv6 types are not supported for TSO.
  1215. * It is also illegal for both to be set simultaneously
  1216. */
  1217. return -EINVAL;
  1218. }
  1219. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1220. return -EINVAL;
  1221. /* tcp header length and skb network header length are fixed for all
  1222. * of the packet's descriptors - read them once here
  1223. */
  1224. tcp_hdr_len = tcp_hdrlen(skb);
  1225. skb_net_hdr_len = skb_network_header_len(skb);
  1226. _hdr_desc = &vring->va[i].tx;
  1227. pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
  1228. if (unlikely(dma_mapping_error(dev, pa))) {
  1229. wil_err(wil, "TSO: Skb head DMA map error\n");
  1230. goto err_exit;
  1231. }
  1232. wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
  1233. wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
  1234. tcp_hdr_len, skb_net_hdr_len);
  1235. wil_tx_last_desc(hdr_desc);
  1236. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1237. hdr_ctx = &vring->ctx[i];
  1238. descs_used++;
  1239. headlen = skb_headlen(skb) - hdrlen;
  1240. for (f = headlen ? -1 : 0; f < nr_frags; f++) {
  1241. if (headlen) {
  1242. len = headlen;
  1243. wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
  1244. len);
  1245. } else {
  1246. frag = &skb_shinfo(skb)->frags[f];
  1247. len = frag->size;
  1248. wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
  1249. }
  1250. while (len) {
  1251. wil_dbg_txrx(wil,
  1252. "TSO: len %d, rem_data %d, descs_used %d\n",
  1253. len, rem_data, descs_used);
  1254. if (descs_used == avail) {
  1255. wil_err_ratelimited(wil, "TSO: ring overflow\n");
  1256. rc = -ENOMEM;
  1257. goto mem_error;
  1258. }
  1259. lenmss = min_t(int, rem_data, len);
  1260. i = (swhead + descs_used) % vring->size;
  1261. wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
  1262. if (!headlen) {
  1263. pa = skb_frag_dma_map(dev, frag,
  1264. frag->size - len, lenmss,
  1265. DMA_TO_DEVICE);
  1266. vring->ctx[i].mapped_as = wil_mapped_as_page;
  1267. } else {
  1268. pa = dma_map_single(dev,
  1269. skb->data +
  1270. skb_headlen(skb) - headlen,
  1271. lenmss,
  1272. DMA_TO_DEVICE);
  1273. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1274. headlen -= lenmss;
  1275. }
  1276. if (unlikely(dma_mapping_error(dev, pa))) {
  1277. wil_err(wil, "TSO: DMA map page error\n");
  1278. goto mem_error;
  1279. }
  1280. _desc = &vring->va[i].tx;
  1281. if (!_first_desc) {
  1282. _first_desc = _desc;
  1283. first_ctx = &vring->ctx[i];
  1284. d = first_desc;
  1285. } else {
  1286. d = &desc_mem;
  1287. }
  1288. wil_tx_desc_map(d, pa, lenmss, vring_index);
  1289. wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
  1290. is_ipv4, tcp_hdr_len,
  1291. skb_net_hdr_len);
  1292. /* use tso_type_first only once */
  1293. desc_tso_type = wil_tso_type_mid;
  1294. descs_used++; /* desc used so far */
  1295. sg_desc_cnt++; /* desc used for this segment */
  1296. len -= lenmss;
  1297. rem_data -= lenmss;
  1298. wil_dbg_txrx(wil,
  1299. "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
  1300. len, rem_data, descs_used, sg_desc_cnt);
  1301. /* Close the segment if reached mss size or last frag*/
  1302. if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
  1303. if (hdr_compensation_need) {
  1304. /* first segment include hdr desc for
  1305. * release
  1306. */
  1307. hdr_ctx->nr_frags = sg_desc_cnt;
  1308. wil_tx_desc_set_nr_frags(first_desc,
  1309. sg_desc_cnt +
  1310. 1);
  1311. hdr_compensation_need = false;
  1312. } else {
  1313. wil_tx_desc_set_nr_frags(first_desc,
  1314. sg_desc_cnt);
  1315. }
  1316. first_ctx->nr_frags = sg_desc_cnt - 1;
  1317. wil_tx_last_desc(d);
  1318. /* first descriptor may also be the last
  1319. * for this mss - make sure not to copy
  1320. * it twice
  1321. */
  1322. if (first_desc != d)
  1323. *_first_desc = *first_desc;
  1324. /* the last descriptor will be copied at the end
  1325. * of this TSO processing
  1326. */
  1327. if (f < nr_frags - 1 || len > 0)
  1328. *_desc = *d;
  1329. rem_data = mss;
  1330. _first_desc = NULL;
  1331. sg_desc_cnt = 0;
  1332. } else if (first_desc != d) /* update mid descriptor */
  1333. *_desc = *d;
  1334. }
  1335. }
  1336. /* first descriptor may also be the last.
  1337. * in this case d pointer is invalid
  1338. */
  1339. if (_first_desc == _desc)
  1340. d = first_desc;
  1341. /* Last data descriptor */
  1342. wil_set_tx_desc_last_tso(d);
  1343. *_desc = *d;
  1344. /* Fill the total number of descriptors in first desc (hdr)*/
  1345. wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
  1346. *_hdr_desc = *hdr_desc;
  1347. /* hold reference to skb
  1348. * to prevent skb release before accounting
  1349. * in case of immediate "tx done"
  1350. */
  1351. vring->ctx[i].skb = skb_get(skb);
  1352. /* performance monitoring */
  1353. used = wil_vring_used_tx(vring);
  1354. if (wil_val_in_range(vring_idle_trsh,
  1355. used, used + descs_used)) {
  1356. txdata->idle += get_cycles() - txdata->last_idle;
  1357. wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
  1358. vring_index, used, used + descs_used);
  1359. }
  1360. /* Make sure to advance the head only after descriptor update is done.
  1361. * This will prevent a race condition where the completion thread
  1362. * will see the DU bit set from previous run and will handle the
  1363. * skb before it was completed.
  1364. */
  1365. wmb();
  1366. /* advance swhead */
  1367. wil_vring_advance_head(vring, descs_used);
  1368. wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
  1369. /* make sure all writes to descriptors (shared memory) are done before
  1370. * committing them to HW
  1371. */
  1372. wmb();
  1373. wil_w(wil, vring->hwtail, vring->swhead);
  1374. return 0;
  1375. mem_error:
  1376. while (descs_used > 0) {
  1377. struct wil_ctx *ctx;
  1378. i = (swhead + descs_used - 1) % vring->size;
  1379. d = (struct vring_tx_desc *)&vring->va[i].tx;
  1380. _desc = &vring->va[i].tx;
  1381. *d = *_desc;
  1382. _desc->dma.status = TX_DMA_STATUS_DU;
  1383. ctx = &vring->ctx[i];
  1384. wil_txdesc_unmap(dev, d, ctx);
  1385. memset(ctx, 0, sizeof(*ctx));
  1386. descs_used--;
  1387. }
  1388. err_exit:
  1389. return rc;
  1390. }
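/* The non-TSO transmit path below builds one descriptor for the linear part
 * of the skb (dma_map_single) plus one per page fragment (skb_frag_dma_map),
 * i.e. 1 + nr_frags descriptors per frame. A minimal sketch of the room
 * check the function performs inline - the helper name is hypothetical and
 * not part of this driver:
 *
 *	static bool wil_tx_have_room(struct vring *vring, struct sk_buff *skb)
 *	{
 *		return wil_vring_avail_tx(vring) >=
 *		       1 + skb_shinfo(skb)->nr_frags;
 *	}
 */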
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				vring_index);
			goto dma_error;
		}
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;

dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}
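/* wil_tx_vring() below serializes producers on txdata->lock and picks the
 * TSO or the plain path via a function-pointer ternary. An equivalent, more
 * explicit form of that dispatch (illustrative only, not how the driver
 * writes it):
 *
 *	spin_lock(&txdata->lock);
 *	if (skb_is_gso(skb))
 *		rc = __wil_tx_vring_tso(wil, vring, skb);
 *	else
 *		rc = __wil_tx_vring(wil, vring, skb);
 *	spin_unlock(&txdata->lock);
 */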
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}
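/* Flow-control note (derived from the checks in __wil_update_net_queues()
 * below): stop and wake use different watermarks, so the decision is
 * hysteretic - roughly
 *
 *	stop: wil_vring_avail_low(vring)  on the ring being filled
 *	wake: wil_vring_avail_high(vring) on the ring being drained, and
 *	      no other enabled ring is below the low watermark
 *
 * which keeps the stack from toggling the net queues on every packet.
 */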
/**
 * Check status of tx vrings and stop/wake net queues if needed
 *
 * This function does one of two checks:
 * In case check_stop is true, will check if net queues need to be stopped. If
 * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
 * In case check_stop is false, will check if net queues need to be woken. If
 * the conditions for waking are met, netif_tx_wake_all_queues() is called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
 * be null when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop net queues if the modified vring has low
 * descriptor availability. Wake if no vring is in low descriptor
 * availability and the modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct vring *vring,
					   bool check_stop)
{
	int i;

	if (vring)
		wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d",
			     (int)(vring - wil->vring_tx), check_stop,
			     wil->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, stopped=%d",
			     check_stop, wil->net_queue_stopped);

	if (check_stop == wil->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!vring || unlikely(wil_vring_avail_low(vring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(wil_to_ndev(wil));
			wil->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* check wake */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *cur_vring = &wil->vring_tx[i];
		struct vring_tx_data *txdata = &wil->vring_tx_data[i];

		if (!cur_vring->va || !txdata->enabled || cur_vring == vring)
			continue;

		if (wil_vring_avail_low(cur_vring)) {
			wil_dbg_txrx(wil, "vring %d full, can't wake\n",
				     (int)(cur_vring - wil->vring_tx));
			return;
		}
	}

	if (!vring || wil_vring_avail_high(vring)) {
		/* enough room in the vring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
		wil->net_queue_stopped = false;
	}
}
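/* Two thin wrappers follow. The plain variant takes net_queue_lock with
 * spin_lock() and is used from contexts assumed to already run with bottom
 * halves disabled (e.g. the tx-completion path below), while the _bh
 * variant also disables bottom halves for callers such as wil_start_xmit().
 * Usage sketch:
 *
 *	wil_update_net_queues_bh(wil, vring, true);	 may stop queues
 *	wil_update_net_queues(wil, vring, false);	 may wake queues
 */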
void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
			   bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
			      bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}
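/* Vring selection in wil_start_xmit() below, summarized from its branches:
 *
 *	STA (ESS)           -> wil_find_tx_vring_sta()  everything goes to AP
 *	bcast/mcast, PBSS   -> wil_find_tx_bcast_2()    duplicate to all STAs
 *	bcast/mcast, AP     -> wil_find_tx_bcast_1()    dedicated bcast vring
 *	bcast/mcast, other  -> wil_find_tx_bcast_2()    fallback duplication
 *	unicast             -> wil_find_tx_ucast()      lookup by destination
 */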
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
		/* in STA mode (ESS), all to same VRING (to AP) */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else if (bcast) {
		if (wil->pbss)
			/* in pbss, no bcast VRING - duplicate skb in
			 * all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
		else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast VRING */
			vring = wil_find_tx_bcast_1(wil, skb);
		else
			/* unexpected combination, fallback to duplicating
			 * the skb in all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
	} else {
		/* unicast, find specific VRING by dest. address */
		vring = wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
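/* Return-code handling in wil_start_xmit() above: rc == 0 consumes the skb
 * and reports NETDEV_TX_OK, rc == -ENOMEM leaves the skb with the stack and
 * returns NETDEV_TX_BUSY so the core requeues and retries it later, and any
 * other error falls through to the drop path, which frees the skb and bumps
 * ndev->stats.tx_dropped.
 */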
static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}
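/* wil_need_txstat()/wil_consume_skb() above: unicast frames whose socket
 * requested wifi tx status (SKBTX_WIFI_STATUS) are completed through
 * skb_complete_wifi_ack() so user space learns whether the frame was acked;
 * everything else is consumed or freed normally. A user-space sketch of
 * opting in via the standard SO_WIFI_STATUS socket option (illustrative):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS, &one, sizeof(one));
 */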
/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/* For a fragmented skb, HW sets the DU bit only on the
		 * last fragment - look for it.
		 * In TSO the first DU will include the hdr desc.
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_vring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vring, false);

	return done;
}
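/* Usage sketch for wil_tx_complete() (an assumption about callers, which
 * live outside this file): it is typically driven per ring from the tx
 * interrupt or NAPI poll path, e.g.
 *
 *	int done = wil_tx_complete(wil, ringid);
 *
 *	if (done < budget)
 *		napi_complete(napi);
 *
 * where "budget" and "napi" belong to the hypothetical NAPI caller; the
 * function itself only returns the number of descriptors it freed.
 */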