txrx.c

  1. /*
  2. * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/etherdevice.h>
  17. #include <net/ieee80211_radiotap.h>
  18. #include <linux/if_arp.h>
  19. #include <linux/moduleparam.h>
  20. #include <linux/ip.h>
  21. #include <linux/ipv6.h>
  22. #include <net/ipv6.h>
  23. #include <linux/prefetch.h>
  24. #include "wil6210.h"
  25. #include "wmi.h"
  26. #include "txrx.h"
  27. #include "trace.h"
  28. static bool rtap_include_phy_info;
  29. module_param(rtap_include_phy_info, bool, 0444);
  30. MODULE_PARM_DESC(rtap_include_phy_info,
  31. " Include PHY info in the radiotap header, default - no");
  32. bool rx_align_2;
  33. module_param(rx_align_2, bool, 0444);
  34. MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
  35. bool rx_large_buf;
  36. module_param(rx_large_buf, bool, 0444);
  37. MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
  38. static inline uint wil_rx_snaplen(void)
  39. {
  40. return rx_align_2 ? 6 : 0;
  41. }
  42. static inline int wil_vring_is_empty(struct vring *vring)
  43. {
  44. return vring->swhead == vring->swtail;
  45. }
  46. static inline u32 wil_vring_next_tail(struct vring *vring)
  47. {
  48. return (vring->swtail + 1) % vring->size;
  49. }
  50. static inline void wil_vring_advance_head(struct vring *vring, int n)
  51. {
  52. vring->swhead = (vring->swhead + n) % vring->size;
  53. }
  54. static inline int wil_vring_is_full(struct vring *vring)
  55. {
  56. return wil_vring_next_tail(vring) == vring->swhead;
  57. }
  58. /* Used space in Tx Vring */
  59. static inline int wil_vring_used_tx(struct vring *vring)
  60. {
  61. u32 swhead = vring->swhead;
  62. u32 swtail = vring->swtail;
  63. return (vring->size + swhead - swtail) % vring->size;
  64. }
  65. /* Available space in Tx Vring */
  66. static inline int wil_vring_avail_tx(struct vring *vring)
  67. {
  68. return vring->size - wil_vring_used_tx(vring) - 1;
  69. }
  70. /* wil_vring_wmark_low - low watermark for available descriptor space */
  71. static inline int wil_vring_wmark_low(struct vring *vring)
  72. {
  73. return vring->size/8;
  74. }
  75. /* wil_vring_wmark_high - high watermark for available descriptor space */
  76. static inline int wil_vring_wmark_high(struct vring *vring)
  77. {
  78. return vring->size/4;
  79. }
  80. /* returns true if num avail descriptors is lower than wmark_low */
  81. static inline int wil_vring_avail_low(struct vring *vring)
  82. {
  83. return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
  84. }
  85. /* returns true if num avail descriptors is higher than wmark_high */
  86. static inline int wil_vring_avail_high(struct vring *vring)
  87. {
  88. return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
  89. }
  90. /* wil_val_in_range - check if value in [min,max) */
  91. static inline bool wil_val_in_range(int val, int min, int max)
  92. {
  93. return val >= min && val < max;
  94. }
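/* Illustrative, standalone sketch (not part of the driver): swhead/swtail
 * implement a circular ring in which one slot is always left unused so that
 * "empty" (head == tail) and "full" (avail == 0) stay distinguishable. The
 * demo_* names below are hypothetical and only repeat the modular arithmetic
 * of the helpers above for a Tx-style ring (head = producer, tail = reclaim).
 */
#include <assert.h>

struct demo_ring { unsigned int size, head, tail; };

static unsigned int demo_used(const struct demo_ring *r)
{
	return (r->size + r->head - r->tail) % r->size;
}

static unsigned int demo_avail(const struct demo_ring *r)
{
	return r->size - demo_used(r) - 1;	/* one slot kept unused */
}

static void demo_ring_math(void)
{
	struct demo_ring r = { .size = 8, .head = 0, .tail = 0 };

	assert(r.head == r.tail && demo_avail(&r) == 7);	/* empty */
	r.head = (r.head + 5) % r.size;		/* post 5 descriptors */
	assert(demo_used(&r) == 5 && demo_avail(&r) == 2);
	r.tail = (r.tail + 3) % r.size;		/* complete 3 of them */
	assert(demo_used(&r) == 2 && demo_avail(&r) == 5);
	r.head = (r.head + 5) % r.size;		/* post 5 more: ring is full */
	assert(demo_avail(&r) == 0);
}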
  95. static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
  96. {
  97. struct device *dev = wil_to_dev(wil);
  98. size_t sz = vring->size * sizeof(vring->va[0]);
  99. uint i;
  100. wil_dbg_misc(wil, "vring_alloc:\n");
  101. BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
  102. vring->swhead = 0;
  103. vring->swtail = 0;
  104. vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
  105. if (!vring->ctx) {
  106. vring->va = NULL;
  107. return -ENOMEM;
  108. }
  109. /* vring->va should be aligned on its size rounded up to power of 2
  110. * This is granted by the dma_alloc_coherent.
  111. *
  112. * HW has limitation that all vrings addresses must share the same
  113. * upper 16 msb bits part of 48 bits address. To workaround that,
  114. * if we are using 48 bit addresses switch to 32 bit allocation
  115. * before allocating vring memory.
  116. *
  117. * There's no check for the return value of dma_set_mask_and_coherent,
  118. * since we assume if we were able to set the mask during
  119. * initialization in this system it will not fail if we set it again
  120. */
  121. if (wil->use_extended_dma_addr)
  122. dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
  123. vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
  124. if (!vring->va) {
  125. kfree(vring->ctx);
  126. vring->ctx = NULL;
  127. return -ENOMEM;
  128. }
  129. if (wil->use_extended_dma_addr)
  130. dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
  131. /* initially, all descriptors are SW owned
  132. * For Tx and Rx, ownership bit is at the same location, thus
  133. * we can use any
  134. */
  135. for (i = 0; i < vring->size; i++) {
  136. volatile struct vring_tx_desc *_d = &vring->va[i].tx;
  137. _d->dma.status = TX_DMA_STATUS_DU;
  138. }
  139. wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
  140. vring->va, &vring->pa, vring->ctx);
  141. return 0;
  142. }
  143. static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
  144. struct wil_ctx *ctx)
  145. {
  146. dma_addr_t pa = wil_desc_addr(&d->dma.addr);
  147. u16 dmalen = le16_to_cpu(d->dma.length);
  148. switch (ctx->mapped_as) {
  149. case wil_mapped_as_single:
  150. dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
  151. break;
  152. case wil_mapped_as_page:
  153. dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
  154. break;
  155. default:
  156. break;
  157. }
  158. }
  159. static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
  160. int tx)
  161. {
  162. struct device *dev = wil_to_dev(wil);
  163. size_t sz = vring->size * sizeof(vring->va[0]);
  164. lockdep_assert_held(&wil->mutex);
  165. if (tx) {
  166. int vring_index = vring - wil->vring_tx;
  167. wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
  168. vring_index, vring->size, vring->va,
  169. &vring->pa, vring->ctx);
  170. } else {
  171. wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
  172. vring->size, vring->va,
  173. &vring->pa, vring->ctx);
  174. }
  175. while (!wil_vring_is_empty(vring)) {
  176. dma_addr_t pa;
  177. u16 dmalen;
  178. struct wil_ctx *ctx;
  179. if (tx) {
  180. struct vring_tx_desc dd, *d = &dd;
  181. volatile struct vring_tx_desc *_d =
  182. &vring->va[vring->swtail].tx;
  183. ctx = &vring->ctx[vring->swtail];
  184. if (!ctx) {
  185. wil_dbg_txrx(wil,
  186. "ctx(%d) was already completed\n",
  187. vring->swtail);
  188. vring->swtail = wil_vring_next_tail(vring);
  189. continue;
  190. }
  191. *d = *_d;
  192. wil_txdesc_unmap(dev, d, ctx);
  193. if (ctx->skb)
  194. dev_kfree_skb_any(ctx->skb);
  195. vring->swtail = wil_vring_next_tail(vring);
  196. } else { /* rx */
  197. struct vring_rx_desc dd, *d = &dd;
  198. volatile struct vring_rx_desc *_d =
  199. &vring->va[vring->swhead].rx;
  200. ctx = &vring->ctx[vring->swhead];
  201. *d = *_d;
  202. pa = wil_desc_addr(&d->dma.addr);
  203. dmalen = le16_to_cpu(d->dma.length);
  204. dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
  205. kfree_skb(ctx->skb);
  206. wil_vring_advance_head(vring, 1);
  207. }
  208. }
  209. dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
  210. kfree(vring->ctx);
  211. vring->pa = 0;
  212. vring->va = NULL;
  213. vring->ctx = NULL;
  214. }
  215. /**
  216. * Allocate one skb for Rx VRING
  217. *
  218. * Safe to call from IRQ
  219. */
  220. static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
  221. u32 i, int headroom)
  222. {
  223. struct device *dev = wil_to_dev(wil);
  224. unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
  225. struct vring_rx_desc dd, *d = &dd;
  226. volatile struct vring_rx_desc *_d = &vring->va[i].rx;
  227. dma_addr_t pa;
  228. struct sk_buff *skb = dev_alloc_skb(sz + headroom);
  229. if (unlikely(!skb))
  230. return -ENOMEM;
  231. skb_reserve(skb, headroom);
  232. skb_put(skb, sz);
  233. pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
  234. if (unlikely(dma_mapping_error(dev, pa))) {
  235. kfree_skb(skb);
  236. return -ENOMEM;
  237. }
  238. d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
  239. wil_desc_addr_set(&d->dma.addr, pa);
  240. /* ip_length don't care */
  241. /* b11 don't care */
  242. /* error don't care */
  243. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  244. d->dma.length = cpu_to_le16(sz);
  245. *_d = *d;
  246. vring->ctx[i].skb = skb;
  247. return 0;
  248. }
  249. /**
  250. * Adds radiotap header
  251. *
  252. * Any error indicated as "Bad FCS"
  253. *
  254. * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
  255. * - Rx descriptor: 32 bytes
  256. * - Phy info
  257. */
  258. static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
  259. struct sk_buff *skb)
  260. {
  261. struct wireless_dev *wdev = wil->wdev;
  262. struct wil6210_rtap {
  263. struct ieee80211_radiotap_header rthdr;
  264. /* fields should be in the order of bits in rthdr.it_present */
  265. /* flags */
  266. u8 flags;
  267. /* channel */
  268. __le16 chnl_freq __aligned(2);
  269. __le16 chnl_flags;
  270. /* MCS */
  271. u8 mcs_present;
  272. u8 mcs_flags;
  273. u8 mcs_index;
  274. } __packed;
  275. struct wil6210_rtap_vendor {
  276. struct wil6210_rtap rtap;
  277. /* vendor */
  278. u8 vendor_oui[3] __aligned(2);
  279. u8 vendor_ns;
  280. __le16 vendor_skip;
  281. u8 vendor_data[0];
  282. } __packed;
  283. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  284. struct wil6210_rtap_vendor *rtap_vendor;
  285. int rtap_len = sizeof(struct wil6210_rtap);
  286. int phy_length = 0; /* phy info header size, bytes */
  287. static char phy_data[128];
  288. struct ieee80211_channel *ch = wdev->preset_chandef.chan;
  289. if (rtap_include_phy_info) {
  290. rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
  291. /* calculate additional length */
  292. if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
  293. /**
  294. * PHY info starts from 8-byte boundary
  295. * there are 8-byte lines, last line may be partially
  296. * written (HW bug), thus FW configures for last line
  297. * to be excessive. Driver skips this last line.
  298. */
  299. int len = min_t(int, 8 + sizeof(phy_data),
  300. wil_rxdesc_phy_length(d));
  301. if (len > 8) {
  302. void *p = skb_tail_pointer(skb);
  303. void *pa = PTR_ALIGN(p, 8);
  304. if (skb_tailroom(skb) >= len + (pa - p)) {
  305. phy_length = len - 8;
  306. memcpy(phy_data, pa, phy_length);
  307. }
  308. }
  309. }
  310. rtap_len += phy_length;
  311. }
  312. if (skb_headroom(skb) < rtap_len &&
  313. pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
  314. wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
  315. return;
  316. }
  317. rtap_vendor = (void *)skb_push(skb, rtap_len);
  318. memset(rtap_vendor, 0, rtap_len);
  319. rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
  320. rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
  321. rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
  322. (1 << IEEE80211_RADIOTAP_FLAGS) |
  323. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  324. (1 << IEEE80211_RADIOTAP_MCS));
  325. if (d->dma.status & RX_DMA_STATUS_ERROR)
  326. rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
  327. rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
  328. rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
  329. rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
  330. rtap_vendor->rtap.mcs_flags = 0;
  331. rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
  332. if (rtap_include_phy_info) {
  333. rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
  334. IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
  335. /* OUI for Wilocity 04:ce:14 */
  336. rtap_vendor->vendor_oui[0] = 0x04;
  337. rtap_vendor->vendor_oui[1] = 0xce;
  338. rtap_vendor->vendor_oui[2] = 0x14;
  339. rtap_vendor->vendor_ns = 1;
  340. /* Rx descriptor + PHY data */
  341. rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
  342. phy_length);
  343. memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
  344. memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
  345. phy_length);
  346. }
  347. }
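/* Illustrative, standalone sketch (not part of the driver): a radiotap header
 * is a little-endian blob - an 8-byte header (version, pad, length, present
 * bitmap) followed by the fields whose bits are set, each aligned to its
 * natural size. The bit numbers below (FLAGS=1, CHANNEL=3, MCS=19) are the
 * standard radiotap field indices; build_rtap() is a hypothetical helper that
 * packs the same three fields the driver emits above.
 */
#include <stdint.h>
#include <string.h>

static void put_le16(uint8_t *p, uint16_t v) { p[0] = v; p[1] = v >> 8; }
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* returns the header length written into buf (buf must hold >= 18 bytes) */
static int build_rtap(uint8_t *buf, int bad_fcs, uint16_t freq_mhz, uint8_t mcs)
{
	memset(buf, 0, 18);
	buf[0] = 0;				/* it_version */
	put_le16(buf + 2, 18);			/* it_len */
	put_le32(buf + 4, (1u << 1) | (1u << 3) | (1u << 19)); /* it_present */
	buf[8] = bad_fcs ? 0x40 : 0;		/* FLAGS: bad-FCS bit */
	/* the channel field needs 2-byte alignment: offset 10, not 9 */
	put_le16(buf + 10, freq_mhz);		/* channel frequency, MHz */
	put_le16(buf + 12, 0);			/* channel flags */
	buf[14] = 0x02;				/* MCS: "MCS known" flag */
	buf[15] = 0;				/* MCS flags */
	buf[16] = mcs;				/* MCS index */
	return 18;
}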
  348. /* similar to the ieee80211_ version, but FC contains only the first byte */
  349. static inline int wil_is_back_req(u8 fc)
  350. {
  351. return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
  352. (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
  353. }
  354. /**
  355. * reap 1 frame from @swhead
  356. *
  357. * Rx descriptor copied to skb->cb
  358. *
  359. * Safe to call from IRQ
  360. */
  361. static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
  362. struct vring *vring)
  363. {
  364. struct device *dev = wil_to_dev(wil);
  365. struct net_device *ndev = wil_to_ndev(wil);
  366. volatile struct vring_rx_desc *_d;
  367. struct vring_rx_desc *d;
  368. struct sk_buff *skb;
  369. dma_addr_t pa;
  370. unsigned int snaplen = wil_rx_snaplen();
  371. unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
  372. u16 dmalen;
  373. u8 ftype;
  374. int cid;
  375. int i;
  376. struct wil_net_stats *stats;
  377. BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
  378. again:
  379. if (unlikely(wil_vring_is_empty(vring)))
  380. return NULL;
  381. i = (int)vring->swhead;
  382. _d = &vring->va[i].rx;
  383. if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
  384. /* it is not error, we just reached end of Rx done area */
  385. return NULL;
  386. }
  387. skb = vring->ctx[i].skb;
  388. vring->ctx[i].skb = NULL;
  389. wil_vring_advance_head(vring, 1);
  390. if (!skb) {
  391. wil_err(wil, "No Rx skb at [%d]\n", i);
  392. goto again;
  393. }
  394. d = wil_skb_rxdesc(skb);
  395. *d = *_d;
  396. pa = wil_desc_addr(&d->dma.addr);
  397. dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
  398. dmalen = le16_to_cpu(d->dma.length);
  399. trace_wil6210_rx(i, d);
  400. wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
  401. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  402. (const void *)d, sizeof(*d), false);
  403. cid = wil_rxdesc_cid(d);
  404. stats = &wil->sta[cid].stats;
  405. if (unlikely(dmalen > sz)) {
  406. wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
  407. stats->rx_large_frame++;
  408. kfree_skb(skb);
  409. goto again;
  410. }
  411. skb_trim(skb, dmalen);
  412. prefetch(skb->data);
  413. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  414. skb->data, skb_headlen(skb), false);
  415. stats->last_mcs_rx = wil_rxdesc_mcs(d);
  416. if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
  417. stats->rx_per_mcs[stats->last_mcs_rx]++;
  418. /* use radiotap header only if required */
  419. if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
  420. wil_rx_add_radiotap_header(wil, skb);
  421. /* no extra checks if in sniffer mode */
  422. if (ndev->type != ARPHRD_ETHER)
  423. return skb;
  424. /* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
  425. * Driver should recognize it by frame type, that is found
  426. * in Rx descriptor. If type is not data, it is 802.11 frame as is
  427. */
  428. ftype = wil_rxdesc_ftype(d) << 2;
  429. if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
  430. u8 fc1 = wil_rxdesc_fc1(d);
  431. int mid = wil_rxdesc_mid(d);
  432. int tid = wil_rxdesc_tid(d);
  433. u16 seq = wil_rxdesc_seq(d);
  434. wil_dbg_txrx(wil,
  435. "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  436. fc1, mid, cid, tid, seq);
  437. stats->rx_non_data_frame++;
  438. if (wil_is_back_req(fc1)) {
  439. wil_dbg_txrx(wil,
  440. "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
  441. mid, cid, tid, seq);
  442. wil_rx_bar(wil, cid, tid, seq);
  443. } else {
  444. /* print again all info. One can enable only this
  445. * without overhead for printing every Rx frame
  446. */
  447. wil_dbg_txrx(wil,
  448. "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  449. fc1, mid, cid, tid, seq);
  450. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  451. (const void *)d, sizeof(*d), false);
  452. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  453. skb->data, skb_headlen(skb), false);
  454. }
  455. kfree_skb(skb);
  456. goto again;
  457. }
  458. if (unlikely(skb->len < ETH_HLEN + snaplen)) {
  459. wil_err(wil, "Short frame, len = %d\n", skb->len);
  460. stats->rx_short_frame++;
  461. kfree_skb(skb);
  462. goto again;
  463. }
  464. /* L4 IDENT is on when HW calculated checksum, check status
  465. * and in case of error drop the packet
  466. * higher stack layers will handle retransmission (if required)
  467. */
  468. if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
  469. /* L4 protocol identified, csum calculated */
  470. if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
  471. skb->ip_summed = CHECKSUM_UNNECESSARY;
  472. /* If HW reports a bad checksum, let the IP stack re-check it.
  473. * For example, HW doesn't understand the Microsoft IP stack, which
  474. * mis-calculates the TCP checksum - when it should be 0x0,
  475. * it writes 0xffff, in violation of RFC 1624
  476. */
  477. }
  478. if (snaplen) {
  479. /* Packet layout
  480. * +-------+-------+---------+------------+------+
  481. * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
  482. * +-------+-------+---------+------------+------+
  483. * Need to remove SNAP, shifting SA and DA forward
  484. */
  485. memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
  486. skb_pull(skb, snaplen);
  487. }
  488. return skb;
  489. }
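/* Illustrative, standalone sketch (not part of the driver): when rx_align_2
 * is set, 6 extra "snap" bytes appear between the two MAC addresses and the
 * ethertype, and the reap path above removes them by sliding the 12 address
 * bytes forward and pulling 6 bytes off the head. The plain-buffer version
 * below repeats that move; all names are hypothetical.
 */
#include <string.h>

#define DEMO_ETH_ALEN	6
#define DEMO_SNAP_LEN	6

/* in:  |addr1(6)|addr2(6)|snap(6)|ethertype(2)|payload...|
 * out: pointer to |addr1(6)|addr2(6)|ethertype(2)|payload...| inside buf
 */
static unsigned char *demo_strip_snap(unsigned char *buf)
{
	memmove(buf + DEMO_SNAP_LEN, buf, 2 * DEMO_ETH_ALEN);
	return buf + DEMO_SNAP_LEN;	/* the equivalent of skb_pull(skb, 6) */
}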
  490. /**
  491. * allocate and fill up to @count buffers in rx ring
  492. * buffers posted at @swtail
  493. */
  494. static int wil_rx_refill(struct wil6210_priv *wil, int count)
  495. {
  496. struct net_device *ndev = wil_to_ndev(wil);
  497. struct vring *v = &wil->vring_rx;
  498. u32 next_tail;
  499. int rc = 0;
  500. int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
  501. WIL6210_RTAP_SIZE : 0;
  502. for (; next_tail = wil_vring_next_tail(v),
  503. (next_tail != v->swhead) && (count-- > 0);
  504. v->swtail = next_tail) {
  505. rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
  506. if (unlikely(rc)) {
  507. wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
  508. rc, v->swtail);
  509. break;
  510. }
  511. }
  512. /* make sure all writes to descriptors (shared memory) are done before
  513. * committing them to HW
  514. */
  515. wmb();
  516. wil_w(wil, v->hwtail, v->swtail);
  517. return rc;
  518. }
  519. /**
  520. * reverse_memcmp - Compare two areas of memory, in reverse order
  521. * @cs: One area of memory
  522. * @ct: Another area of memory
  523. * @count: The size of the area.
  524. *
  525. * Cut'n'paste from original memcmp (see lib/string.c)
  526. * with minimal modifications
  527. */
  528. static int reverse_memcmp(const void *cs, const void *ct, size_t count)
  529. {
  530. const unsigned char *su1, *su2;
  531. int res = 0;
  532. for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
  533. --su1, --su2, count--) {
  534. res = *su1 - *su2;
  535. if (res)
  536. break;
  537. }
  538. return res;
  539. }
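/* Illustrative, standalone sketch (not part of the driver): the GCMP packet
 * number bytes appear to be stored least-significant byte first (the field
 * name pn_15_0 suggests the low 16 bits come first), so comparing from the
 * last byte toward the first - as reverse_memcmp() does - orders two PNs by
 * numeric value; a frame is accepted only if its PN is strictly greater than
 * the last one seen. The helper below shows the same ordering numerically;
 * its names are hypothetical.
 */
#include <assert.h>

static unsigned long long demo_pn_to_u64(const unsigned char pn[6])
{
	unsigned long long v = 0;
	int i;

	for (i = 5; i >= 0; i--)
		v = (v << 8) | pn[i];	/* pn[0] is the least significant byte */
	return v;
}

static void demo_pn_check(void)
{
	unsigned char pn_new[6]  = { 0x00, 0x01, 0, 0, 0, 0 };	/* 256 */
	unsigned char pn_last[6] = { 0xff, 0x00, 0, 0, 0, 0 };	/* 255 */

	/* reverse_memcmp(pn_new, pn_last, 6) > 0 iff the numeric PN grew */
	assert(demo_pn_to_u64(pn_new) > demo_pn_to_u64(pn_last));
}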
  540. static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
  541. {
  542. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  543. int cid = wil_rxdesc_cid(d);
  544. int tid = wil_rxdesc_tid(d);
  545. int key_id = wil_rxdesc_key_id(d);
  546. int mc = wil_rxdesc_mcast(d);
  547. struct wil_sta_info *s = &wil->sta[cid];
  548. struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
  549. &s->tid_crypto_rx[tid];
  550. struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
  551. const u8 *pn = (u8 *)&d->mac.pn_15_0;
  552. if (!cc->key_set) {
  553. wil_err_ratelimited(wil,
  554. "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
  555. cid, tid, mc, key_id);
  556. return -EINVAL;
  557. }
  558. if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
  559. wil_err_ratelimited(wil,
  560. "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
  561. cid, tid, mc, key_id, pn, cc->pn);
  562. return -EINVAL;
  563. }
  564. memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
  565. return 0;
  566. }
  567. /*
  568. * Pass Rx packet to the netif. Update statistics.
  569. * Called in softirq context (NAPI poll).
  570. */
  571. void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
  572. {
  573. gro_result_t rc = GRO_NORMAL;
  574. struct wil6210_priv *wil = ndev_to_wil(ndev);
  575. struct wireless_dev *wdev = wil_to_wdev(wil);
  576. unsigned int len = skb->len;
  577. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  578. int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
  579. int security = wil_rxdesc_security(d);
  580. struct ethhdr *eth = (void *)skb->data;
  581. /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
  582. * is not suitable, need to look at data
  583. */
  584. int mcast = is_multicast_ether_addr(eth->h_dest);
  585. struct wil_net_stats *stats = &wil->sta[cid].stats;
  586. struct sk_buff *xmit_skb = NULL;
  587. static const char * const gro_res_str[] = {
  588. [GRO_MERGED] = "GRO_MERGED",
  589. [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
  590. [GRO_HELD] = "GRO_HELD",
  591. [GRO_NORMAL] = "GRO_NORMAL",
  592. [GRO_DROP] = "GRO_DROP",
  593. };
  594. if (ndev->features & NETIF_F_RXHASH)
  595. /* fake L4 to ensure it won't be re-calculated later
  596. * set hash to any non-zero value to activate rps
  597. * mechanism, core will be chosen according
  598. * to user-level rps configuration.
  599. */
  600. skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
  601. skb_orphan(skb);
  602. if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
  603. rc = GRO_DROP;
  604. dev_kfree_skb(skb);
  605. stats->rx_replay++;
  606. goto stats;
  607. }
  608. if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
  609. if (mcast) {
  610. /* send multicast frames both to higher layers in
  611. * local net stack and back to the wireless medium
  612. */
  613. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  614. } else {
  615. int xmit_cid = wil_find_cid(wil, eth->h_dest);
  616. if (xmit_cid >= 0) {
  617. /* The destination station is associated to
  618. * this AP (in this VLAN), so send the frame
  619. * directly to it and do not pass it to local
  620. * net stack.
  621. */
  622. xmit_skb = skb;
  623. skb = NULL;
  624. }
  625. }
  626. }
  627. if (xmit_skb) {
  628. /* Send to wireless media and increase priority by 256 to
  629. * keep the received priority instead of reclassifying
  630. * the frame (see cfg80211_classify8021d).
  631. */
  632. xmit_skb->dev = ndev;
  633. xmit_skb->priority += 256;
  634. xmit_skb->protocol = htons(ETH_P_802_3);
  635. skb_reset_network_header(xmit_skb);
  636. skb_reset_mac_header(xmit_skb);
  637. wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
  638. dev_queue_xmit(xmit_skb);
  639. }
  640. if (skb) { /* deliver to local stack */
  641. skb->protocol = eth_type_trans(skb, ndev);
  642. rc = napi_gro_receive(&wil->napi_rx, skb);
  643. wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
  644. len, gro_res_str[rc]);
  645. }
  646. stats:
  647. /* statistics. rc set to GRO_NORMAL for AP bridging */
  648. if (unlikely(rc == GRO_DROP)) {
  649. ndev->stats.rx_dropped++;
  650. stats->rx_dropped++;
  651. wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
  652. } else {
  653. ndev->stats.rx_packets++;
  654. stats->rx_packets++;
  655. ndev->stats.rx_bytes += len;
  656. stats->rx_bytes += len;
  657. if (mcast)
  658. ndev->stats.multicast++;
  659. }
  660. }
  661. /**
  662. * Proceed all completed skb's from Rx VRING
  663. *
  664. * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
  665. */
  666. void wil_rx_handle(struct wil6210_priv *wil, int *quota)
  667. {
  668. struct net_device *ndev = wil_to_ndev(wil);
  669. struct vring *v = &wil->vring_rx;
  670. struct sk_buff *skb;
  671. if (unlikely(!v->va)) {
  672. wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
  673. return;
  674. }
  675. wil_dbg_txrx(wil, "rx_handle\n");
  676. while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
  677. (*quota)--;
  678. if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
  679. skb->dev = ndev;
  680. skb_reset_mac_header(skb);
  681. skb->ip_summed = CHECKSUM_UNNECESSARY;
  682. skb->pkt_type = PACKET_OTHERHOST;
  683. skb->protocol = htons(ETH_P_802_2);
  684. wil_netif_rx_any(skb, ndev);
  685. } else {
  686. wil_rx_reorder(wil, skb);
  687. }
  688. }
  689. wil_rx_refill(wil, v->size);
  690. }
  691. static void wil_rx_buf_len_init(struct wil6210_priv *wil)
  692. {
  693. wil->rx_buf_len = rx_large_buf ?
  694. WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
  695. if (mtu_max > wil->rx_buf_len) {
  696. /* do not allow RX buffers to be smaller than mtu_max, for
  697. * backward compatibility (mtu_max parameter was also used
  698. * to support receiving large packets)
  699. */
  700. wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
  701. wil->rx_buf_len = mtu_max;
  702. }
  703. }
  704. int wil_rx_init(struct wil6210_priv *wil, u16 size)
  705. {
  706. struct vring *vring = &wil->vring_rx;
  707. int rc;
  708. wil_dbg_misc(wil, "rx_init\n");
  709. if (vring->va) {
  710. wil_err(wil, "Rx ring already allocated\n");
  711. return -EINVAL;
  712. }
  713. wil_rx_buf_len_init(wil);
  714. vring->size = size;
  715. rc = wil_vring_alloc(wil, vring);
  716. if (rc)
  717. return rc;
  718. rc = wmi_rx_chain_add(wil, vring);
  719. if (rc)
  720. goto err_free;
  721. rc = wil_rx_refill(wil, vring->size);
  722. if (rc)
  723. goto err_free;
  724. return 0;
  725. err_free:
  726. wil_vring_free(wil, vring, 0);
  727. return rc;
  728. }
  729. void wil_rx_fini(struct wil6210_priv *wil)
  730. {
  731. struct vring *vring = &wil->vring_rx;
  732. wil_dbg_misc(wil, "rx_fini\n");
  733. if (vring->va)
  734. wil_vring_free(wil, vring, 0);
  735. }
  736. static inline void wil_tx_data_init(struct vring_tx_data *txdata)
  737. {
  738. spin_lock_bh(&txdata->lock);
  739. txdata->dot1x_open = 0;
  740. txdata->enabled = 0;
  741. txdata->idle = 0;
  742. txdata->last_idle = 0;
  743. txdata->begin = 0;
  744. txdata->agg_wsize = 0;
  745. txdata->agg_timeout = 0;
  746. txdata->agg_amsdu = 0;
  747. txdata->addba_in_progress = false;
  748. spin_unlock_bh(&txdata->lock);
  749. }
  750. int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
  751. int cid, int tid)
  752. {
  753. int rc;
  754. struct wmi_vring_cfg_cmd cmd = {
  755. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  756. .vring_cfg = {
  757. .tx_sw_ring = {
  758. .max_mpdu_size =
  759. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  760. .ring_size = cpu_to_le16(size),
  761. },
  762. .ringid = id,
  763. .cidxtid = mk_cidxtid(cid, tid),
  764. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  765. .mac_ctrl = 0,
  766. .to_resolution = 0,
  767. .agg_max_wsize = 0,
  768. .schd_params = {
  769. .priority = cpu_to_le16(0),
  770. .timeslot_us = cpu_to_le16(0xfff),
  771. },
  772. },
  773. };
  774. struct {
  775. struct wmi_cmd_hdr wmi;
  776. struct wmi_vring_cfg_done_event cmd;
  777. } __packed reply;
  778. struct vring *vring = &wil->vring_tx[id];
  779. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  780. wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
  781. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  782. lockdep_assert_held(&wil->mutex);
  783. if (vring->va) {
  784. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  785. rc = -EINVAL;
  786. goto out;
  787. }
  788. wil_tx_data_init(txdata);
  789. vring->size = size;
  790. rc = wil_vring_alloc(wil, vring);
  791. if (rc)
  792. goto out;
  793. wil->vring2cid_tid[id][0] = cid;
  794. wil->vring2cid_tid[id][1] = tid;
  795. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  796. if (!wil->privacy)
  797. txdata->dot1x_open = true;
  798. rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  799. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  800. if (rc)
  801. goto out_free;
  802. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  803. wil_err(wil, "Tx config failed, status 0x%02x\n",
  804. reply.cmd.status);
  805. rc = -EINVAL;
  806. goto out_free;
  807. }
  808. spin_lock_bh(&txdata->lock);
  809. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  810. txdata->enabled = 1;
  811. spin_unlock_bh(&txdata->lock);
  812. if (txdata->dot1x_open && (agg_wsize >= 0))
  813. wil_addba_tx_request(wil, id, agg_wsize);
  814. return 0;
  815. out_free:
  816. spin_lock_bh(&txdata->lock);
  817. txdata->dot1x_open = false;
  818. txdata->enabled = 0;
  819. spin_unlock_bh(&txdata->lock);
  820. wil_vring_free(wil, vring, 1);
  821. wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
  822. wil->vring2cid_tid[id][1] = 0;
  823. out:
  824. return rc;
  825. }
  826. int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
  827. {
  828. int rc;
  829. struct wmi_bcast_vring_cfg_cmd cmd = {
  830. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  831. .vring_cfg = {
  832. .tx_sw_ring = {
  833. .max_mpdu_size =
  834. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  835. .ring_size = cpu_to_le16(size),
  836. },
  837. .ringid = id,
  838. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  839. },
  840. };
  841. struct {
  842. struct wmi_cmd_hdr wmi;
  843. struct wmi_vring_cfg_done_event cmd;
  844. } __packed reply;
  845. struct vring *vring = &wil->vring_tx[id];
  846. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  847. wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
  848. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  849. lockdep_assert_held(&wil->mutex);
  850. if (vring->va) {
  851. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  852. rc = -EINVAL;
  853. goto out;
  854. }
  855. wil_tx_data_init(txdata);
  856. vring->size = size;
  857. rc = wil_vring_alloc(wil, vring);
  858. if (rc)
  859. goto out;
  860. wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
  861. wil->vring2cid_tid[id][1] = 0; /* TID */
  862. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  863. if (!wil->privacy)
  864. txdata->dot1x_open = true;
  865. rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  866. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  867. if (rc)
  868. goto out_free;
  869. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  870. wil_err(wil, "Tx config failed, status 0x%02x\n",
  871. reply.cmd.status);
  872. rc = -EINVAL;
  873. goto out_free;
  874. }
  875. spin_lock_bh(&txdata->lock);
  876. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  877. txdata->enabled = 1;
  878. spin_unlock_bh(&txdata->lock);
  879. return 0;
  880. out_free:
  881. spin_lock_bh(&txdata->lock);
  882. txdata->enabled = 0;
  883. txdata->dot1x_open = false;
  884. spin_unlock_bh(&txdata->lock);
  885. wil_vring_free(wil, vring, 1);
  886. out:
  887. return rc;
  888. }
  889. void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
  890. {
  891. struct vring *vring = &wil->vring_tx[id];
  892. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  893. lockdep_assert_held(&wil->mutex);
  894. if (!vring->va)
  895. return;
  896. wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
  897. spin_lock_bh(&txdata->lock);
  898. txdata->dot1x_open = false;
  899. txdata->enabled = 0; /* no Tx can be in progress or start anew */
  900. spin_unlock_bh(&txdata->lock);
  901. /* napi_synchronize waits for completion of the current NAPI but will
  902. * not prevent the next NAPI run.
  903. * Add a memory barrier to guarantee that txdata->enabled is zeroed
  904. * before napi_synchronize so that the next scheduled NAPI will not
  905. * handle this vring
  906. */
  907. wmb();
  908. /* make sure NAPI won't touch this vring */
  909. if (test_bit(wil_status_napi_en, wil->status))
  910. napi_synchronize(&wil->napi_tx);
  911. wil_vring_free(wil, vring, 1);
  912. }
  913. static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
  914. struct sk_buff *skb)
  915. {
  916. int i;
  917. struct ethhdr *eth = (void *)skb->data;
  918. int cid = wil_find_cid(wil, eth->h_dest);
  919. if (cid < 0)
  920. return NULL;
  921. /* TODO: fix for multiple TID */
  922. for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
  923. if (!wil->vring_tx_data[i].dot1x_open &&
  924. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  925. continue;
  926. if (wil->vring2cid_tid[i][0] == cid) {
  927. struct vring *v = &wil->vring_tx[i];
  928. struct vring_tx_data *txdata = &wil->vring_tx_data[i];
  929. wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
  930. eth->h_dest, i);
  931. if (v->va && txdata->enabled) {
  932. return v;
  933. } else {
  934. wil_dbg_txrx(wil,
  935. "find_tx_ucast: vring[%d] not valid\n",
  936. i);
  937. return NULL;
  938. }
  939. }
  940. }
  941. return NULL;
  942. }
  943. static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
  944. struct sk_buff *skb);
  945. static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
  946. struct sk_buff *skb)
  947. {
  948. struct vring *v;
  949. int i;
  950. u8 cid;
  951. struct vring_tx_data *txdata;
  952. /* In the STA mode, it is expected to have only 1 VRING
  953. * for the AP we connected to.
  954. * find 1-st vring eligible for this skb and use it.
  955. */
  956. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  957. v = &wil->vring_tx[i];
  958. txdata = &wil->vring_tx_data[i];
  959. if (!v->va || !txdata->enabled)
  960. continue;
  961. cid = wil->vring2cid_tid[i][0];
  962. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  963. continue;
  964. if (!wil->vring_tx_data[i].dot1x_open &&
  965. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  966. continue;
  967. wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
  968. return v;
  969. }
  970. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  971. return NULL;
  972. }
  973. /* Use one of 2 strategies:
  974. *
  975. * 1. New (real broadcast):
  976. * use dedicated broadcast vring
  977. * 2. Old (pseudo-DMS):
  978. * Find 1-st vring and return it;
  979. * duplicate skb and send it to other active vrings;
  980. * in all cases override dest address to unicast peer's address
  981. * Use old strategy when new is not supported yet:
  982. * - for PBSS
  983. */
  984. static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
  985. struct sk_buff *skb)
  986. {
  987. struct vring *v;
  988. struct vring_tx_data *txdata;
  989. int i = wil->bcast_vring;
  990. if (i < 0)
  991. return NULL;
  992. v = &wil->vring_tx[i];
  993. txdata = &wil->vring_tx_data[i];
  994. if (!v->va || !txdata->enabled)
  995. return NULL;
  996. if (!wil->vring_tx_data[i].dot1x_open &&
  997. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  998. return NULL;
  999. return v;
  1000. }
  1001. static void wil_set_da_for_vring(struct wil6210_priv *wil,
  1002. struct sk_buff *skb, int vring_index)
  1003. {
  1004. struct ethhdr *eth = (void *)skb->data;
  1005. int cid = wil->vring2cid_tid[vring_index][0];
  1006. ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
  1007. }
  1008. static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
  1009. struct sk_buff *skb)
  1010. {
  1011. struct vring *v, *v2;
  1012. struct sk_buff *skb2;
  1013. int i;
  1014. u8 cid;
  1015. struct ethhdr *eth = (void *)skb->data;
  1016. char *src = eth->h_source;
  1017. struct vring_tx_data *txdata;
  1018. /* find 1-st vring eligible for data */
  1019. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  1020. v = &wil->vring_tx[i];
  1021. txdata = &wil->vring_tx_data[i];
  1022. if (!v->va || !txdata->enabled)
  1023. continue;
  1024. cid = wil->vring2cid_tid[i][0];
  1025. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  1026. continue;
  1027. if (!wil->vring_tx_data[i].dot1x_open &&
  1028. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  1029. continue;
  1030. /* don't Tx back to source when re-routing Rx->Tx at the AP */
  1031. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  1032. continue;
  1033. goto found;
  1034. }
  1035. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  1036. return NULL;
  1037. found:
  1038. wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
  1039. wil_set_da_for_vring(wil, skb, i);
  1040. /* find other active vrings and duplicate skb for each */
  1041. for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
  1042. v2 = &wil->vring_tx[i];
  1043. if (!v2->va)
  1044. continue;
  1045. cid = wil->vring2cid_tid[i][0];
  1046. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  1047. continue;
  1048. if (!wil->vring_tx_data[i].dot1x_open &&
  1049. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  1050. continue;
  1051. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  1052. continue;
  1053. skb2 = skb_copy(skb, GFP_ATOMIC);
  1054. if (skb2) {
  1055. wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
  1056. wil_set_da_for_vring(wil, skb2, i);
  1057. wil_tx_vring(wil, v2, skb2);
  1058. } else {
  1059. wil_err(wil, "skb_copy failed\n");
  1060. }
  1061. }
  1062. return v;
  1063. }
  1064. static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
  1065. int vring_index)
  1066. {
  1067. wil_desc_addr_set(&d->dma.addr, pa);
  1068. d->dma.ip_length = 0;
  1069. /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
  1070. d->dma.b11 = 0/*14 | BIT(7)*/;
  1071. d->dma.error = 0;
  1072. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  1073. d->dma.length = cpu_to_le16((u16)len);
  1074. d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
  1075. d->mac.d[0] = 0;
  1076. d->mac.d[1] = 0;
  1077. d->mac.d[2] = 0;
  1078. d->mac.ucode_cmd = 0;
  1079. /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
  1080. d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
  1081. (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
  1082. return 0;
  1083. }
  1084. static inline
  1085. void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
  1086. {
  1087. d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
  1088. }
  1089. /**
  1090. * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
  1091. * @skb is used to obtain the protocol and headers length.
  1092. * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
  1093. * 2 - middle, 3 - last descriptor.
  1094. */
  1095. static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
  1096. struct sk_buff *skb,
  1097. int tso_desc_type, bool is_ipv4,
  1098. int tcp_hdr_len, int skb_net_hdr_len)
  1099. {
  1100. d->dma.b11 = ETH_HLEN; /* MAC header length */
  1101. d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
  1102. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  1103. /* L4 header len: TCP header length */
  1104. d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1105. /* Setup TSO: bit and desc type */
  1106. d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
  1107. (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
  1108. d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
  1109. d->dma.ip_length = skb_net_hdr_len;
  1110. /* Enable TCP/UDP checksum */
  1111. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  1112. /* Calculate pseudo-header */
  1113. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  1114. }
  1115. /**
  1116. * Sets the descriptor @d up for csum. The corresponding
  1117. * @skb is used to obtain the protocol and headers length.
  1118. * Returns 0 when checksum offload is set up (or not required),
  1119. * or -EINVAL if the L3/L4 protocol is not supported.
  1120. *
  1121. * It is very similar to previous wil_tx_desc_offload_setup_tso. This
  1122. * is "if unrolling" to optimize the critical path.
  1123. */
  1124. static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
  1125. struct sk_buff *skb){
  1126. int protocol;
  1127. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1128. return 0;
  1129. d->dma.b11 = ETH_HLEN; /* MAC header length */
  1130. switch (skb->protocol) {
  1131. case cpu_to_be16(ETH_P_IP):
  1132. protocol = ip_hdr(skb)->protocol;
  1133. d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
  1134. break;
  1135. case cpu_to_be16(ETH_P_IPV6):
  1136. protocol = ipv6_hdr(skb)->nexthdr;
  1137. break;
  1138. default:
  1139. return -EINVAL;
  1140. }
  1141. switch (protocol) {
  1142. case IPPROTO_TCP:
  1143. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  1144. /* L4 header len: TCP header length */
  1145. d->dma.d0 |=
  1146. (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1147. break;
  1148. case IPPROTO_UDP:
  1149. /* L4 header len: UDP header length */
  1150. d->dma.d0 |=
  1151. (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1152. break;
  1153. default:
  1154. return -EINVAL;
  1155. }
  1156. d->dma.ip_length = skb_network_header_len(skb);
  1157. /* Enable TCP/UDP checksum */
  1158. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  1159. /* Calculate pseudo-header */
  1160. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  1161. return 0;
  1162. }
  1163. static inline void wil_tx_last_desc(struct vring_tx_desc *d)
  1164. {
  1165. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
  1166. BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
  1167. BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
  1168. }
  1169. static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
  1170. {
  1171. d->dma.d0 |= wil_tso_type_lst <<
  1172. DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
  1173. }
  1174. static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
  1175. struct sk_buff *skb)
  1176. {
  1177. struct device *dev = wil_to_dev(wil);
  1178. /* point to descriptors in shared memory */
  1179. volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
  1180. *_first_desc = NULL;
  1181. /* pointers to shadow descriptors */
  1182. struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
  1183. *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
  1184. *first_desc = &first_desc_mem;
  1185. /* pointer to shadow descriptors' context */
  1186. struct wil_ctx *hdr_ctx, *first_ctx = NULL;
  1187. int descs_used = 0; /* total number of used descriptors */
  1188. int sg_desc_cnt = 0; /* number of descriptors for current mss*/
  1189. u32 swhead = vring->swhead;
  1190. int used, avail = wil_vring_avail_tx(vring);
  1191. int nr_frags = skb_shinfo(skb)->nr_frags;
  1192. int min_desc_required = nr_frags + 1;
  1193. int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
  1194. int f, len, hdrlen, headlen;
  1195. int vring_index = vring - wil->vring_tx;
  1196. struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
  1197. uint i = swhead;
  1198. dma_addr_t pa;
  1199. const skb_frag_t *frag = NULL;
  1200. int rem_data = mss;
  1201. int lenmss;
  1202. int hdr_compensation_need = true;
  1203. int desc_tso_type = wil_tso_type_first;
  1204. bool is_ipv4;
  1205. int tcp_hdr_len;
  1206. int skb_net_hdr_len;
  1207. int gso_type;
  1208. int rc = -EINVAL;
  1209. wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
  1210. vring_index);
  1211. if (unlikely(!txdata->enabled))
  1212. return -EINVAL;
  1213. /* A typical 4K page holds 3-4 payloads; we assume each fragment
  1214. * is a full payload, which is how min_desc_required has been
  1215. * calculated. In reality we might need more or fewer descriptors;
  1216. * this is only the initial check.
  1217. */
  1218. if (unlikely(avail < min_desc_required)) {
  1219. wil_err_ratelimited(wil,
  1220. "TSO: Tx ring[%2d] full. No space for %d fragments\n",
  1221. vring_index, min_desc_required);
  1222. return -ENOMEM;
  1223. }
  1224. /* Header Length = MAC header len + IP header len + TCP header len*/
  1225. hdrlen = ETH_HLEN +
  1226. (int)skb_network_header_len(skb) +
  1227. tcp_hdrlen(skb);
  1228. gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
  1229. switch (gso_type) {
  1230. case SKB_GSO_TCPV4:
  1231. /* TCP v4, zero out the IP length and IPv4 checksum fields
  1232. * as required by the offloading doc
  1233. */
  1234. ip_hdr(skb)->tot_len = 0;
  1235. ip_hdr(skb)->check = 0;
  1236. is_ipv4 = true;
  1237. break;
  1238. case SKB_GSO_TCPV6:
  1239. /* TCP v6, zero out the payload length */
  1240. ipv6_hdr(skb)->payload_len = 0;
  1241. is_ipv4 = false;
  1242. break;
  1243. default:
  1244. /* other than TCPv4 or TCPv6 types are not supported for TSO.
  1245. * It is also illegal for both to be set simultaneously
  1246. */
  1247. return -EINVAL;
  1248. }
  1249. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1250. return -EINVAL;
  1251. /* tcp header length and skb network header length are fixed for all
  1252. * packet's descriptors - read them once here
  1253. */
  1254. tcp_hdr_len = tcp_hdrlen(skb);
  1255. skb_net_hdr_len = skb_network_header_len(skb);
  1256. _hdr_desc = &vring->va[i].tx;
  1257. pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
  1258. if (unlikely(dma_mapping_error(dev, pa))) {
  1259. wil_err(wil, "TSO: Skb head DMA map error\n");
  1260. goto err_exit;
  1261. }
  1262. wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
  1263. wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
  1264. tcp_hdr_len, skb_net_hdr_len);
  1265. wil_tx_last_desc(hdr_desc);
  1266. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1267. hdr_ctx = &vring->ctx[i];
  1268. descs_used++;
  1269. headlen = skb_headlen(skb) - hdrlen;
  1270. for (f = headlen ? -1 : 0; f < nr_frags; f++) {
  1271. if (headlen) {
  1272. len = headlen;
  1273. wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
  1274. len);
  1275. } else {
  1276. frag = &skb_shinfo(skb)->frags[f];
  1277. len = frag->size;
  1278. wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
  1279. }
  1280. while (len) {
  1281. wil_dbg_txrx(wil,
  1282. "TSO: len %d, rem_data %d, descs_used %d\n",
  1283. len, rem_data, descs_used);
  1284. if (descs_used == avail) {
  1285. wil_err_ratelimited(wil, "TSO: ring overflow\n");
  1286. rc = -ENOMEM;
  1287. goto mem_error;
  1288. }
  1289. lenmss = min_t(int, rem_data, len);
  1290. i = (swhead + descs_used) % vring->size;
  1291. wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
  1292. if (!headlen) {
  1293. pa = skb_frag_dma_map(dev, frag,
  1294. frag->size - len, lenmss,
  1295. DMA_TO_DEVICE);
  1296. vring->ctx[i].mapped_as = wil_mapped_as_page;
  1297. } else {
  1298. pa = dma_map_single(dev,
  1299. skb->data +
  1300. skb_headlen(skb) - headlen,
  1301. lenmss,
  1302. DMA_TO_DEVICE);
  1303. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1304. headlen -= lenmss;
  1305. }
  1306. if (unlikely(dma_mapping_error(dev, pa))) {
  1307. wil_err(wil, "TSO: DMA map page error\n");
  1308. goto mem_error;
  1309. }
  1310. _desc = &vring->va[i].tx;
			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil_tx_desc_map(d, pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */

			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* close the segment if we reached mss size or this is
			 * the last frag
			 */
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment includes the hdr
					 * descriptor for release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/* the last descriptor will be copied at the
				 * end of this TSO processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
				*_desc = *d;
		}
	}
	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr) */
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
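	/* roll back on error: unmap and clear every descriptor prepared so
	 * far, newest first; setting TX_DMA_STATUS_DU ensures the HW will
	 * not try to process these descriptors
	 */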
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx;
		_desc = &vring->va[i].tx;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}

static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);
	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
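	/* broadcast frames get MCS 0; frames longer than
	 * WIL_BCAST_MCS0_LIMIT are bumped to MCS 1
	 */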
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}
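
	/* ctx->nr_frags lets tx-complete locate the last descriptor of this
	 * frame; the descriptor itself carries the total count including the
	 * skb head
	 */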
	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				vring_index);
			goto dma_error;
		}
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;
dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;
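
	/* txdata->lock serializes the TSO and non-TSO transmit paths
	 * for this vring
	 */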
	spin_lock(&txdata->lock);

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

/**
 * Check status of tx vrings and stop/wake net queues if needed
 *
 * This function does one of two checks:
 * In case check_stop is true, will check if net queues need to be stopped.
 * If the conditions for stopping are met, netif_tx_stop_all_queues() is
 * called.
 * In case check_stop is false, will check if net queues need to be woken.
 * If the conditions for waking are met, netif_tx_wake_all_queues() is
 * called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) into it or removing descriptors (tx complete) from it.
 * Can be NULL when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop net queues if the modified vring has low
 * descriptor availability. Wake if all vrings are not in low descriptor
 * availability and the modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct vring *vring,
					   bool check_stop)
{
	int i;

	if (vring)
		wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d",
			     (int)(vring - wil->vring_tx), check_stop,
			     wil->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, stopped=%d",
			     check_stop, wil->net_queue_stopped);

	if (check_stop == wil->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!vring || unlikely(wil_vring_avail_low(vring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(wil_to_ndev(wil));
			wil->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* check wake */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *cur_vring = &wil->vring_tx[i];
		struct vring_tx_data *txdata = &wil->vring_tx_data[i];

		if (!cur_vring->va || !txdata->enabled || cur_vring == vring)
			continue;

		if (wil_vring_avail_low(cur_vring)) {
			wil_dbg_txrx(wil, "vring %d full, can't wake\n",
				     (int)(cur_vring - wil->vring_tx));
			return;
		}
	}

	if (!vring || wil_vring_avail_high(vring)) {
		/* enough room in the vring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
		wil->net_queue_stopped = false;
	}
}

void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
			   bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
			      bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
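		/* log "FW not ready" only once until the FW comes back up;
		 * pr_once_fw is cleared below once the status checks pass
		 */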
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
		/* in STA mode (ESS), all to same VRING (to AP) */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else if (bcast) {
		if (wil->pbss)
			/* in pbss, no bcast VRING - duplicate skb in
			 * all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
		else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast VRING */
			vring = wil_find_tx_bcast_1(wil, skb);
		else
			/* unexpected combination, fall back to duplicating
			 * the skb in all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
	} else {
		/* unicast, find specific VRING by dest. address */
		vring = wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
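		/* ring full: do not free the skb - returning NETDEV_TX_BUSY
		 * makes the stack requeue and retry it
		 */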
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}

/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;

		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
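		/* the whole frame is done - release every descriptor from
		 * swtail up to and including the last fragment
		 */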
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_vring will see
			 * this descriptor as used and handle it before ctx
			 * zero is completed.
			 */
			wmb();
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vring, false);

	return done;
}