txrx.c

  1. /*
  2. * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/etherdevice.h>
  17. #include <net/ieee80211_radiotap.h>
  18. #include <linux/if_arp.h>
  19. #include <linux/moduleparam.h>
  20. #include <linux/ip.h>
  21. #include <linux/ipv6.h>
  22. #include <net/ipv6.h>
  23. #include <linux/prefetch.h>
  24. #include "wil6210.h"
  25. #include "wmi.h"
  26. #include "txrx.h"
  27. #include "trace.h"
  28. static bool rtap_include_phy_info;
  29. module_param(rtap_include_phy_info, bool, S_IRUGO);
  30. MODULE_PARM_DESC(rtap_include_phy_info,
  31. " Include PHY info in the radiotap header, default - no");
  32. bool rx_align_2;
  33. module_param(rx_align_2, bool, S_IRUGO);
  34. MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
  35. static inline uint wil_rx_snaplen(void)
  36. {
  37. return rx_align_2 ? 6 : 0;
  38. }
  39. static inline int wil_vring_is_empty(struct vring *vring)
  40. {
  41. return vring->swhead == vring->swtail;
  42. }
  43. static inline u32 wil_vring_next_tail(struct vring *vring)
  44. {
  45. return (vring->swtail + 1) % vring->size;
  46. }
  47. static inline void wil_vring_advance_head(struct vring *vring, int n)
  48. {
  49. vring->swhead = (vring->swhead + n) % vring->size;
  50. }
  51. static inline int wil_vring_is_full(struct vring *vring)
  52. {
  53. return wil_vring_next_tail(vring) == vring->swhead;
  54. }
  55. /* Used space in Tx Vring */
  56. static inline int wil_vring_used_tx(struct vring *vring)
  57. {
  58. u32 swhead = vring->swhead;
  59. u32 swtail = vring->swtail;
  60. return (vring->size + swhead - swtail) % vring->size;
  61. }
  62. /* Available space in Tx Vring */
  63. static inline int wil_vring_avail_tx(struct vring *vring)
  64. {
  65. return vring->size - wil_vring_used_tx(vring) - 1;
  66. }
  67. /* wil_vring_wmark_low - low watermark for available descriptor space */
  68. static inline int wil_vring_wmark_low(struct vring *vring)
  69. {
  70. return vring->size/8;
  71. }
  72. /* wil_vring_wmark_high - high watermark for available descriptor space */
  73. static inline int wil_vring_wmark_high(struct vring *vring)
  74. {
  75. return vring->size/4;
  76. }
  77. /* wil_val_in_range - check if value in [min,max) */
  78. static inline bool wil_val_in_range(int val, int min, int max)
  79. {
  80. return val >= min && val < max;
  81. }
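
The vring helpers above implement a classic single-producer/single-consumer circular buffer over swhead/swtail, with one slot always left empty so that "full" and "empty" remain distinguishable. A minimal user-space sketch of the same index arithmetic (the toy_ring/ring_* names are illustrative only, not part of the driver):

#include <assert.h>
#include <stdio.h>

/* toy stand-ins for vring->size / ->swhead / ->swtail */
struct toy_ring {
	unsigned size;   /* number of descriptor slots */
	unsigned swhead; /* producer index */
	unsigned swtail; /* consumer index */
};

static unsigned ring_used(const struct toy_ring *r)
{
	/* same formula as wil_vring_used_tx() */
	return (r->size + r->swhead - r->swtail) % r->size;
}

static unsigned ring_avail(const struct toy_ring *r)
{
	/* one slot is sacrificed so that "full" differs from "empty" */
	return r->size - ring_used(r) - 1;
}

int main(void)
{
	struct toy_ring r = { .size = 8, .swhead = 0, .swtail = 0 };

	assert(ring_used(&r) == 0 && ring_avail(&r) == 7);

	r.swhead = (r.swhead + 5) % r.size; /* produce 5 descriptors */
	r.swtail = (r.swtail + 3) % r.size; /* complete 3 of them */

	printf("used=%u avail=%u wmark_low=%u wmark_high=%u\n",
	       ring_used(&r), ring_avail(&r), r.size / 8, r.size / 4);
	return 0;
}

For size = 8 this prints used=2 avail=5 wmark_low=1 wmark_high=2, matching wil_vring_used_tx(), wil_vring_avail_tx() and the two watermark helpers above.
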
  82. static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
  83. {
  84. struct device *dev = wil_to_dev(wil);
  85. size_t sz = vring->size * sizeof(vring->va[0]);
  86. uint i;
  87. wil_dbg_misc(wil, "%s()\n", __func__);
  88. BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
  89. vring->swhead = 0;
  90. vring->swtail = 0;
  91. vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
  92. if (!vring->ctx) {
  93. vring->va = NULL;
  94. return -ENOMEM;
  95. }
  96. /* vring->va should be aligned on its size rounded up to power of 2
  97. * This is guaranteed by dma_alloc_coherent
  98. */
  99. vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
  100. if (!vring->va) {
  101. kfree(vring->ctx);
  102. vring->ctx = NULL;
  103. return -ENOMEM;
  104. }
  105. /* initially, all descriptors are SW owned
  106. * For Tx and Rx, ownership bit is at the same location, thus
  107. * we can use either one
  108. */
  109. for (i = 0; i < vring->size; i++) {
  110. volatile struct vring_tx_desc *_d = &vring->va[i].tx;
  111. _d->dma.status = TX_DMA_STATUS_DU;
  112. }
  113. wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
  114. vring->va, &vring->pa, vring->ctx);
  115. return 0;
  116. }
  117. static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
  118. struct wil_ctx *ctx)
  119. {
  120. dma_addr_t pa = wil_desc_addr(&d->dma.addr);
  121. u16 dmalen = le16_to_cpu(d->dma.length);
  122. switch (ctx->mapped_as) {
  123. case wil_mapped_as_single:
  124. dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
  125. break;
  126. case wil_mapped_as_page:
  127. dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
  128. break;
  129. default:
  130. break;
  131. }
  132. }
  133. static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
  134. int tx)
  135. {
  136. struct device *dev = wil_to_dev(wil);
  137. size_t sz = vring->size * sizeof(vring->va[0]);
  138. if (tx) {
  139. int vring_index = vring - wil->vring_tx;
  140. wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
  141. vring_index, vring->size, vring->va,
  142. &vring->pa, vring->ctx);
  143. } else {
  144. wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
  145. vring->size, vring->va,
  146. &vring->pa, vring->ctx);
  147. }
  148. while (!wil_vring_is_empty(vring)) {
  149. dma_addr_t pa;
  150. u16 dmalen;
  151. struct wil_ctx *ctx;
  152. if (tx) {
  153. struct vring_tx_desc dd, *d = &dd;
  154. volatile struct vring_tx_desc *_d =
  155. &vring->va[vring->swtail].tx;
  156. ctx = &vring->ctx[vring->swtail];
  157. *d = *_d;
  158. wil_txdesc_unmap(dev, d, ctx);
  159. if (ctx->skb)
  160. dev_kfree_skb_any(ctx->skb);
  161. vring->swtail = wil_vring_next_tail(vring);
  162. } else { /* rx */
  163. struct vring_rx_desc dd, *d = &dd;
  164. volatile struct vring_rx_desc *_d =
  165. &vring->va[vring->swhead].rx;
  166. ctx = &vring->ctx[vring->swhead];
  167. *d = *_d;
  168. pa = wil_desc_addr(&d->dma.addr);
  169. dmalen = le16_to_cpu(d->dma.length);
  170. dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
  171. kfree_skb(ctx->skb);
  172. wil_vring_advance_head(vring, 1);
  173. }
  174. }
  175. dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
  176. kfree(vring->ctx);
  177. vring->pa = 0;
  178. vring->va = NULL;
  179. vring->ctx = NULL;
  180. }
  181. /**
  182. * Allocate one skb for Rx VRING
  183. *
  184. * Safe to call from IRQ
  185. */
  186. static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
  187. u32 i, int headroom)
  188. {
  189. struct device *dev = wil_to_dev(wil);
  190. unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
  191. struct vring_rx_desc dd, *d = &dd;
  192. volatile struct vring_rx_desc *_d = &vring->va[i].rx;
  193. dma_addr_t pa;
  194. struct sk_buff *skb = dev_alloc_skb(sz + headroom);
  195. if (unlikely(!skb))
  196. return -ENOMEM;
  197. skb_reserve(skb, headroom);
  198. skb_put(skb, sz);
  199. pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
  200. if (unlikely(dma_mapping_error(dev, pa))) {
  201. kfree_skb(skb);
  202. return -ENOMEM;
  203. }
  204. d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
  205. wil_desc_addr_set(&d->dma.addr, pa);
  206. /* ip_length don't care */
  207. /* b11 don't care */
  208. /* error don't care */
  209. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  210. d->dma.length = cpu_to_le16(sz);
  211. *_d = *d;
  212. vring->ctx[i].skb = skb;
  213. return 0;
  214. }
  215. /**
  216. * Adds radiotap header
  217. *
  218. * Any error is indicated as "Bad FCS"
  219. *
  220. * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
  221. * - Rx descriptor: 32 bytes
  222. * - Phy info
  223. */
  224. static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
  225. struct sk_buff *skb)
  226. {
  227. struct wireless_dev *wdev = wil->wdev;
  228. struct wil6210_rtap {
  229. struct ieee80211_radiotap_header rthdr;
  230. /* fields should be in the order of bits in rthdr.it_present */
  231. /* flags */
  232. u8 flags;
  233. /* channel */
  234. __le16 chnl_freq __aligned(2);
  235. __le16 chnl_flags;
  236. /* MCS */
  237. u8 mcs_present;
  238. u8 mcs_flags;
  239. u8 mcs_index;
  240. } __packed;
  241. struct wil6210_rtap_vendor {
  242. struct wil6210_rtap rtap;
  243. /* vendor */
  244. u8 vendor_oui[3] __aligned(2);
  245. u8 vendor_ns;
  246. __le16 vendor_skip;
  247. u8 vendor_data[0];
  248. } __packed;
  249. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  250. struct wil6210_rtap_vendor *rtap_vendor;
  251. int rtap_len = sizeof(struct wil6210_rtap);
  252. int phy_length = 0; /* phy info header size, bytes */
  253. static char phy_data[128];
  254. struct ieee80211_channel *ch = wdev->preset_chandef.chan;
  255. if (rtap_include_phy_info) {
  256. rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
  257. /* calculate additional length */
  258. if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
  259. /**
  260. * PHY info starts at an 8-byte boundary and consists of
  261. * 8-byte lines. The last line may be partially written
  262. * (HW bug), so the FW configures one line too many and the
  263. * driver skips that last line.
  264. */
  265. int len = min_t(int, 8 + sizeof(phy_data),
  266. wil_rxdesc_phy_length(d));
  267. if (len > 8) {
  268. void *p = skb_tail_pointer(skb);
  269. void *pa = PTR_ALIGN(p, 8);
  270. if (skb_tailroom(skb) >= len + (pa - p)) {
  271. phy_length = len - 8;
  272. memcpy(phy_data, pa, phy_length);
  273. }
  274. }
  275. }
  276. rtap_len += phy_length;
  277. }
  278. if (skb_headroom(skb) < rtap_len &&
  279. pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
  280. wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
  281. return;
  282. }
  283. rtap_vendor = (void *)skb_push(skb, rtap_len);
  284. memset(rtap_vendor, 0, rtap_len);
  285. rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
  286. rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
  287. rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
  288. (1 << IEEE80211_RADIOTAP_FLAGS) |
  289. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  290. (1 << IEEE80211_RADIOTAP_MCS));
  291. if (d->dma.status & RX_DMA_STATUS_ERROR)
  292. rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
  293. rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
  294. rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
  295. rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
  296. rtap_vendor->rtap.mcs_flags = 0;
  297. rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
  298. if (rtap_include_phy_info) {
  299. rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
  300. IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
  301. /* OUI for Wilocity 04:ce:14 */
  302. rtap_vendor->vendor_oui[0] = 0x04;
  303. rtap_vendor->vendor_oui[1] = 0xce;
  304. rtap_vendor->vendor_oui[2] = 0x14;
  305. rtap_vendor->vendor_ns = 1;
  306. /* Rx descriptor + PHY data */
  307. rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
  308. phy_length);
  309. memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
  310. memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
  311. phy_length);
  312. }
  313. }
  314. /**
  315. * reap 1 frame from @swhead
  316. *
  317. * Rx descriptor copied to skb->cb
  318. *
  319. * Safe to call from IRQ
  320. */
  321. static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
  322. struct vring *vring)
  323. {
  324. struct device *dev = wil_to_dev(wil);
  325. struct net_device *ndev = wil_to_ndev(wil);
  326. volatile struct vring_rx_desc *_d;
  327. struct vring_rx_desc *d;
  328. struct sk_buff *skb;
  329. dma_addr_t pa;
  330. unsigned int snaplen = wil_rx_snaplen();
  331. unsigned int sz = mtu_max + ETH_HLEN + snaplen;
  332. u16 dmalen;
  333. u8 ftype;
  334. int cid;
  335. int i = (int)vring->swhead;
  336. struct wil_net_stats *stats;
  337. BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
  338. if (unlikely(wil_vring_is_empty(vring)))
  339. return NULL;
  340. _d = &vring->va[i].rx;
  341. if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
  342. /* it is not an error, we just reached the end of the Rx done area */
  343. return NULL;
  344. }
  345. skb = vring->ctx[i].skb;
  346. vring->ctx[i].skb = NULL;
  347. wil_vring_advance_head(vring, 1);
  348. if (!skb) {
  349. wil_err(wil, "No Rx skb at [%d]\n", i);
  350. return NULL;
  351. }
  352. d = wil_skb_rxdesc(skb);
  353. *d = *_d;
  354. pa = wil_desc_addr(&d->dma.addr);
  355. dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
  356. dmalen = le16_to_cpu(d->dma.length);
  357. trace_wil6210_rx(i, d);
  358. wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
  359. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
  360. (const void *)d, sizeof(*d), false);
  361. if (unlikely(dmalen > sz)) {
  362. wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
  363. kfree_skb(skb);
  364. return NULL;
  365. }
  366. skb_trim(skb, dmalen);
  367. prefetch(skb->data);
  368. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  369. skb->data, skb_headlen(skb), false);
  370. cid = wil_rxdesc_cid(d);
  371. stats = &wil->sta[cid].stats;
  372. stats->last_mcs_rx = wil_rxdesc_mcs(d);
  373. if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
  374. stats->rx_per_mcs[stats->last_mcs_rx]++;
  375. /* use radiotap header only if required */
  376. if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
  377. wil_rx_add_radiotap_header(wil, skb);
  378. /* no extra checks if in sniffer mode */
  379. if (ndev->type != ARPHRD_ETHER)
  380. return skb;
  381. /*
  382. * Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
  383. * The driver should recognize them by the frame type found in the
  384. * Rx descriptor. If the type is not data, it is a raw 802.11 frame.
  385. */
  386. ftype = wil_rxdesc_ftype(d) << 2;
  387. if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
  388. wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
  389. /* TODO: process it */
  390. kfree_skb(skb);
  391. return NULL;
  392. }
  393. if (unlikely(skb->len < ETH_HLEN + snaplen)) {
  394. wil_err(wil, "Short frame, len = %d\n", skb->len);
  395. /* TODO: process it (i.e. BAR) */
  396. kfree_skb(skb);
  397. return NULL;
  398. }
  399. /* L4 IDENT is set when the HW calculated the checksum; check the
  400. * status and, in case of error, let the IP stack re-verify it -
  401. * higher stack layers will handle retransmission (if required)
  402. */
  403. if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
  404. /* L4 protocol identified, csum calculated */
  405. if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
  406. skb->ip_summed = CHECKSUM_UNNECESSARY;
  407. /* If HW reports bad checksum, let IP stack re-check it
  408. * For example, the HW doesn't understand the Microsoft IP stack, which
  409. * mis-calculates the TCP checksum - if it should be 0x0,
  410. * it writes 0xffff in violation of RFC 1624
  411. */
  412. }
  413. if (snaplen) {
  414. /* Packet layout
  415. * +-------+-------+---------+------------+------+
  416. * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
  417. * +-------+-------+---------+------------+------+
  418. * Need to remove SNAP, shifting SA and DA forward
  419. */
  420. memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
  421. skb_pull(skb, snaplen);
  422. }
  423. return skb;
  424. }
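
To make the SNAP stripping at the end of wil_vring_reap_rx() concrete, here is a small stand-alone sketch (toy frame and names, purely illustrative) of the same memmove-then-pull sequence: the 12 address bytes are shifted over the 6 SNAP bytes and the frame start is then advanced by snaplen.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	/* toy frame: addr(12) | SNAP(6) | ethtype(2) | payload */
	unsigned char frame[] = "AAAAAA" "BBBBBB" "SNAPXX" "ET" "payload";
	unsigned char *data = frame;
	size_t snaplen = 6;

	/* shift the two 6-byte addresses forward over the SNAP bytes ... */
	memmove(data + snaplen, data, 2 * ETH_ALEN);
	/* ... then "pull": advance the frame start past the stale bytes */
	data += snaplen;

	/* prints AAAAAABBBBBBET - addresses now directly precede the ethtype */
	printf("%.14s\n", (const char *)data);
	return 0;
}
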
  425. /**
  426. * Allocate and fill up to @count buffers in the Rx ring;
  427. * buffers are posted at @swtail
  428. */
  429. static int wil_rx_refill(struct wil6210_priv *wil, int count)
  430. {
  431. struct net_device *ndev = wil_to_ndev(wil);
  432. struct vring *v = &wil->vring_rx;
  433. u32 next_tail;
  434. int rc = 0;
  435. int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
  436. WIL6210_RTAP_SIZE : 0;
  437. for (; next_tail = wil_vring_next_tail(v),
  438. (next_tail != v->swhead) && (count-- > 0);
  439. v->swtail = next_tail) {
  440. rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
  441. if (unlikely(rc)) {
  442. wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
  443. rc, v->swtail);
  444. break;
  445. }
  446. }
  447. wil_w(wil, v->hwtail, v->swtail);
  448. return rc;
  449. }
  450. /*
  451. * Pass Rx packet to the netif. Update statistics.
  452. * Called in softirq context (NAPI poll).
  453. */
  454. void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
  455. {
  456. gro_result_t rc = GRO_NORMAL;
  457. struct wil6210_priv *wil = ndev_to_wil(ndev);
  458. struct wireless_dev *wdev = wil_to_wdev(wil);
  459. unsigned int len = skb->len;
  460. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  461. int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
  462. struct ethhdr *eth = (void *)skb->data;
  463. /* here we look for the DA, not A1, thus the Rx descriptor's 'mcast'
  464. * indication is not suitable - we need to look at the data
  465. */
  466. int mcast = is_multicast_ether_addr(eth->h_dest);
  467. struct wil_net_stats *stats = &wil->sta[cid].stats;
  468. struct sk_buff *xmit_skb = NULL;
  469. static const char * const gro_res_str[] = {
  470. [GRO_MERGED] = "GRO_MERGED",
  471. [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
  472. [GRO_HELD] = "GRO_HELD",
  473. [GRO_NORMAL] = "GRO_NORMAL",
  474. [GRO_DROP] = "GRO_DROP",
  475. };
  476. if (ndev->features & NETIF_F_RXHASH)
  477. /* fake L4 hash to ensure it won't be re-calculated later;
  478. * set the hash to any non-zero value to activate the RPS
  479. * mechanism - the core will be chosen according to the
  480. * user-level RPS configuration.
  481. */
  482. skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
  483. skb_orphan(skb);
  484. if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
  485. if (mcast) {
  486. /* send multicast frames both to higher layers in
  487. * local net stack and back to the wireless medium
  488. */
  489. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  490. } else {
  491. int xmit_cid = wil_find_cid(wil, eth->h_dest);
  492. if (xmit_cid >= 0) {
  493. /* The destination station is associated to
  494. * this AP (in this VLAN), so send the frame
  495. * directly to it and do not pass it to local
  496. * net stack.
  497. */
  498. xmit_skb = skb;
  499. skb = NULL;
  500. }
  501. }
  502. }
  503. if (xmit_skb) {
  504. /* Send to wireless media and increase priority by 256 to
  505. * keep the received priority instead of reclassifying
  506. * the frame (see cfg80211_classify8021d).
  507. */
  508. xmit_skb->dev = ndev;
  509. xmit_skb->priority += 256;
  510. xmit_skb->protocol = htons(ETH_P_802_3);
  511. skb_reset_network_header(xmit_skb);
  512. skb_reset_mac_header(xmit_skb);
  513. wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
  514. dev_queue_xmit(xmit_skb);
  515. }
  516. if (skb) { /* deliver to local stack */
  517. skb->protocol = eth_type_trans(skb, ndev);
  518. rc = napi_gro_receive(&wil->napi_rx, skb);
  519. wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
  520. len, gro_res_str[rc]);
  521. }
  522. /* statistics. rc set to GRO_NORMAL for AP bridging */
  523. if (unlikely(rc == GRO_DROP)) {
  524. ndev->stats.rx_dropped++;
  525. stats->rx_dropped++;
  526. wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
  527. } else {
  528. ndev->stats.rx_packets++;
  529. stats->rx_packets++;
  530. ndev->stats.rx_bytes += len;
  531. stats->rx_bytes += len;
  532. if (mcast)
  533. ndev->stats.multicast++;
  534. }
  535. }
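
The bridging decision in wil_netif_rx_any() boils down to a small truth table. The sketch below (hypothetical classify_rx() helper, user-space) mirrors that logic: in AP mode without isolation, multicast is both delivered locally and copied back to the air, while unicast destined to an associated peer is only re-transmitted.

#include <stdbool.h>
#include <stdio.h>

/* toy output mirroring the two paths in wil_netif_rx_any() */
struct rx_decision {
	bool deliver_local;  /* pass the skb up the local net stack */
	bool forward_to_air; /* (re)transmit a copy on the wireless medium */
};

static struct rx_decision classify_rx(bool is_ap, bool ap_isolate,
				      bool is_mcast, bool dest_is_peer)
{
	struct rx_decision d = { .deliver_local = true, .forward_to_air = false };

	if (is_ap && !ap_isolate) {
		if (is_mcast) {
			/* multicast: both up the stack and back to the air */
			d.forward_to_air = true;
		} else if (dest_is_peer) {
			/* unicast to an associated peer: AP-internal bridge only */
			d.deliver_local = false;
			d.forward_to_air = true;
		}
	}
	return d;
}

int main(void)
{
	struct rx_decision d = classify_rx(true, false, false, true);

	printf("local=%d air=%d\n", d.deliver_local, d.forward_to_air); /* 0 1 */
	return 0;
}
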
  536. /**
  537. * Process all completed skb's from the Rx VRING
  538. *
  539. * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
  540. */
  541. void wil_rx_handle(struct wil6210_priv *wil, int *quota)
  542. {
  543. struct net_device *ndev = wil_to_ndev(wil);
  544. struct vring *v = &wil->vring_rx;
  545. struct sk_buff *skb;
  546. if (unlikely(!v->va)) {
  547. wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
  548. return;
  549. }
  550. wil_dbg_txrx(wil, "%s()\n", __func__);
  551. while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
  552. (*quota)--;
  553. if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
  554. skb->dev = ndev;
  555. skb_reset_mac_header(skb);
  556. skb->ip_summed = CHECKSUM_UNNECESSARY;
  557. skb->pkt_type = PACKET_OTHERHOST;
  558. skb->protocol = htons(ETH_P_802_2);
  559. wil_netif_rx_any(skb, ndev);
  560. } else {
  561. wil_rx_reorder(wil, skb);
  562. }
  563. }
  564. wil_rx_refill(wil, v->size);
  565. }
  566. int wil_rx_init(struct wil6210_priv *wil, u16 size)
  567. {
  568. struct vring *vring = &wil->vring_rx;
  569. int rc;
  570. wil_dbg_misc(wil, "%s()\n", __func__);
  571. if (vring->va) {
  572. wil_err(wil, "Rx ring already allocated\n");
  573. return -EINVAL;
  574. }
  575. vring->size = size;
  576. rc = wil_vring_alloc(wil, vring);
  577. if (rc)
  578. return rc;
  579. rc = wmi_rx_chain_add(wil, vring);
  580. if (rc)
  581. goto err_free;
  582. rc = wil_rx_refill(wil, vring->size);
  583. if (rc)
  584. goto err_free;
  585. return 0;
  586. err_free:
  587. wil_vring_free(wil, vring, 0);
  588. return rc;
  589. }
  590. void wil_rx_fini(struct wil6210_priv *wil)
  591. {
  592. struct vring *vring = &wil->vring_rx;
  593. wil_dbg_misc(wil, "%s()\n", __func__);
  594. if (vring->va)
  595. wil_vring_free(wil, vring, 0);
  596. }
  597. int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
  598. int cid, int tid)
  599. {
  600. int rc;
  601. struct wmi_vring_cfg_cmd cmd = {
  602. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  603. .vring_cfg = {
  604. .tx_sw_ring = {
  605. .max_mpdu_size =
  606. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  607. .ring_size = cpu_to_le16(size),
  608. },
  609. .ringid = id,
  610. .cidxtid = mk_cidxtid(cid, tid),
  611. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  612. .mac_ctrl = 0,
  613. .to_resolution = 0,
  614. .agg_max_wsize = 0,
  615. .schd_params = {
  616. .priority = cpu_to_le16(0),
  617. .timeslot_us = cpu_to_le16(0xfff),
  618. },
  619. },
  620. };
  621. struct {
  622. struct wil6210_mbox_hdr_wmi wmi;
  623. struct wmi_vring_cfg_done_event cmd;
  624. } __packed reply;
  625. struct vring *vring = &wil->vring_tx[id];
  626. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  627. wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
  628. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  629. if (vring->va) {
  630. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  631. rc = -EINVAL;
  632. goto out;
  633. }
  634. memset(txdata, 0, sizeof(*txdata));
  635. spin_lock_init(&txdata->lock);
  636. vring->size = size;
  637. rc = wil_vring_alloc(wil, vring);
  638. if (rc)
  639. goto out;
  640. wil->vring2cid_tid[id][0] = cid;
  641. wil->vring2cid_tid[id][1] = tid;
  642. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  643. if (!wil->privacy)
  644. txdata->dot1x_open = true;
  645. rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  646. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  647. if (rc)
  648. goto out_free;
  649. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  650. wil_err(wil, "Tx config failed, status 0x%02x\n",
  651. reply.cmd.status);
  652. rc = -EINVAL;
  653. goto out_free;
  654. }
  655. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  656. txdata->enabled = 1;
  657. if (txdata->dot1x_open && (agg_wsize >= 0))
  658. wil_addba_tx_request(wil, id, agg_wsize);
  659. return 0;
  660. out_free:
  661. txdata->dot1x_open = false;
  662. txdata->enabled = 0;
  663. wil_vring_free(wil, vring, 1);
  664. out:
  665. return rc;
  666. }
  667. int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
  668. {
  669. int rc;
  670. struct wmi_bcast_vring_cfg_cmd cmd = {
  671. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  672. .vring_cfg = {
  673. .tx_sw_ring = {
  674. .max_mpdu_size =
  675. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  676. .ring_size = cpu_to_le16(size),
  677. },
  678. .ringid = id,
  679. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  680. },
  681. };
  682. struct {
  683. struct wil6210_mbox_hdr_wmi wmi;
  684. struct wmi_vring_cfg_done_event cmd;
  685. } __packed reply;
  686. struct vring *vring = &wil->vring_tx[id];
  687. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  688. wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
  689. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  690. if (vring->va) {
  691. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  692. rc = -EINVAL;
  693. goto out;
  694. }
  695. memset(txdata, 0, sizeof(*txdata));
  696. spin_lock_init(&txdata->lock);
  697. vring->size = size;
  698. rc = wil_vring_alloc(wil, vring);
  699. if (rc)
  700. goto out;
  701. wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
  702. wil->vring2cid_tid[id][1] = 0; /* TID */
  703. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  704. if (!wil->privacy)
  705. txdata->dot1x_open = true;
  706. rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  707. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  708. if (rc)
  709. goto out_free;
  710. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  711. wil_err(wil, "Tx config failed, status 0x%02x\n",
  712. reply.cmd.status);
  713. rc = -EINVAL;
  714. goto out_free;
  715. }
  716. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  717. txdata->enabled = 1;
  718. return 0;
  719. out_free:
  720. txdata->enabled = 0;
  721. txdata->dot1x_open = false;
  722. wil_vring_free(wil, vring, 1);
  723. out:
  724. return rc;
  725. }
  726. void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
  727. {
  728. struct vring *vring = &wil->vring_tx[id];
  729. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  730. WARN_ON(!mutex_is_locked(&wil->mutex));
  731. if (!vring->va)
  732. return;
  733. wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
  734. spin_lock_bh(&txdata->lock);
  735. txdata->dot1x_open = false;
  736. txdata->enabled = 0; /* no Tx can be in progress or start anew */
  737. spin_unlock_bh(&txdata->lock);
  738. /* make sure NAPI won't touch this vring */
  739. if (test_bit(wil_status_napi_en, wil->status))
  740. napi_synchronize(&wil->napi_tx);
  741. wil_vring_free(wil, vring, 1);
  742. memset(txdata, 0, sizeof(*txdata));
  743. }
  744. static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
  745. struct sk_buff *skb)
  746. {
  747. int i;
  748. struct ethhdr *eth = (void *)skb->data;
  749. int cid = wil_find_cid(wil, eth->h_dest);
  750. if (cid < 0)
  751. return NULL;
  752. /* TODO: fix for multiple TID */
  753. for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
  754. if (!wil->vring_tx_data[i].dot1x_open &&
  755. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  756. continue;
  757. if (wil->vring2cid_tid[i][0] == cid) {
  758. struct vring *v = &wil->vring_tx[i];
  759. wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
  760. __func__, eth->h_dest, i);
  761. if (v->va) {
  762. return v;
  763. } else {
  764. wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
  765. return NULL;
  766. }
  767. }
  768. }
  769. return NULL;
  770. }
  771. static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
  772. struct sk_buff *skb);
  773. static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
  774. struct sk_buff *skb)
  775. {
  776. struct vring *v;
  777. int i;
  778. u8 cid;
  779. /* In the STA mode, it is expected to have only 1 VRING
  780. * for the AP we are connected to.
  781. * Find the first vring eligible for this skb and use it.
  782. */
  783. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  784. v = &wil->vring_tx[i];
  785. if (!v->va)
  786. continue;
  787. cid = wil->vring2cid_tid[i][0];
  788. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  789. continue;
  790. if (!wil->vring_tx_data[i].dot1x_open &&
  791. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  792. continue;
  793. wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
  794. return v;
  795. }
  796. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  797. return NULL;
  798. }
  799. /* Use one of 2 strategies:
  800. *
  801. * 1. New (real broadcast):
  802. * use dedicated broadcast vring
  803. * 2. Old (pseudo-DMS):
  804. * find the first vring and return it;
  805. * duplicate skb and send it to other active vrings;
  806. * in all cases override the dest address with the unicast peer's address.
  807. * Use the old strategy when the new one is not supported yet:
  808. * - for PBSS
  809. */
  810. static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
  811. struct sk_buff *skb)
  812. {
  813. struct vring *v;
  814. int i = wil->bcast_vring;
  815. if (i < 0)
  816. return NULL;
  817. v = &wil->vring_tx[i];
  818. if (!v->va)
  819. return NULL;
  820. if (!wil->vring_tx_data[i].dot1x_open &&
  821. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  822. return NULL;
  823. return v;
  824. }
  825. static void wil_set_da_for_vring(struct wil6210_priv *wil,
  826. struct sk_buff *skb, int vring_index)
  827. {
  828. struct ethhdr *eth = (void *)skb->data;
  829. int cid = wil->vring2cid_tid[vring_index][0];
  830. ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
  831. }
  832. static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
  833. struct sk_buff *skb)
  834. {
  835. struct vring *v, *v2;
  836. struct sk_buff *skb2;
  837. int i;
  838. u8 cid;
  839. struct ethhdr *eth = (void *)skb->data;
  840. char *src = eth->h_source;
  841. /* find 1-st vring eligible for data */
  842. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  843. v = &wil->vring_tx[i];
  844. if (!v->va)
  845. continue;
  846. cid = wil->vring2cid_tid[i][0];
  847. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  848. continue;
  849. if (!wil->vring_tx_data[i].dot1x_open &&
  850. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  851. continue;
  852. /* don't Tx back to source when re-routing Rx->Tx at the AP */
  853. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  854. continue;
  855. goto found;
  856. }
  857. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  858. return NULL;
  859. found:
  860. wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
  861. wil_set_da_for_vring(wil, skb, i);
  862. /* find other active vrings and duplicate skb for each */
  863. for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
  864. v2 = &wil->vring_tx[i];
  865. if (!v2->va)
  866. continue;
  867. cid = wil->vring2cid_tid[i][0];
  868. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  869. continue;
  870. if (!wil->vring_tx_data[i].dot1x_open &&
  871. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  872. continue;
  873. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  874. continue;
  875. skb2 = skb_copy(skb, GFP_ATOMIC);
  876. if (skb2) {
  877. wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
  878. wil_set_da_for_vring(wil, skb2, i);
  879. wil_tx_vring(wil, v2, skb2);
  880. } else {
  881. wil_err(wil, "skb_copy failed\n");
  882. }
  883. }
  884. return v;
  885. }
  886. static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
  887. struct sk_buff *skb)
  888. {
  889. struct wireless_dev *wdev = wil->wdev;
  890. if (wdev->iftype != NL80211_IFTYPE_AP)
  891. return wil_find_tx_bcast_2(wil, skb);
  892. return wil_find_tx_bcast_1(wil, skb);
  893. }
  894. static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
  895. int vring_index)
  896. {
  897. wil_desc_addr_set(&d->dma.addr, pa);
  898. d->dma.ip_length = 0;
  899. /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
  900. d->dma.b11 = 0/*14 | BIT(7)*/;
  901. d->dma.error = 0;
  902. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  903. d->dma.length = cpu_to_le16((u16)len);
  904. d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
  905. d->mac.d[0] = 0;
  906. d->mac.d[1] = 0;
  907. d->mac.d[2] = 0;
  908. d->mac.ucode_cmd = 0;
  909. /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
  910. d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
  911. (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
  912. return 0;
  913. }
  914. static inline
  915. void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
  916. {
  917. d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
  918. }
  919. /**
  920. * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
  921. * @skb is used to obtain the protocol and headers length.
  922. * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
  923. * 2 - middle, 3 - last descriptor.
  924. */
  925. static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
  926. struct sk_buff *skb,
  927. int tso_desc_type, bool is_ipv4,
  928. int tcp_hdr_len, int skb_net_hdr_len)
  929. {
  930. d->dma.b11 = ETH_HLEN; /* MAC header length */
  931. d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
  932. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  933. /* L4 header len: TCP header length */
  934. d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  935. /* Setup TSO: bit and desc type */
  936. d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
  937. (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
  938. d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
  939. d->dma.ip_length = skb_net_hdr_len;
  940. /* Enable TCP/UDP checksum */
  941. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  942. /* Calculate pseudo-header */
  943. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  944. }
  945. /**
  946. * Sets the descriptor @d up for csum. The corresponding
  947. * @skb is used to obtain the protocol and header lengths.
  948. * Returns 0 on success or -EINVAL when the L3/L4 protocol is not
  949. * supported by the checksum offload.
  950. *
  951. * It is very similar to previous wil_tx_desc_offload_setup_tso. This
  952. * is "if unrolling" to optimize the critical path.
  953. */
  954. static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
  955. struct sk_buff *skb){
  956. int protocol;
  957. if (skb->ip_summed != CHECKSUM_PARTIAL)
  958. return 0;
  959. d->dma.b11 = ETH_HLEN; /* MAC header length */
  960. switch (skb->protocol) {
  961. case cpu_to_be16(ETH_P_IP):
  962. protocol = ip_hdr(skb)->protocol;
  963. d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
  964. break;
  965. case cpu_to_be16(ETH_P_IPV6):
  966. protocol = ipv6_hdr(skb)->nexthdr;
  967. break;
  968. default:
  969. return -EINVAL;
  970. }
  971. switch (protocol) {
  972. case IPPROTO_TCP:
  973. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  974. /* L4 header len: TCP header length */
  975. d->dma.d0 |=
  976. (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  977. break;
  978. case IPPROTO_UDP:
  979. /* L4 header len: UDP header length */
  980. d->dma.d0 |=
  981. (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  982. break;
  983. default:
  984. return -EINVAL;
  985. }
  986. d->dma.ip_length = skb_network_header_len(skb);
  987. /* Enable TCP/UDP checksum */
  988. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  989. /* Calculate pseudo-header */
  990. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  991. return 0;
  992. }
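
For illustration, the following stand-alone sketch performs the same TCP/UDP classification as wil_tx_desc_offload_setup(), but with hand-rolled IPv4 header offsets instead of the kernel's ip_hdr()/tcp_hdrlen() helpers (l4_hdr_len() is a hypothetical name); it returns the L4 header length that the descriptor's L4 length field would be programmed with, or -1 for frames the offload path rejects.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return the L4 header length for the descriptor, or -1 for frames the
 * checksum-offload path does not handle. Only Ethernet + IPv4 is decoded
 * here to keep the sketch short.
 */
static int l4_hdr_len(const uint8_t *frame, size_t len)
{
	if (len < 14 + 20)
		return -1;
	if (frame[12] != 0x08 || frame[13] != 0x00) /* not ETH_P_IP */
		return -1;

	const uint8_t *ip = frame + 14;
	size_t ihl = (ip[0] & 0x0f) * 4; /* IPv4 header length in bytes */

	if (len < 14 + ihl + 20)
		return -1;

	switch (ip[9]) { /* IPv4 protocol field */
	case 6:  /* IPPROTO_TCP: data offset is in 32-bit words */
		return (ip[ihl + 12] >> 4) * 4;
	case 17: /* IPPROTO_UDP: fixed-size header */
		return 8;
	default:
		return -1;
	}
}

int main(void)
{
	uint8_t frame[64] = { 0 };

	frame[12] = 0x08; frame[13] = 0x00; /* ethertype IPv4 */
	frame[14] = 0x45;                   /* version 4, IHL = 5 words */
	frame[14 + 9] = 6;                  /* TCP */
	frame[14 + 20 + 12] = 5 << 4;       /* 20-byte TCP header */

	printf("L4 header length: %d\n", l4_hdr_len(frame, sizeof(frame)));
	return 0;
}
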
  993. static inline void wil_tx_last_desc(struct vring_tx_desc *d)
  994. {
  995. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
  996. BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
  997. BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
  998. }
  999. static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
  1000. {
  1001. d->dma.d0 |= wil_tso_type_lst <<
  1002. DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
  1003. }
  1004. static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
  1005. struct sk_buff *skb)
  1006. {
  1007. struct device *dev = wil_to_dev(wil);
  1008. /* point to descriptors in shared memory */
  1009. volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
  1010. *_first_desc = NULL;
  1011. /* pointers to shadow descriptors */
  1012. struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
  1013. *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
  1014. *first_desc = &first_desc_mem;
  1015. /* pointer to shadow descriptors' context */
  1016. struct wil_ctx *hdr_ctx, *first_ctx = NULL;
  1017. int descs_used = 0; /* total number of used descriptors */
  1018. int sg_desc_cnt = 0; /* number of descriptors for current mss*/
  1019. u32 swhead = vring->swhead;
  1020. int used, avail = wil_vring_avail_tx(vring);
  1021. int nr_frags = skb_shinfo(skb)->nr_frags;
  1022. int min_desc_required = nr_frags + 1;
  1023. int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
  1024. int f, len, hdrlen, headlen;
  1025. int vring_index = vring - wil->vring_tx;
  1026. struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
  1027. uint i = swhead;
  1028. dma_addr_t pa;
  1029. const skb_frag_t *frag = NULL;
  1030. int rem_data = mss;
  1031. int lenmss;
  1032. int hdr_compensation_need = true;
  1033. int desc_tso_type = wil_tso_type_first;
  1034. bool is_ipv4;
  1035. int tcp_hdr_len;
  1036. int skb_net_hdr_len;
  1037. int gso_type;
  1038. wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
  1039. __func__, skb->len, vring_index);
  1040. if (unlikely(!txdata->enabled))
  1041. return -EINVAL;
  1042. /* A typical 4K page holds 3-4 payloads; we assume each fragment
  1043. * is a full payload, that's how min_desc_required has been
  1044. * calculated. In reality we might need more or fewer descriptors;
  1045. * this is the initial check only.
  1046. */
  1047. if (unlikely(avail < min_desc_required)) {
  1048. wil_err_ratelimited(wil,
  1049. "TSO: Tx ring[%2d] full. No space for %d fragments\n",
  1050. vring_index, min_desc_required);
  1051. return -ENOMEM;
  1052. }
  1053. /* Header Length = MAC header len + IP header len + TCP header len*/
  1054. hdrlen = ETH_HLEN +
  1055. (int)skb_network_header_len(skb) +
  1056. tcp_hdrlen(skb);
  1057. gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
  1058. switch (gso_type) {
  1059. case SKB_GSO_TCPV4:
  1060. /* TCP v4, zero out the IP length and IPv4 checksum fields
  1061. * as required by the offloading doc
  1062. */
  1063. ip_hdr(skb)->tot_len = 0;
  1064. ip_hdr(skb)->check = 0;
  1065. is_ipv4 = true;
  1066. break;
  1067. case SKB_GSO_TCPV6:
  1068. /* TCP v6, zero out the payload length */
  1069. ipv6_hdr(skb)->payload_len = 0;
  1070. is_ipv4 = false;
  1071. break;
  1072. default:
  1073. /* other than TCPv4 or TCPv6 types are not supported for TSO.
  1074. * It is also illegal for both to be set simultaneously
  1075. */
  1076. return -EINVAL;
  1077. }
  1078. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1079. return -EINVAL;
  1080. /* TCP header length and skb network header length are fixed for all
  1081. * of the packet's descriptors - read them once here
  1082. */
  1083. tcp_hdr_len = tcp_hdrlen(skb);
  1084. skb_net_hdr_len = skb_network_header_len(skb);
  1085. _hdr_desc = &vring->va[i].tx;
  1086. pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
  1087. if (unlikely(dma_mapping_error(dev, pa))) {
  1088. wil_err(wil, "TSO: Skb head DMA map error\n");
  1089. goto err_exit;
  1090. }
  1091. wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
  1092. wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
  1093. tcp_hdr_len, skb_net_hdr_len);
  1094. wil_tx_last_desc(hdr_desc);
  1095. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1096. hdr_ctx = &vring->ctx[i];
  1097. descs_used++;
  1098. headlen = skb_headlen(skb) - hdrlen;
  1099. for (f = headlen ? -1 : 0; f < nr_frags; f++) {
  1100. if (headlen) {
  1101. len = headlen;
  1102. wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
  1103. len);
  1104. } else {
  1105. frag = &skb_shinfo(skb)->frags[f];
  1106. len = frag->size;
  1107. wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
  1108. }
  1109. while (len) {
  1110. wil_dbg_txrx(wil,
  1111. "TSO: len %d, rem_data %d, descs_used %d\n",
  1112. len, rem_data, descs_used);
  1113. if (descs_used == avail) {
  1114. wil_err(wil, "TSO: ring overflow\n");
  1115. goto dma_error;
  1116. }
  1117. lenmss = min_t(int, rem_data, len);
  1118. i = (swhead + descs_used) % vring->size;
  1119. wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
  1120. if (!headlen) {
  1121. pa = skb_frag_dma_map(dev, frag,
  1122. frag->size - len, lenmss,
  1123. DMA_TO_DEVICE);
  1124. vring->ctx[i].mapped_as = wil_mapped_as_page;
  1125. } else {
  1126. pa = dma_map_single(dev,
  1127. skb->data +
  1128. skb_headlen(skb) - headlen,
  1129. lenmss,
  1130. DMA_TO_DEVICE);
  1131. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1132. headlen -= lenmss;
  1133. }
  1134. if (unlikely(dma_mapping_error(dev, pa)))
  1135. goto dma_error;
  1136. _desc = &vring->va[i].tx;
  1137. if (!_first_desc) {
  1138. _first_desc = _desc;
  1139. first_ctx = &vring->ctx[i];
  1140. d = first_desc;
  1141. } else {
  1142. d = &desc_mem;
  1143. }
  1144. wil_tx_desc_map(d, pa, lenmss, vring_index);
  1145. wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
  1146. is_ipv4, tcp_hdr_len,
  1147. skb_net_hdr_len);
  1148. /* use tso_type_first only once */
  1149. desc_tso_type = wil_tso_type_mid;
  1150. descs_used++; /* desc used so far */
  1151. sg_desc_cnt++; /* desc used for this segment */
  1152. len -= lenmss;
  1153. rem_data -= lenmss;
  1154. wil_dbg_txrx(wil,
  1155. "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
  1156. len, rem_data, descs_used, sg_desc_cnt);
  1157. /* Close the segment if reached mss size or last frag*/
  1158. if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
  1159. if (hdr_compensation_need) {
  1160. /* first segment includes the hdr desc for
  1161. * release
  1162. */
  1163. hdr_ctx->nr_frags = sg_desc_cnt;
  1164. wil_tx_desc_set_nr_frags(first_desc,
  1165. sg_desc_cnt +
  1166. 1);
  1167. hdr_compensation_need = false;
  1168. } else {
  1169. wil_tx_desc_set_nr_frags(first_desc,
  1170. sg_desc_cnt);
  1171. }
  1172. first_ctx->nr_frags = sg_desc_cnt - 1;
  1173. wil_tx_last_desc(d);
  1174. /* first descriptor may also be the last
  1175. * for this mss - make sure not to copy
  1176. * it twice
  1177. */
  1178. if (first_desc != d)
  1179. *_first_desc = *first_desc;
  1180. /* last descriptor will be copied at the end
  1181. * of this TSO processing
  1182. */
  1183. if (f < nr_frags - 1 || len > 0)
  1184. *_desc = *d;
  1185. rem_data = mss;
  1186. _first_desc = NULL;
  1187. sg_desc_cnt = 0;
  1188. } else if (first_desc != d) /* update mid descriptor */
  1189. *_desc = *d;
  1190. }
  1191. }
  1192. /* first descriptor may also be the last.
  1193. * in this case d pointer is invalid
  1194. */
  1195. if (_first_desc == _desc)
  1196. d = first_desc;
  1197. /* Last data descriptor */
  1198. wil_set_tx_desc_last_tso(d);
  1199. *_desc = *d;
  1200. /* Fill the total number of descriptors in first desc (hdr)*/
  1201. wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
  1202. *_hdr_desc = *hdr_desc;
  1203. /* hold reference to skb
  1204. * to prevent skb release before accounting
  1205. * in case of immediate "tx done"
  1206. */
  1207. vring->ctx[i].skb = skb_get(skb);
  1208. /* performance monitoring */
  1209. used = wil_vring_used_tx(vring);
  1210. if (wil_val_in_range(vring_idle_trsh,
  1211. used, used + descs_used)) {
  1212. txdata->idle += get_cycles() - txdata->last_idle;
  1213. wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
  1214. vring_index, used, used + descs_used);
  1215. }
  1216. /* advance swhead */
  1217. wil_vring_advance_head(vring, descs_used);
  1218. wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
  1219. /* make sure all writes to descriptors (shared memory) are done before
  1220. * committing them to HW
  1221. */
  1222. wmb();
  1223. wil_w(wil, vring->hwtail, vring->swhead);
  1224. return 0;
  1225. dma_error:
  1226. wil_err(wil, "TSO: DMA map page error\n");
  1227. while (descs_used > 0) {
  1228. struct wil_ctx *ctx;
  1229. i = (swhead + descs_used - 1) % vring->size; /* last used descriptor */
  1230. d = (struct vring_tx_desc *)&vring->va[i].tx;
  1231. _desc = &vring->va[i].tx;
  1232. *d = *_desc;
  1233. _desc->dma.status = TX_DMA_STATUS_DU;
  1234. ctx = &vring->ctx[i];
  1235. wil_txdesc_unmap(dev, d, ctx);
  1236. if (ctx->skb)
  1237. dev_kfree_skb_any(ctx->skb);
  1238. memset(ctx, 0, sizeof(*ctx));
  1239. descs_used--;
  1240. }
  1241. err_exit:
  1242. return -EINVAL;
  1243. }
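
The segmentation loop above hands each descriptor at most min(rem_data, len) bytes and re-arms rem_data = mss at every segment boundary. The sketch below (hypothetical tso_data_descs(), user-space) reproduces only that counting, which shows why a page-sized fragment costs several descriptors and why nr_frags + 1 is only a lower bound for the ring-space check.

#include <stdio.h>

/* Count data descriptors the way the TSO loop above does: each descriptor
 * carries at most min(rem_data, len) bytes and the mss budget is re-armed
 * at every segment boundary (and after the very last piece).
 */
static int tso_data_descs(int headlen, const int *frags, int nr_frags, int mss)
{
	int descs = 0, rem_data = mss;
	int pieces = (headlen ? 1 : 0) + nr_frags;

	for (int p = 0; p < pieces; p++) {
		int len = (headlen && p == 0) ?
			headlen : frags[p - (headlen ? 1 : 0)];

		while (len) {
			int lenmss = rem_data < len ? rem_data : len;

			descs++;
			len -= lenmss;
			rem_data -= lenmss;
			if (rem_data == 0 || (p == pieces - 1 && len == 0))
				rem_data = mss;
		}
	}
	return descs;
}

int main(void)
{
	const int frags[] = { 4096, 4096 };

	/* 1 header descriptor + data descriptors, as in __wil_tx_vring_tso() */
	printf("descs = %d\n", 1 + tso_data_descs(200, frags, 2, 1460));
	return 0;
}

With a 200-byte linear remainder, two 4 KB fragments and mss = 1460 this needs 9 descriptors, noticeably more than the nr_frags + 1 = 3 used for the initial availability check.
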
  1244. static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
  1245. struct sk_buff *skb)
  1246. {
  1247. struct device *dev = wil_to_dev(wil);
  1248. struct vring_tx_desc dd, *d = &dd;
  1249. volatile struct vring_tx_desc *_d;
  1250. u32 swhead = vring->swhead;
  1251. int avail = wil_vring_avail_tx(vring);
  1252. int nr_frags = skb_shinfo(skb)->nr_frags;
  1253. uint f = 0;
  1254. int vring_index = vring - wil->vring_tx;
  1255. struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
  1256. uint i = swhead;
  1257. dma_addr_t pa;
  1258. int used;
  1259. bool mcast = (vring_index == wil->bcast_vring);
  1260. uint len = skb_headlen(skb);
  1261. wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
  1262. __func__, skb->len, vring_index);
  1263. if (unlikely(!txdata->enabled))
  1264. return -EINVAL;
  1265. if (unlikely(avail < 1 + nr_frags)) {
  1266. wil_err_ratelimited(wil,
  1267. "Tx ring[%2d] full. No space for %d fragments\n",
  1268. vring_index, 1 + nr_frags);
  1269. return -ENOMEM;
  1270. }
  1271. _d = &vring->va[i].tx;
  1272. pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
  1273. wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
  1274. skb_headlen(skb), skb->data, &pa);
  1275. wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
  1276. skb->data, skb_headlen(skb), false);
  1277. if (unlikely(dma_mapping_error(dev, pa)))
  1278. return -EINVAL;
  1279. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1280. /* 1-st segment */
  1281. wil_tx_desc_map(d, pa, len, vring_index);
  1282. if (unlikely(mcast)) {
  1283. d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
  1284. if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
  1285. d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
  1286. }
  1287. /* Process TCP/UDP checksum offloading */
  1288. if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
  1289. wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
  1290. vring_index);
  1291. goto dma_error;
  1292. }
  1293. vring->ctx[i].nr_frags = nr_frags;
  1294. wil_tx_desc_set_nr_frags(d, nr_frags + 1);
  1295. /* middle segments */
  1296. for (; f < nr_frags; f++) {
  1297. const struct skb_frag_struct *frag =
  1298. &skb_shinfo(skb)->frags[f];
  1299. int len = skb_frag_size(frag);
  1300. *_d = *d;
  1301. wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
  1302. wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
  1303. (const void *)d, sizeof(*d), false);
  1304. i = (swhead + f + 1) % vring->size;
  1305. _d = &vring->va[i].tx;
  1306. pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
  1307. DMA_TO_DEVICE);
  1308. if (unlikely(dma_mapping_error(dev, pa)))
  1309. goto dma_error;
  1310. vring->ctx[i].mapped_as = wil_mapped_as_page;
  1311. wil_tx_desc_map(d, pa, len, vring_index);
  1312. /* no need to check return code -
  1313. * if it succeeded for 1-st descriptor,
  1314. * it will succeed here too
  1315. */
  1316. wil_tx_desc_offload_setup(d, skb);
  1317. }
  1318. /* for the last seg only */
  1319. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
  1320. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
  1321. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
  1322. *_d = *d;
  1323. wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
  1324. wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
  1325. (const void *)d, sizeof(*d), false);
  1326. /* hold reference to skb
  1327. * to prevent skb release before accounting
  1328. * in case of immediate "tx done"
  1329. */
  1330. vring->ctx[i].skb = skb_get(skb);
  1331. /* performance monitoring */
  1332. used = wil_vring_used_tx(vring);
  1333. if (wil_val_in_range(vring_idle_trsh,
  1334. used, used + nr_frags + 1)) {
  1335. txdata->idle += get_cycles() - txdata->last_idle;
  1336. wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
  1337. vring_index, used, used + nr_frags + 1);
  1338. }
  1339. /* advance swhead */
  1340. wil_vring_advance_head(vring, nr_frags + 1);
  1341. wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
  1342. vring->swhead);
  1343. trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
  1344. /* make sure all writes to descriptors (shared memory) are done before
  1345. * committing them to HW
  1346. */
  1347. wmb();
  1348. wil_w(wil, vring->hwtail, vring->swhead);
  1349. return 0;
  1350. dma_error:
  1351. /* unmap what we have mapped */
  1352. nr_frags = f + 1; /* frags mapped + one for skb head */
  1353. for (f = 0; f < nr_frags; f++) {
  1354. struct wil_ctx *ctx;
  1355. i = (swhead + f) % vring->size;
  1356. ctx = &vring->ctx[i];
  1357. _d = &vring->va[i].tx;
  1358. *d = *_d;
  1359. _d->dma.status = TX_DMA_STATUS_DU;
  1360. wil_txdesc_unmap(dev, d, ctx);
  1361. if (ctx->skb)
  1362. dev_kfree_skb_any(ctx->skb);
  1363. memset(ctx, 0, sizeof(*ctx));
  1364. }
  1365. return -EINVAL;
  1366. }
  1367. static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
  1368. struct sk_buff *skb)
  1369. {
  1370. int vring_index = vring - wil->vring_tx;
  1371. struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
  1372. int rc;
  1373. spin_lock(&txdata->lock);
  1374. rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
  1375. (wil, vring, skb);
  1376. spin_unlock(&txdata->lock);
  1377. return rc;
  1378. }
  1379. netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
  1380. {
  1381. struct wil6210_priv *wil = ndev_to_wil(ndev);
  1382. struct ethhdr *eth = (void *)skb->data;
  1383. bool bcast = is_multicast_ether_addr(eth->h_dest);
  1384. struct vring *vring;
  1385. static bool pr_once_fw;
  1386. int rc;
  1387. wil_dbg_txrx(wil, "%s()\n", __func__);
  1388. if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
  1389. if (!pr_once_fw) {
  1390. wil_err(wil, "FW not ready\n");
  1391. pr_once_fw = true;
  1392. }
  1393. goto drop;
  1394. }
  1395. if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
  1396. wil_err(wil, "FW not connected\n");
  1397. goto drop;
  1398. }
  1399. if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
  1400. wil_err(wil, "Xmit in monitor mode not supported\n");
  1401. goto drop;
  1402. }
  1403. pr_once_fw = false;
  1404. /* find vring */
  1405. if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
  1406. /* in STA mode (ESS), all to same VRING */
  1407. vring = wil_find_tx_vring_sta(wil, skb);
  1408. } else { /* direct communication, find matching VRING */
  1409. vring = bcast ? wil_find_tx_bcast(wil, skb) :
  1410. wil_find_tx_ucast(wil, skb);
  1411. }
  1412. if (unlikely(!vring)) {
  1413. wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
  1414. goto drop;
  1415. }
  1416. /* set up vring entry */
  1417. rc = wil_tx_vring(wil, vring, skb);
  1418. /* do we still have enough room in the vring? */
  1419. if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) {
  1420. netif_tx_stop_all_queues(wil_to_ndev(wil));
  1421. wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
  1422. }
  1423. switch (rc) {
  1424. case 0:
  1425. /* statistics will be updated on the tx_complete */
  1426. dev_kfree_skb_any(skb);
  1427. return NETDEV_TX_OK;
  1428. case -ENOMEM:
  1429. return NETDEV_TX_BUSY;
  1430. default:
  1431. break; /* goto drop; */
  1432. }
  1433. drop:
  1434. ndev->stats.tx_dropped++;
  1435. dev_kfree_skb_any(skb);
  1436. return NET_XMIT_DROP;
  1437. }
  1438. static inline bool wil_need_txstat(struct sk_buff *skb)
  1439. {
  1440. struct ethhdr *eth = (void *)skb->data;
  1441. return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
  1442. (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
  1443. }
  1444. static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
  1445. {
  1446. if (unlikely(wil_need_txstat(skb)))
  1447. skb_complete_wifi_ack(skb, acked);
  1448. else
  1449. acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
  1450. }
  1451. /**
  1452. * Clean up transmitted skb's from the Tx VRING
  1453. *
  1454. * Return number of descriptors cleared
  1455. *
  1456. * Safe to call from IRQ
  1457. */
  1458. int wil_tx_complete(struct wil6210_priv *wil, int ringid)
  1459. {
  1460. struct net_device *ndev = wil_to_ndev(wil);
  1461. struct device *dev = wil_to_dev(wil);
  1462. struct vring *vring = &wil->vring_tx[ringid];
  1463. struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
  1464. int done = 0;
  1465. int cid = wil->vring2cid_tid[ringid][0];
  1466. struct wil_net_stats *stats = NULL;
  1467. volatile struct vring_tx_desc *_d;
  1468. int used_before_complete;
  1469. int used_new;
  1470. if (unlikely(!vring->va)) {
  1471. wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
  1472. return 0;
  1473. }
  1474. if (unlikely(!txdata->enabled)) {
  1475. wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
  1476. return 0;
  1477. }
  1478. wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
  1479. used_before_complete = wil_vring_used_tx(vring);
  1480. if (cid < WIL6210_MAX_CID)
  1481. stats = &wil->sta[cid].stats;
  1482. while (!wil_vring_is_empty(vring)) {
  1483. int new_swtail;
  1484. struct wil_ctx *ctx = &vring->ctx[vring->swtail];
  1485. /**
  1486. * For a fragmented skb, HW will set the DU bit only for the
  1487. * last fragment - look for it.
  1488. * In TSO, the first DU includes the hdr desc.
  1489. */
  1490. int lf = (vring->swtail + ctx->nr_frags) % vring->size;
  1491. /* TODO: check we are not past head */
  1492. _d = &vring->va[lf].tx;
  1493. if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
  1494. break;
  1495. new_swtail = (lf + 1) % vring->size;
  1496. while (vring->swtail != new_swtail) {
  1497. struct vring_tx_desc dd, *d = &dd;
  1498. u16 dmalen;
  1499. struct sk_buff *skb;
  1500. ctx = &vring->ctx[vring->swtail];
  1501. skb = ctx->skb;
  1502. _d = &vring->va[vring->swtail].tx;
  1503. *d = *_d;
  1504. dmalen = le16_to_cpu(d->dma.length);
  1505. trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
  1506. d->dma.error);
  1507. wil_dbg_txrx(wil,
  1508. "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
  1509. ringid, vring->swtail, dmalen,
  1510. d->dma.status, d->dma.error);
  1511. wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
  1512. (const void *)d, sizeof(*d), false);
  1513. wil_txdesc_unmap(dev, d, ctx);
  1514. if (skb) {
  1515. if (likely(d->dma.error == 0)) {
  1516. ndev->stats.tx_packets++;
  1517. ndev->stats.tx_bytes += skb->len;
  1518. if (stats) {
  1519. stats->tx_packets++;
  1520. stats->tx_bytes += skb->len;
  1521. }
  1522. } else {
  1523. ndev->stats.tx_errors++;
  1524. if (stats)
  1525. stats->tx_errors++;
  1526. }
  1527. wil_consume_skb(skb, d->dma.error == 0);
  1528. }
  1529. memset(ctx, 0, sizeof(*ctx));
  1530. /* There is no need to touch HW descriptor:
  1531. * - status bit TX_DMA_STATUS_DU is set by design,
  1532. * so hardware will not try to process this desc.,
  1533. * - rest of descriptor will be initialized on Tx.
  1534. */
  1535. vring->swtail = wil_vring_next_tail(vring);
  1536. done++;
  1537. }
  1538. }
  1539. /* performance monitoring */
  1540. used_new = wil_vring_used_tx(vring);
  1541. if (wil_val_in_range(vring_idle_trsh,
  1542. used_new, used_before_complete)) {
  1543. wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
  1544. ringid, used_before_complete, used_new);
  1545. txdata->last_idle = get_cycles();
  1546. }
  1547. if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
  1548. wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
  1549. netif_tx_wake_all_queues(wil_to_ndev(wil));
  1550. }
  1551. return done;
  1552. }
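
The completion loop above keys off the DU bit of the last descriptor of each skb, found ctx->nr_frags slots past swtail. A toy sketch of just that swtail bookkeeping (hypothetical names, user-space; the real code also unmaps buffers and updates statistics):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

/* toy per-descriptor state: du = "DMA done" bit written back by hardware,
 * nr_frags = number of extra descriptors that belong to the same skb
 */
static bool du[RING_SIZE];
static int nr_frags[RING_SIZE];

/* simplified analogue of the outer loop in wil_tx_complete() */
static int reap(unsigned *swtail, unsigned swhead)
{
	int done = 0;

	while (*swtail != swhead) {
		unsigned lf = (*swtail + nr_frags[*swtail]) % RING_SIZE;

		if (!du[lf]) /* last fragment of this skb not done yet */
			break;
		/* release all descriptors of this skb */
		*swtail = (lf + 1) % RING_SIZE;
		done++;
	}
	return done;
}

int main(void)
{
	unsigned swtail = 0, swhead = 5;

	/* skb A uses descriptors 0..2, skb B uses descriptors 3..4 */
	nr_frags[0] = 2;
	nr_frags[3] = 1;
	du[2] = true; /* HW has finished skb A only */

	int done = reap(&swtail, swhead);

	printf("reaped %d skb(s), swtail now %u\n", done, swtail); /* 1, 3 */
	return 0;
}
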