txrx.c

  1. /*
  2. * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/etherdevice.h>
  17. #include <net/ieee80211_radiotap.h>
  18. #include <linux/if_arp.h>
  19. #include <linux/moduleparam.h>
  20. #include <linux/ip.h>
  21. #include <linux/ipv6.h>
  22. #include <net/ipv6.h>
  23. #include <linux/prefetch.h>
  24. #include "wil6210.h"
  25. #include "wmi.h"
  26. #include "txrx.h"
  27. #include "trace.h"
  28. static bool rtap_include_phy_info;
  29. module_param(rtap_include_phy_info, bool, S_IRUGO);
  30. MODULE_PARM_DESC(rtap_include_phy_info,
  31. " Include PHY info in the radiotap header, default - no");
  32. bool rx_align_2;
  33. module_param(rx_align_2, bool, S_IRUGO);
  34. MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
  35. static inline uint wil_rx_snaplen(void)
  36. {
  37. return rx_align_2 ? 6 : 0;
  38. }
  39. static inline int wil_vring_is_empty(struct vring *vring)
  40. {
  41. return vring->swhead == vring->swtail;
  42. }
  43. static inline u32 wil_vring_next_tail(struct vring *vring)
  44. {
  45. return (vring->swtail + 1) % vring->size;
  46. }
  47. static inline void wil_vring_advance_head(struct vring *vring, int n)
  48. {
  49. vring->swhead = (vring->swhead + n) % vring->size;
  50. }
  51. static inline int wil_vring_is_full(struct vring *vring)
  52. {
  53. return wil_vring_next_tail(vring) == vring->swhead;
  54. }
  55. /* Used space in Tx Vring */
  56. static inline int wil_vring_used_tx(struct vring *vring)
  57. {
  58. u32 swhead = vring->swhead;
  59. u32 swtail = vring->swtail;
  60. return (vring->size + swhead - swtail) % vring->size;
  61. }
  62. /* Available space in Tx Vring */
  63. static inline int wil_vring_avail_tx(struct vring *vring)
  64. {
  65. return vring->size - wil_vring_used_tx(vring) - 1;
  66. }
  67. /* wil_vring_wmark_low - low watermark for available descriptor space */
  68. static inline int wil_vring_wmark_low(struct vring *vring)
  69. {
  70. return vring->size/8;
  71. }
  72. /* wil_vring_wmark_high - high watermark for available descriptor space */
  73. static inline int wil_vring_wmark_high(struct vring *vring)
  74. {
  75. return vring->size/4;
  76. }
  77. /* returns true if num avail descriptors is lower than wmark_low */
  78. static inline int wil_vring_avail_low(struct vring *vring)
  79. {
  80. return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
  81. }
  82. /* returns true if num avail descriptors is higher than wmark_high */
  83. static inline int wil_vring_avail_high(struct vring *vring)
  84. {
  85. return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
  86. }
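/* Editor's sketch (not part of the original driver): the helpers above keep
 * one slot permanently unused so a full ring can be told apart from an empty
 * one, i.e. used + avail == size - 1 at all times. A hypothetical self-check
 * demonstrating the invariant:
 */
static inline void wil_vring_check_occupancy(struct vring *vring)
{
	/* holds for any swhead/swtail since both wrap modulo vring->size */
	WARN_ON_ONCE(wil_vring_used_tx(vring) + wil_vring_avail_tx(vring) !=
		     vring->size - 1);
}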
  87. /* wil_val_in_range - check if value in [min,max) */
  88. static inline bool wil_val_in_range(int val, int min, int max)
  89. {
  90. return val >= min && val < max;
  91. }
  92. static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
  93. {
  94. struct device *dev = wil_to_dev(wil);
  95. size_t sz = vring->size * sizeof(vring->va[0]);
  96. uint i;
  97. wil_dbg_misc(wil, "%s()\n", __func__);
  98. BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
  99. vring->swhead = 0;
  100. vring->swtail = 0;
  101. vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
  102. if (!vring->ctx) {
  103. vring->va = NULL;
  104. return -ENOMEM;
  105. }
  106. /* vring->va should be aligned on its size rounded up to power of 2
  107. * This is guaranteed by dma_alloc_coherent
  108. */
  109. vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
  110. if (!vring->va) {
  111. kfree(vring->ctx);
  112. vring->ctx = NULL;
  113. return -ENOMEM;
  114. }
  115. /* initially, all descriptors are SW owned
  116. * For Tx and Rx, ownership bit is at the same location, thus
  117. * we can use any
  118. */
  119. for (i = 0; i < vring->size; i++) {
  120. volatile struct vring_tx_desc *_d = &vring->va[i].tx;
  121. _d->dma.status = TX_DMA_STATUS_DU;
  122. }
  123. wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
  124. vring->va, &vring->pa, vring->ctx);
  125. return 0;
  126. }
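/* Illustrative helper (editor's assumption, not in the original file): the
 * DU status bit written above marks descriptors as SW-owned; wil_vring_reap_rx()
 * below tests the same bit to detect descriptors completed by the DMA engine.
 */
static inline bool wil_rx_desc_completed(volatile struct vring_rx_desc *_d)
{
	return !!(_d->dma.status & RX_DMA_STATUS_DU);
}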
  127. static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
  128. struct wil_ctx *ctx)
  129. {
  130. dma_addr_t pa = wil_desc_addr(&d->dma.addr);
  131. u16 dmalen = le16_to_cpu(d->dma.length);
  132. switch (ctx->mapped_as) {
  133. case wil_mapped_as_single:
  134. dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
  135. break;
  136. case wil_mapped_as_page:
  137. dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
  138. break;
  139. default:
  140. break;
  141. }
  142. }
  143. static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
  144. int tx)
  145. {
  146. struct device *dev = wil_to_dev(wil);
  147. size_t sz = vring->size * sizeof(vring->va[0]);
  148. lockdep_assert_held(&wil->mutex);
  149. if (tx) {
  150. int vring_index = vring - wil->vring_tx;
  151. wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
  152. vring_index, vring->size, vring->va,
  153. &vring->pa, vring->ctx);
  154. } else {
  155. wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
  156. vring->size, vring->va,
  157. &vring->pa, vring->ctx);
  158. }
  159. while (!wil_vring_is_empty(vring)) {
  160. dma_addr_t pa;
  161. u16 dmalen;
  162. struct wil_ctx *ctx;
  163. if (tx) {
  164. struct vring_tx_desc dd, *d = &dd;
  165. volatile struct vring_tx_desc *_d =
  166. &vring->va[vring->swtail].tx;
  167. ctx = &vring->ctx[vring->swtail];
  168. if (!ctx) {
  169. wil_dbg_txrx(wil,
  170. "ctx(%d) was already completed\n",
  171. vring->swtail);
  172. vring->swtail = wil_vring_next_tail(vring);
  173. continue;
  174. }
  175. *d = *_d;
  176. wil_txdesc_unmap(dev, d, ctx);
  177. if (ctx->skb)
  178. dev_kfree_skb_any(ctx->skb);
  179. vring->swtail = wil_vring_next_tail(vring);
  180. } else { /* rx */
  181. struct vring_rx_desc dd, *d = &dd;
  182. volatile struct vring_rx_desc *_d =
  183. &vring->va[vring->swhead].rx;
  184. ctx = &vring->ctx[vring->swhead];
  185. *d = *_d;
  186. pa = wil_desc_addr(&d->dma.addr);
  187. dmalen = le16_to_cpu(d->dma.length);
  188. dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
  189. kfree_skb(ctx->skb);
  190. wil_vring_advance_head(vring, 1);
  191. }
  192. }
  193. dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
  194. kfree(vring->ctx);
  195. vring->pa = 0;
  196. vring->va = NULL;
  197. vring->ctx = NULL;
  198. }
  199. /**
  200. * Allocate one skb for Rx VRING
  201. *
  202. * Safe to call from IRQ
  203. */
  204. static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
  205. u32 i, int headroom)
  206. {
  207. struct device *dev = wil_to_dev(wil);
  208. unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
  209. struct vring_rx_desc dd, *d = &dd;
  210. volatile struct vring_rx_desc *_d = &vring->va[i].rx;
  211. dma_addr_t pa;
  212. struct sk_buff *skb = dev_alloc_skb(sz + headroom);
  213. if (unlikely(!skb))
  214. return -ENOMEM;
  215. skb_reserve(skb, headroom);
  216. skb_put(skb, sz);
  217. pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
  218. if (unlikely(dma_mapping_error(dev, pa))) {
  219. kfree_skb(skb);
  220. return -ENOMEM;
  221. }
  222. d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
  223. wil_desc_addr_set(&d->dma.addr, pa);
  224. /* ip_length don't care */
  225. /* b11 don't care */
  226. /* error don't care */
  227. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  228. d->dma.length = cpu_to_le16(sz);
  229. *_d = *d;
  230. vring->ctx[i].skb = skb;
  231. return 0;
  232. }
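/* Sketch only (helper name is hypothetical): the Rx buffer length used in
 * wil_vring_alloc_skb() above and wil_vring_reap_rx() below, factored out
 * for clarity.
 */
static inline unsigned int wil_rx_buf_len(void)
{
	/* max MTU payload + Ethernet header + optional 6-byte SNAP pad */
	return mtu_max + ETH_HLEN + wil_rx_snaplen();
}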
  233. /**
  234. * Adds radiotap header
  235. *
  236. * Any error indicated as "Bad FCS"
  237. *
  238. * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
  239. * - Rx descriptor: 32 bytes
  240. * - Phy info
  241. */
  242. static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
  243. struct sk_buff *skb)
  244. {
  245. struct wireless_dev *wdev = wil->wdev;
  246. struct wil6210_rtap {
  247. struct ieee80211_radiotap_header rthdr;
  248. /* fields should be in the order of bits in rthdr.it_present */
  249. /* flags */
  250. u8 flags;
  251. /* channel */
  252. __le16 chnl_freq __aligned(2);
  253. __le16 chnl_flags;
  254. /* MCS */
  255. u8 mcs_present;
  256. u8 mcs_flags;
  257. u8 mcs_index;
  258. } __packed;
  259. struct wil6210_rtap_vendor {
  260. struct wil6210_rtap rtap;
  261. /* vendor */
  262. u8 vendor_oui[3] __aligned(2);
  263. u8 vendor_ns;
  264. __le16 vendor_skip;
  265. u8 vendor_data[0];
  266. } __packed;
  267. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  268. struct wil6210_rtap_vendor *rtap_vendor;
  269. int rtap_len = sizeof(struct wil6210_rtap);
  270. int phy_length = 0; /* phy info header size, bytes */
  271. static char phy_data[128];
  272. struct ieee80211_channel *ch = wdev->preset_chandef.chan;
  273. if (rtap_include_phy_info) {
  274. rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
  275. /* calculate additional length */
  276. if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
  277. /**
  278. * PHY info starts at an 8-byte boundary and consists of
  279. * 8-byte lines; the last line may be partially
  280. * written (HW bug), so the FW configures one line in
  281. * excess. The driver skips this last line.
  282. */
  283. int len = min_t(int, 8 + sizeof(phy_data),
  284. wil_rxdesc_phy_length(d));
  285. if (len > 8) {
  286. void *p = skb_tail_pointer(skb);
  287. void *pa = PTR_ALIGN(p, 8);
  288. if (skb_tailroom(skb) >= len + (pa - p)) {
  289. phy_length = len - 8;
  290. memcpy(phy_data, pa, phy_length);
  291. }
  292. }
  293. }
  294. rtap_len += phy_length;
  295. }
  296. if (skb_headroom(skb) < rtap_len &&
  297. pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
  298. wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
  299. return;
  300. }
  301. rtap_vendor = (void *)skb_push(skb, rtap_len);
  302. memset(rtap_vendor, 0, rtap_len);
  303. rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
  304. rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
  305. rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
  306. (1 << IEEE80211_RADIOTAP_FLAGS) |
  307. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  308. (1 << IEEE80211_RADIOTAP_MCS));
  309. if (d->dma.status & RX_DMA_STATUS_ERROR)
  310. rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
  311. rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
  312. rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
  313. rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
  314. rtap_vendor->rtap.mcs_flags = 0;
  315. rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
  316. if (rtap_include_phy_info) {
  317. rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
  318. IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
  319. /* OUI for Wilocity 04:ce:14 */
  320. rtap_vendor->vendor_oui[0] = 0x04;
  321. rtap_vendor->vendor_oui[1] = 0xce;
  322. rtap_vendor->vendor_oui[2] = 0x14;
  323. rtap_vendor->vendor_ns = 1;
  324. /* Rx descriptor + PHY data */
  325. rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
  326. phy_length);
  327. memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
  328. memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
  329. phy_length);
  330. }
  331. }
  332. /* similar to the ieee80211_ version, but FC contains only the 1st byte */
  333. static inline int wil_is_back_req(u8 fc)
  334. {
  335. return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
  336. (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
  337. }
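/* Worked example (editorial, illustrative values): a BlockAckReq frame has
 * type = Control and subtype = BAR, so its first frame-control byte is 0x84:
 *
 *	wil_is_back_req(0x84);	// BlockAckReq -> true
 *	wil_is_back_req(0x88);	// QoS Data    -> false
 */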
  338. /**
  339. * reap 1 frame from @swhead
  340. *
  341. * Rx descriptor copied to skb->cb
  342. *
  343. * Safe to call from IRQ
  344. */
  345. static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
  346. struct vring *vring)
  347. {
  348. struct device *dev = wil_to_dev(wil);
  349. struct net_device *ndev = wil_to_ndev(wil);
  350. volatile struct vring_rx_desc *_d;
  351. struct vring_rx_desc *d;
  352. struct sk_buff *skb;
  353. dma_addr_t pa;
  354. unsigned int snaplen = wil_rx_snaplen();
  355. unsigned int sz = mtu_max + ETH_HLEN + snaplen;
  356. u16 dmalen;
  357. u8 ftype;
  358. int cid;
  359. int i;
  360. struct wil_net_stats *stats;
  361. BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
  362. again:
  363. if (unlikely(wil_vring_is_empty(vring)))
  364. return NULL;
  365. i = (int)vring->swhead;
  366. _d = &vring->va[i].rx;
  367. if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
  368. /* it is not error, we just reached end of Rx done area */
  369. return NULL;
  370. }
  371. skb = vring->ctx[i].skb;
  372. vring->ctx[i].skb = NULL;
  373. wil_vring_advance_head(vring, 1);
  374. if (!skb) {
  375. wil_err(wil, "No Rx skb at [%d]\n", i);
  376. goto again;
  377. }
  378. d = wil_skb_rxdesc(skb);
  379. *d = *_d;
  380. pa = wil_desc_addr(&d->dma.addr);
  381. dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
  382. dmalen = le16_to_cpu(d->dma.length);
  383. trace_wil6210_rx(i, d);
  384. wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
  385. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  386. (const void *)d, sizeof(*d), false);
  387. cid = wil_rxdesc_cid(d);
  388. stats = &wil->sta[cid].stats;
  389. if (unlikely(dmalen > sz)) {
  390. wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
  391. stats->rx_large_frame++;
  392. kfree_skb(skb);
  393. goto again;
  394. }
  395. skb_trim(skb, dmalen);
  396. prefetch(skb->data);
  397. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  398. skb->data, skb_headlen(skb), false);
  399. stats->last_mcs_rx = wil_rxdesc_mcs(d);
  400. if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
  401. stats->rx_per_mcs[stats->last_mcs_rx]++;
  402. /* use radiotap header only if required */
  403. if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
  404. wil_rx_add_radiotap_header(wil, skb);
  405. /* no extra checks if in sniffer mode */
  406. if (ndev->type != ARPHRD_ETHER)
  407. return skb;
  408. /* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
  409. * The driver recognizes them by the frame type found in the Rx
  410. * descriptor. If the type is not data, the frame is an 802.11 frame as-is
  411. */
  412. ftype = wil_rxdesc_ftype(d) << 2;
  413. if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
  414. u8 fc1 = wil_rxdesc_fc1(d);
  415. int mid = wil_rxdesc_mid(d);
  416. int tid = wil_rxdesc_tid(d);
  417. u16 seq = wil_rxdesc_seq(d);
  418. wil_dbg_txrx(wil,
  419. "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  420. fc1, mid, cid, tid, seq);
  421. stats->rx_non_data_frame++;
  422. if (wil_is_back_req(fc1)) {
  423. wil_dbg_txrx(wil,
  424. "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
  425. mid, cid, tid, seq);
  426. wil_rx_bar(wil, cid, tid, seq);
  427. } else {
  428. /* print again all info. One can enable only this
  429. * without overhead for printing every Rx frame
  430. */
  431. wil_dbg_txrx(wil,
  432. "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
  433. fc1, mid, cid, tid, seq);
  434. wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
  435. (const void *)d, sizeof(*d), false);
  436. wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
  437. skb->data, skb_headlen(skb), false);
  438. }
  439. kfree_skb(skb);
  440. goto again;
  441. }
  442. if (unlikely(skb->len < ETH_HLEN + snaplen)) {
  443. wil_err(wil, "Short frame, len = %d\n", skb->len);
  444. stats->rx_short_frame++;
  445. kfree_skb(skb);
  446. goto again;
  447. }
  448. /* L4 IDENT is on when HW calculated checksum, check status
  449. * and in case of error drop the packet
  450. * higher stack layers will handle retransmission (if required)
  451. */
  452. if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
  453. /* L4 protocol identified, csum calculated */
  454. if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
  455. skb->ip_summed = CHECKSUM_UNNECESSARY;
  456. /* If HW reports bad checksum, let IP stack re-check it
  457. * For example, the HW doesn't understand the Microsoft IP stack, which
  458. * mis-calculates TCP checksum - if it should be 0x0,
  459. * it writes 0xffff in violation of RFC 1624
  460. */
  461. }
  462. if (snaplen) {
  463. /* Packet layout
  464. * +-------+-------+---------+------------+------+
  465. * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
  466. * +-------+-------+---------+------------+------+
  467. * Need to remove SNAP, shifting SA and DA forward
  468. */
  469. memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
  470. skb_pull(skb, snaplen);
  471. }
  472. return skb;
  473. }
  474. /**
  475. * allocate and fill up to @count buffers in rx ring
  476. * buffers posted at @swtail
  477. */
  478. static int wil_rx_refill(struct wil6210_priv *wil, int count)
  479. {
  480. struct net_device *ndev = wil_to_ndev(wil);
  481. struct vring *v = &wil->vring_rx;
  482. u32 next_tail;
  483. int rc = 0;
  484. int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
  485. WIL6210_RTAP_SIZE : 0;
  486. for (; next_tail = wil_vring_next_tail(v),
  487. (next_tail != v->swhead) && (count-- > 0);
  488. v->swtail = next_tail) {
  489. rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
  490. if (unlikely(rc)) {
  491. wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
  492. rc, v->swtail);
  493. break;
  494. }
  495. }
  496. /* make sure all writes to descriptors (shared memory) are done before
  497. * committing them to HW
  498. */
  499. wmb();
  500. wil_w(wil, v->hwtail, v->swtail);
  501. return rc;
  502. }
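/* Editorial note: the wmb() above orders the descriptor writes (shared
 * memory) before the wil_w() doorbell write to hwtail, so HW never fetches
 * a half-initialized descriptor.
 */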
  503. /**
  504. * reverse_memcmp - Compare two areas of memory, in reverse order
  505. * @cs: One area of memory
  506. * @ct: Another area of memory
  507. * @count: The size of the area.
  508. *
  509. * Cut'n'paste from original memcmp (see lib/string.c)
  510. * with minimal modifications
  511. */
  512. static int reverse_memcmp(const void *cs, const void *ct, size_t count)
  513. {
  514. const unsigned char *su1, *su2;
  515. int res = 0;
  516. for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
  517. --su1, --su2, count--) {
  518. res = *su1 - *su2;
  519. if (res)
  520. break;
  521. }
  522. return res;
  523. }
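/* Illustrative wrapper (hypothetical name, editor's addition): with PNs
 * stored least-significant byte first, reverse_memcmp() orders them
 * numerically; wil_rx_crypto_check() below uses exactly this comparison for
 * replay detection.
 */
static inline bool wil_pn_is_newer(const u8 *pn, const u8 *last_pn)
{
	return reverse_memcmp(pn, last_pn, IEEE80211_GCMP_PN_LEN) > 0;
}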
  524. static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
  525. {
  526. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  527. int cid = wil_rxdesc_cid(d);
  528. int tid = wil_rxdesc_tid(d);
  529. int key_id = wil_rxdesc_key_id(d);
  530. int mc = wil_rxdesc_mcast(d);
  531. struct wil_sta_info *s = &wil->sta[cid];
  532. struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
  533. &s->tid_crypto_rx[tid];
  534. struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
  535. const u8 *pn = (u8 *)&d->mac.pn_15_0;
  536. if (!cc->key_set) {
  537. wil_err_ratelimited(wil,
  538. "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
  539. cid, tid, mc, key_id);
  540. return -EINVAL;
  541. }
  542. if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
  543. wil_err_ratelimited(wil,
  544. "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
  545. cid, tid, mc, key_id, pn, cc->pn);
  546. return -EINVAL;
  547. }
  548. memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
  549. return 0;
  550. }
  551. /*
  552. * Pass Rx packet to the netif. Update statistics.
  553. * Called in softirq context (NAPI poll).
  554. */
  555. void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
  556. {
  557. gro_result_t rc = GRO_NORMAL;
  558. struct wil6210_priv *wil = ndev_to_wil(ndev);
  559. struct wireless_dev *wdev = wil_to_wdev(wil);
  560. unsigned int len = skb->len;
  561. struct vring_rx_desc *d = wil_skb_rxdesc(skb);
  562. int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
  563. int security = wil_rxdesc_security(d);
  564. struct ethhdr *eth = (void *)skb->data;
  565. /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
  566. * is not suitable, need to look at data
  567. */
  568. int mcast = is_multicast_ether_addr(eth->h_dest);
  569. struct wil_net_stats *stats = &wil->sta[cid].stats;
  570. struct sk_buff *xmit_skb = NULL;
  571. static const char * const gro_res_str[] = {
  572. [GRO_MERGED] = "GRO_MERGED",
  573. [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
  574. [GRO_HELD] = "GRO_HELD",
  575. [GRO_NORMAL] = "GRO_NORMAL",
  576. [GRO_DROP] = "GRO_DROP",
  577. };
  578. if (ndev->features & NETIF_F_RXHASH)
  579. /* fake L4 to ensure it won't be re-calculated later
  580. * set hash to any non-zero value to activate rps
  581. * mechanism, core will be chosen according
  582. * to user-level rps configuration.
  583. */
  584. skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
  585. skb_orphan(skb);
  586. if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
  587. rc = GRO_DROP;
  588. dev_kfree_skb(skb);
  589. stats->rx_replay++;
  590. goto stats;
  591. }
  592. if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
  593. if (mcast) {
  594. /* send multicast frames both to higher layers in
  595. * local net stack and back to the wireless medium
  596. */
  597. xmit_skb = skb_copy(skb, GFP_ATOMIC);
  598. } else {
  599. int xmit_cid = wil_find_cid(wil, eth->h_dest);
  600. if (xmit_cid >= 0) {
  601. /* The destination station is associated to
  602. * this AP (in this VLAN), so send the frame
  603. * directly to it and do not pass it to local
  604. * net stack.
  605. */
  606. xmit_skb = skb;
  607. skb = NULL;
  608. }
  609. }
  610. }
  611. if (xmit_skb) {
  612. /* Send to wireless media and increase priority by 256 to
  613. * keep the received priority instead of reclassifying
  614. * the frame (see cfg80211_classify8021d).
  615. */
  616. xmit_skb->dev = ndev;
  617. xmit_skb->priority += 256;
  618. xmit_skb->protocol = htons(ETH_P_802_3);
  619. skb_reset_network_header(xmit_skb);
  620. skb_reset_mac_header(xmit_skb);
  621. wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
  622. dev_queue_xmit(xmit_skb);
  623. }
  624. if (skb) { /* deliver to local stack */
  625. skb->protocol = eth_type_trans(skb, ndev);
  626. rc = napi_gro_receive(&wil->napi_rx, skb);
  627. wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
  628. len, gro_res_str[rc]);
  629. }
  630. stats:
  631. /* statistics. rc set to GRO_NORMAL for AP bridging */
  632. if (unlikely(rc == GRO_DROP)) {
  633. ndev->stats.rx_dropped++;
  634. stats->rx_dropped++;
  635. wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
  636. } else {
  637. ndev->stats.rx_packets++;
  638. stats->rx_packets++;
  639. ndev->stats.rx_bytes += len;
  640. stats->rx_bytes += len;
  641. if (mcast)
  642. ndev->stats.multicast++;
  643. }
  644. }
  645. /**
  646. * Process all completed skb's from the Rx VRING
  647. *
  648. * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
  649. */
  650. void wil_rx_handle(struct wil6210_priv *wil, int *quota)
  651. {
  652. struct net_device *ndev = wil_to_ndev(wil);
  653. struct vring *v = &wil->vring_rx;
  654. struct sk_buff *skb;
  655. if (unlikely(!v->va)) {
  656. wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
  657. return;
  658. }
  659. wil_dbg_txrx(wil, "%s()\n", __func__);
  660. while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
  661. (*quota)--;
  662. if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
  663. skb->dev = ndev;
  664. skb_reset_mac_header(skb);
  665. skb->ip_summed = CHECKSUM_UNNECESSARY;
  666. skb->pkt_type = PACKET_OTHERHOST;
  667. skb->protocol = htons(ETH_P_802_2);
  668. wil_netif_rx_any(skb, ndev);
  669. } else {
  670. wil_rx_reorder(wil, skb);
  671. }
  672. }
  673. wil_rx_refill(wil, v->size);
  674. }
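/* Usage sketch (editorial; variable names are illustrative): a typical NAPI
 * Rx poll handler would call this with its remaining budget, e.g.
 *
 *	int quota = budget;
 *
 *	wil_rx_handle(wil, &quota);
 *	done = budget - quota;
 */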
  675. int wil_rx_init(struct wil6210_priv *wil, u16 size)
  676. {
  677. struct vring *vring = &wil->vring_rx;
  678. int rc;
  679. wil_dbg_misc(wil, "%s()\n", __func__);
  680. if (vring->va) {
  681. wil_err(wil, "Rx ring already allocated\n");
  682. return -EINVAL;
  683. }
  684. vring->size = size;
  685. rc = wil_vring_alloc(wil, vring);
  686. if (rc)
  687. return rc;
  688. rc = wmi_rx_chain_add(wil, vring);
  689. if (rc)
  690. goto err_free;
  691. rc = wil_rx_refill(wil, vring->size);
  692. if (rc)
  693. goto err_free;
  694. return 0;
  695. err_free:
  696. wil_vring_free(wil, vring, 0);
  697. return rc;
  698. }
  699. void wil_rx_fini(struct wil6210_priv *wil)
  700. {
  701. struct vring *vring = &wil->vring_rx;
  702. wil_dbg_misc(wil, "%s()\n", __func__);
  703. if (vring->va)
  704. wil_vring_free(wil, vring, 0);
  705. }
  706. static inline void wil_tx_data_init(struct vring_tx_data *txdata)
  707. {
  708. spin_lock_bh(&txdata->lock);
  709. txdata->dot1x_open = 0;
  710. txdata->enabled = 0;
  711. txdata->idle = 0;
  712. txdata->last_idle = 0;
  713. txdata->begin = 0;
  714. txdata->agg_wsize = 0;
  715. txdata->agg_timeout = 0;
  716. txdata->agg_amsdu = 0;
  717. txdata->addba_in_progress = false;
  718. spin_unlock_bh(&txdata->lock);
  719. }
  720. int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
  721. int cid, int tid)
  722. {
  723. int rc;
  724. struct wmi_vring_cfg_cmd cmd = {
  725. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  726. .vring_cfg = {
  727. .tx_sw_ring = {
  728. .max_mpdu_size =
  729. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  730. .ring_size = cpu_to_le16(size),
  731. },
  732. .ringid = id,
  733. .cidxtid = mk_cidxtid(cid, tid),
  734. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  735. .mac_ctrl = 0,
  736. .to_resolution = 0,
  737. .agg_max_wsize = 0,
  738. .schd_params = {
  739. .priority = cpu_to_le16(0),
  740. .timeslot_us = cpu_to_le16(0xfff),
  741. },
  742. },
  743. };
  744. struct {
  745. struct wmi_cmd_hdr wmi;
  746. struct wmi_vring_cfg_done_event cmd;
  747. } __packed reply;
  748. struct vring *vring = &wil->vring_tx[id];
  749. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  750. wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
  751. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  752. lockdep_assert_held(&wil->mutex);
  753. if (vring->va) {
  754. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  755. rc = -EINVAL;
  756. goto out;
  757. }
  758. wil_tx_data_init(txdata);
  759. vring->size = size;
  760. rc = wil_vring_alloc(wil, vring);
  761. if (rc)
  762. goto out;
  763. wil->vring2cid_tid[id][0] = cid;
  764. wil->vring2cid_tid[id][1] = tid;
  765. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  766. if (!wil->privacy)
  767. txdata->dot1x_open = true;
  768. rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  769. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  770. if (rc)
  771. goto out_free;
  772. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  773. wil_err(wil, "Tx config failed, status 0x%02x\n",
  774. reply.cmd.status);
  775. rc = -EINVAL;
  776. goto out_free;
  777. }
  778. spin_lock_bh(&txdata->lock);
  779. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  780. txdata->enabled = 1;
  781. spin_unlock_bh(&txdata->lock);
  782. if (txdata->dot1x_open && (agg_wsize >= 0))
  783. wil_addba_tx_request(wil, id, agg_wsize);
  784. return 0;
  785. out_free:
  786. spin_lock_bh(&txdata->lock);
  787. txdata->dot1x_open = false;
  788. txdata->enabled = 0;
  789. spin_unlock_bh(&txdata->lock);
  790. wil_vring_free(wil, vring, 1);
  791. wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
  792. wil->vring2cid_tid[id][1] = 0;
  793. out:
  794. return rc;
  795. }
  796. int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
  797. {
  798. int rc;
  799. struct wmi_bcast_vring_cfg_cmd cmd = {
  800. .action = cpu_to_le32(WMI_VRING_CMD_ADD),
  801. .vring_cfg = {
  802. .tx_sw_ring = {
  803. .max_mpdu_size =
  804. cpu_to_le16(wil_mtu2macbuf(mtu_max)),
  805. .ring_size = cpu_to_le16(size),
  806. },
  807. .ringid = id,
  808. .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
  809. },
  810. };
  811. struct {
  812. struct wmi_cmd_hdr wmi;
  813. struct wmi_vring_cfg_done_event cmd;
  814. } __packed reply;
  815. struct vring *vring = &wil->vring_tx[id];
  816. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  817. wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
  818. cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
  819. lockdep_assert_held(&wil->mutex);
  820. if (vring->va) {
  821. wil_err(wil, "Tx ring [%d] already allocated\n", id);
  822. rc = -EINVAL;
  823. goto out;
  824. }
  825. wil_tx_data_init(txdata);
  826. vring->size = size;
  827. rc = wil_vring_alloc(wil, vring);
  828. if (rc)
  829. goto out;
  830. wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
  831. wil->vring2cid_tid[id][1] = 0; /* TID */
  832. cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
  833. if (!wil->privacy)
  834. txdata->dot1x_open = true;
  835. rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
  836. WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
  837. if (rc)
  838. goto out_free;
  839. if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
  840. wil_err(wil, "Tx config failed, status 0x%02x\n",
  841. reply.cmd.status);
  842. rc = -EINVAL;
  843. goto out_free;
  844. }
  845. spin_lock_bh(&txdata->lock);
  846. vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
  847. txdata->enabled = 1;
  848. spin_unlock_bh(&txdata->lock);
  849. return 0;
  850. out_free:
  851. spin_lock_bh(&txdata->lock);
  852. txdata->enabled = 0;
  853. txdata->dot1x_open = false;
  854. spin_unlock_bh(&txdata->lock);
  855. wil_vring_free(wil, vring, 1);
  856. out:
  857. return rc;
  858. }
  859. void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
  860. {
  861. struct vring *vring = &wil->vring_tx[id];
  862. struct vring_tx_data *txdata = &wil->vring_tx_data[id];
  863. lockdep_assert_held(&wil->mutex);
  864. if (!vring->va)
  865. return;
  866. wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
  867. spin_lock_bh(&txdata->lock);
  868. txdata->dot1x_open = false;
  869. txdata->enabled = 0; /* no Tx can be in progress or start anew */
  870. spin_unlock_bh(&txdata->lock);
  871. /* napi_synchronize waits for completion of the current NAPI but will
  872. * not prevent the next NAPI run.
  873. * Add a memory barrier to guarantee that txdata->enabled is zeroed
  874. * before napi_synchronize so that the next scheduled NAPI will not
  875. * handle this vring
  876. */
  877. wmb();
  878. /* make sure NAPI won't touch this vring */
  879. if (test_bit(wil_status_napi_en, wil->status))
  880. napi_synchronize(&wil->napi_tx);
  881. wil_vring_free(wil, vring, 1);
  882. }
  883. static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
  884. struct sk_buff *skb)
  885. {
  886. int i;
  887. struct ethhdr *eth = (void *)skb->data;
  888. int cid = wil_find_cid(wil, eth->h_dest);
  889. if (cid < 0)
  890. return NULL;
  891. /* TODO: fix for multiple TID */
  892. for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
  893. if (!wil->vring_tx_data[i].dot1x_open &&
  894. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  895. continue;
  896. if (wil->vring2cid_tid[i][0] == cid) {
  897. struct vring *v = &wil->vring_tx[i];
  898. struct vring_tx_data *txdata = &wil->vring_tx_data[i];
  899. wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
  900. __func__, eth->h_dest, i);
  901. if (v->va && txdata->enabled) {
  902. return v;
  903. } else {
  904. wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
  905. return NULL;
  906. }
  907. }
  908. }
  909. return NULL;
  910. }
  911. static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
  912. struct sk_buff *skb);
  913. static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
  914. struct sk_buff *skb)
  915. {
  916. struct vring *v;
  917. int i;
  918. u8 cid;
  919. struct vring_tx_data *txdata;
  920. /* In the STA mode, it is expected to have only 1 VRING
  921. * for the AP we are connected to.
  922. * Find the first vring eligible for this skb and use it.
  923. */
  924. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  925. v = &wil->vring_tx[i];
  926. txdata = &wil->vring_tx_data[i];
  927. if (!v->va || !txdata->enabled)
  928. continue;
  929. cid = wil->vring2cid_tid[i][0];
  930. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  931. continue;
  932. if (!wil->vring_tx_data[i].dot1x_open &&
  933. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  934. continue;
  935. wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
  936. return v;
  937. }
  938. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  939. return NULL;
  940. }
  941. /* Use one of 2 strategies:
  942. *
  943. * 1. New (real broadcast):
  944. * use dedicated broadcast vring
  945. * 2. Old (pseudo-DMS):
  946. * Find 1-st vring and return it;
  947. * duplicate skb and send it to other active vrings;
  948. * in all cases override dest address to unicast peer's address
  949. * Use old strategy when new is not supported yet:
  950. * - for PBSS
  951. */
  952. static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
  953. struct sk_buff *skb)
  954. {
  955. struct vring *v;
  956. struct vring_tx_data *txdata;
  957. int i = wil->bcast_vring;
  958. if (i < 0)
  959. return NULL;
  960. v = &wil->vring_tx[i];
  961. txdata = &wil->vring_tx_data[i];
  962. if (!v->va || !txdata->enabled)
  963. return NULL;
  964. if (!wil->vring_tx_data[i].dot1x_open &&
  965. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  966. return NULL;
  967. return v;
  968. }
  969. static void wil_set_da_for_vring(struct wil6210_priv *wil,
  970. struct sk_buff *skb, int vring_index)
  971. {
  972. struct ethhdr *eth = (void *)skb->data;
  973. int cid = wil->vring2cid_tid[vring_index][0];
  974. ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
  975. }
  976. static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
  977. struct sk_buff *skb)
  978. {
  979. struct vring *v, *v2;
  980. struct sk_buff *skb2;
  981. int i;
  982. u8 cid;
  983. struct ethhdr *eth = (void *)skb->data;
  984. char *src = eth->h_source;
  985. struct vring_tx_data *txdata;
  986. /* find 1-st vring eligible for data */
  987. for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
  988. v = &wil->vring_tx[i];
  989. txdata = &wil->vring_tx_data[i];
  990. if (!v->va || !txdata->enabled)
  991. continue;
  992. cid = wil->vring2cid_tid[i][0];
  993. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  994. continue;
  995. if (!wil->vring_tx_data[i].dot1x_open &&
  996. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  997. continue;
  998. /* don't Tx back to source when re-routing Rx->Tx at the AP */
  999. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  1000. continue;
  1001. goto found;
  1002. }
  1003. wil_dbg_txrx(wil, "Tx while no vrings active?\n");
  1004. return NULL;
  1005. found:
  1006. wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
  1007. wil_set_da_for_vring(wil, skb, i);
  1008. /* find other active vrings and duplicate skb for each */
  1009. for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
  1010. v2 = &wil->vring_tx[i];
  1011. if (!v2->va)
  1012. continue;
  1013. cid = wil->vring2cid_tid[i][0];
  1014. if (cid >= WIL6210_MAX_CID) /* skip BCAST */
  1015. continue;
  1016. if (!wil->vring_tx_data[i].dot1x_open &&
  1017. (skb->protocol != cpu_to_be16(ETH_P_PAE)))
  1018. continue;
  1019. if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
  1020. continue;
  1021. skb2 = skb_copy(skb, GFP_ATOMIC);
  1022. if (skb2) {
  1023. wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
  1024. wil_set_da_for_vring(wil, skb2, i);
  1025. wil_tx_vring(wil, v2, skb2);
  1026. } else {
  1027. wil_err(wil, "skb_copy failed\n");
  1028. }
  1029. }
  1030. return v;
  1031. }
  1032. static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
  1033. struct sk_buff *skb)
  1034. {
  1035. struct wireless_dev *wdev = wil->wdev;
  1036. if (wdev->iftype != NL80211_IFTYPE_AP)
  1037. return wil_find_tx_bcast_2(wil, skb);
  1038. return wil_find_tx_bcast_1(wil, skb);
  1039. }
  1040. static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
  1041. int vring_index)
  1042. {
  1043. wil_desc_addr_set(&d->dma.addr, pa);
  1044. d->dma.ip_length = 0;
  1045. /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
  1046. d->dma.b11 = 0/*14 | BIT(7)*/;
  1047. d->dma.error = 0;
  1048. d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
  1049. d->dma.length = cpu_to_le16((u16)len);
  1050. d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
  1051. d->mac.d[0] = 0;
  1052. d->mac.d[1] = 0;
  1053. d->mac.d[2] = 0;
  1054. d->mac.ucode_cmd = 0;
  1055. /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
  1056. d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
  1057. (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
  1058. return 0;
  1059. }
  1060. static inline
  1061. void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
  1062. {
  1063. d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
  1064. }
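/* Usage sketch (editorial): every DMA-mapped chunk of a frame gets one
 * descriptor; for a mapped head of length len on ring vring_index the
 * non-TSO path below does, in order:
 *
 *	wil_tx_desc_map(d, pa, len, vring_index);
 *	wil_tx_desc_offload_setup(d, skb);	// csum offload, if requested
 *	wil_tx_desc_set_nr_frags(d, nr_frags + 1);
 */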
  1065. /**
  1066. * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
  1067. * @skb is used to obtain the protocol and headers length.
  1068. * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
  1069. * 2 - middle, 3 - last descriptor.
  1070. */
  1071. static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
  1072. struct sk_buff *skb,
  1073. int tso_desc_type, bool is_ipv4,
  1074. int tcp_hdr_len, int skb_net_hdr_len)
  1075. {
  1076. d->dma.b11 = ETH_HLEN; /* MAC header length */
  1077. d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
  1078. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  1079. /* L4 header len: TCP header length */
  1080. d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1081. /* Setup TSO: bit and desc type */
  1082. d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
  1083. (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
  1084. d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
  1085. d->dma.ip_length = skb_net_hdr_len;
  1086. /* Enable TCP/UDP checksum */
  1087. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  1088. /* Calculate pseudo-header */
  1089. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  1090. }
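/* Editorial note: @tso_desc_type above takes the wil_tso_type_* values used
 * in __wil_tx_vring_tso() below - hdr for the header-only descriptor,
 * first/mid for data descriptors within a segment, lst for the descriptor
 * closing the segment.
 */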
  1091. /**
  1092. * Sets the descriptor @d up for csum. The corresponding
  1093. * @skb is used to obtain the protocol and headers length.
  1094. * Returns 0 on success (including the no-offload case when
  1095. * skb->ip_summed != CHECKSUM_PARTIAL), or -EINVAL for unsupported protocols.
  1096. *
  1097. * It is very similar to previous wil_tx_desc_offload_setup_tso. This
  1098. * is "if unrolling" to optimize the critical path.
  1099. */
  1100. static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
  1101. struct sk_buff *skb){
  1102. int protocol;
  1103. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1104. return 0;
  1105. d->dma.b11 = ETH_HLEN; /* MAC header length */
  1106. switch (skb->protocol) {
  1107. case cpu_to_be16(ETH_P_IP):
  1108. protocol = ip_hdr(skb)->protocol;
  1109. d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
  1110. break;
  1111. case cpu_to_be16(ETH_P_IPV6):
  1112. protocol = ipv6_hdr(skb)->nexthdr;
  1113. break;
  1114. default:
  1115. return -EINVAL;
  1116. }
  1117. switch (protocol) {
  1118. case IPPROTO_TCP:
  1119. d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
  1120. /* L4 header len: TCP header length */
  1121. d->dma.d0 |=
  1122. (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1123. break;
  1124. case IPPROTO_UDP:
  1125. /* L4 header len: UDP header length */
  1126. d->dma.d0 |=
  1127. (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
  1128. break;
  1129. default:
  1130. return -EINVAL;
  1131. }
  1132. d->dma.ip_length = skb_network_header_len(skb);
  1133. /* Enable TCP/UDP checksum */
  1134. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
  1135. /* Calculate pseudo-header */
  1136. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
  1137. return 0;
  1138. }
  1139. static inline void wil_tx_last_desc(struct vring_tx_desc *d)
  1140. {
  1141. d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
  1142. BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
  1143. BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
  1144. }
  1145. static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
  1146. {
  1147. d->dma.d0 |= wil_tso_type_lst <<
  1148. DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
  1149. }
  1150. static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
  1151. struct sk_buff *skb)
  1152. {
  1153. struct device *dev = wil_to_dev(wil);
  1154. /* point to descriptors in shared memory */
  1155. volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
  1156. *_first_desc = NULL;
  1157. /* pointers to shadow descriptors */
  1158. struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
  1159. *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
  1160. *first_desc = &first_desc_mem;
  1161. /* pointer to shadow descriptors' context */
  1162. struct wil_ctx *hdr_ctx, *first_ctx = NULL;
  1163. int descs_used = 0; /* total number of used descriptors */
  1164. int sg_desc_cnt = 0; /* number of descriptors for current mss*/
  1165. u32 swhead = vring->swhead;
  1166. int used, avail = wil_vring_avail_tx(vring);
  1167. int nr_frags = skb_shinfo(skb)->nr_frags;
  1168. int min_desc_required = nr_frags + 1;
  1169. int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
  1170. int f, len, hdrlen, headlen;
  1171. int vring_index = vring - wil->vring_tx;
  1172. struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
  1173. uint i = swhead;
  1174. dma_addr_t pa;
  1175. const skb_frag_t *frag = NULL;
  1176. int rem_data = mss;
  1177. int lenmss;
  1178. int hdr_compensation_need = true;
  1179. int desc_tso_type = wil_tso_type_first;
  1180. bool is_ipv4;
  1181. int tcp_hdr_len;
  1182. int skb_net_hdr_len;
  1183. int gso_type;
  1184. int rc = -EINVAL;
  1185. wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
  1186. __func__, skb->len, vring_index);
  1187. if (unlikely(!txdata->enabled))
  1188. return -EINVAL;
  1189. /* A typical page 4K is 3-4 payloads, we assume each fragment
  1190. * is a full payload, that's how min_desc_required has been
  1191. * calculated. In reality we might need more or fewer descriptors,
  1192. * this is the initial check only.
  1193. */
  1194. if (unlikely(avail < min_desc_required)) {
  1195. wil_err_ratelimited(wil,
  1196. "TSO: Tx ring[%2d] full. No space for %d fragments\n",
  1197. vring_index, min_desc_required);
  1198. return -ENOMEM;
  1199. }
  1200. /* Header Length = MAC header len + IP header len + TCP header len*/
  1201. hdrlen = ETH_HLEN +
  1202. (int)skb_network_header_len(skb) +
  1203. tcp_hdrlen(skb);
  1204. gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
  1205. switch (gso_type) {
  1206. case SKB_GSO_TCPV4:
  1207. /* TCP v4, zero out the IP length and IPv4 checksum fields
  1208. * as required by the offloading doc
  1209. */
  1210. ip_hdr(skb)->tot_len = 0;
  1211. ip_hdr(skb)->check = 0;
  1212. is_ipv4 = true;
  1213. break;
  1214. case SKB_GSO_TCPV6:
  1215. /* TCP v6, zero out the payload length */
  1216. ipv6_hdr(skb)->payload_len = 0;
  1217. is_ipv4 = false;
  1218. break;
  1219. default:
  1220. /* other than TCPv4 or TCPv6 types are not supported for TSO.
  1221. * It is also illegal for both to be set simultaneously
  1222. */
  1223. return -EINVAL;
  1224. }
  1225. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1226. return -EINVAL;
  1227. /* tcp header length and skb network header length are fixed for all
  1228. * packet's descriptors - read them once here
  1229. */
  1230. tcp_hdr_len = tcp_hdrlen(skb);
  1231. skb_net_hdr_len = skb_network_header_len(skb);
  1232. _hdr_desc = &vring->va[i].tx;
  1233. pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
  1234. if (unlikely(dma_mapping_error(dev, pa))) {
  1235. wil_err(wil, "TSO: Skb head DMA map error\n");
  1236. goto err_exit;
  1237. }
  1238. wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
  1239. wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
  1240. tcp_hdr_len, skb_net_hdr_len);
  1241. wil_tx_last_desc(hdr_desc);
  1242. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1243. hdr_ctx = &vring->ctx[i];
  1244. descs_used++;
  1245. headlen = skb_headlen(skb) - hdrlen;
  1246. for (f = headlen ? -1 : 0; f < nr_frags; f++) {
  1247. if (headlen) {
  1248. len = headlen;
  1249. wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
  1250. len);
  1251. } else {
  1252. frag = &skb_shinfo(skb)->frags[f];
  1253. len = frag->size;
  1254. wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
  1255. }
  1256. while (len) {
  1257. wil_dbg_txrx(wil,
  1258. "TSO: len %d, rem_data %d, descs_used %d\n",
  1259. len, rem_data, descs_used);
  1260. if (descs_used == avail) {
  1261. wil_err_ratelimited(wil, "TSO: ring overflow\n");
  1262. rc = -ENOMEM;
  1263. goto mem_error;
  1264. }
  1265. lenmss = min_t(int, rem_data, len);
  1266. i = (swhead + descs_used) % vring->size;
  1267. wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
  1268. if (!headlen) {
  1269. pa = skb_frag_dma_map(dev, frag,
  1270. frag->size - len, lenmss,
  1271. DMA_TO_DEVICE);
  1272. vring->ctx[i].mapped_as = wil_mapped_as_page;
  1273. } else {
  1274. pa = dma_map_single(dev,
  1275. skb->data +
  1276. skb_headlen(skb) - headlen,
  1277. lenmss,
  1278. DMA_TO_DEVICE);
  1279. vring->ctx[i].mapped_as = wil_mapped_as_single;
  1280. headlen -= lenmss;
  1281. }
  1282. if (unlikely(dma_mapping_error(dev, pa))) {
  1283. wil_err(wil, "TSO: DMA map page error\n");
  1284. goto mem_error;
  1285. }
  1286. _desc = &vring->va[i].tx;
  1287. if (!_first_desc) {
  1288. _first_desc = _desc;
  1289. first_ctx = &vring->ctx[i];
  1290. d = first_desc;
  1291. } else {
  1292. d = &desc_mem;
  1293. }
  1294. wil_tx_desc_map(d, pa, lenmss, vring_index);
  1295. wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
  1296. is_ipv4, tcp_hdr_len,
  1297. skb_net_hdr_len);
  1298. /* use tso_type_first only once */
  1299. desc_tso_type = wil_tso_type_mid;
  1300. descs_used++; /* desc used so far */
  1301. sg_desc_cnt++; /* desc used for this segment */
  1302. len -= lenmss;
  1303. rem_data -= lenmss;
  1304. wil_dbg_txrx(wil,
  1305. "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
  1306. len, rem_data, descs_used, sg_desc_cnt);
  1307. /* Close the segment if reached mss size or last frag*/
  1308. if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
  1309. if (hdr_compensation_need) {
  1310. /* first segment include hdr desc for
  1311. * release
  1312. */
  1313. hdr_ctx->nr_frags = sg_desc_cnt;
  1314. wil_tx_desc_set_nr_frags(first_desc,
  1315. sg_desc_cnt +
  1316. 1);
  1317. hdr_compensation_need = false;
  1318. } else {
  1319. wil_tx_desc_set_nr_frags(first_desc,
  1320. sg_desc_cnt);
  1321. }
  1322. first_ctx->nr_frags = sg_desc_cnt - 1;
  1323. wil_tx_last_desc(d);
  1324. /* first descriptor may also be the last
  1325. * for this mss - make sure not to copy
  1326. * it twice
  1327. */
  1328. if (first_desc != d)
  1329. *_first_desc = *first_desc;
  1330. /* the last descriptor will be copied at the end
  1331. * of this TSO processing
  1332. */
  1333. if (f < nr_frags - 1 || len > 0)
  1334. *_desc = *d;
  1335. rem_data = mss;
  1336. _first_desc = NULL;
  1337. sg_desc_cnt = 0;
  1338. } else if (first_desc != d) /* update mid descriptor */
  1339. *_desc = *d;
  1340. }
  1341. }

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr) */
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
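	/* Error unwind: walk back over the descriptors filled so far, newest
	 * first, unmap their DMA buffers and clear the associated ctx entries.
	 */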
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx;
		_desc = &vring->va[i].tx;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}
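
/* Non-TSO transmit path: map the skb head and each page fragment into
 * consecutive descriptors on the vring, then commit them to HW.
 */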
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
		     __func__, skb->len, vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				vring_index);
			goto dma_error;
		}
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;

dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		memset(ctx, 0, sizeof(*ctx));
	}
	return -EINVAL;
}
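
/* Serialize Tx on this vring and dispatch to the TSO or the regular path */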
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}

/**
 * Check status of tx vrings and stop/wake net queues if needed
 *
 * This function does one of two checks:
 * If check_stop is true, it checks whether the net queues need to be stopped;
 * when the conditions for stopping are met, netif_tx_stop_all_queues() is
 * called.
 * If check_stop is false, it checks whether the net queues need to be woken;
 * when the conditions for waking are met, netif_tx_wake_all_queues() is
 * called.
 * vring is the vring currently being modified, either by adding descriptors
 * (tx) to it or by removing descriptors (tx complete) from it. It can be NULL
 * when irrelevant (e.g. connect/disconnect events).
 *
 * The policy is to stop the net queues if the modified vring has low
 * descriptor availability, and to wake them only if no vring is low on
 * descriptors and the modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct vring *vring,
					   bool check_stop)
{
	int i;

	if (vring)
		wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d",
			     (int)(vring - wil->vring_tx), check_stop,
			     wil->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, stopped=%d",
			     check_stop, wil->net_queue_stopped);

	if (check_stop == wil->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!vring || unlikely(wil_vring_avail_low(vring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(wil_to_ndev(wil));
			wil->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* check wake */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *cur_vring = &wil->vring_tx[i];
		struct vring_tx_data *txdata = &wil->vring_tx_data[i];

		if (!cur_vring->va || !txdata->enabled || cur_vring == vring)
			continue;

		if (wil_vring_avail_low(cur_vring)) {
			wil_dbg_txrx(wil, "vring %d full, can't wake\n",
				     (int)(cur_vring - wil->vring_tx));
			return;
		}
	}

	if (!vring || wil_vring_avail_high(vring)) {
		/* enough room in the vring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
		wil->net_queue_stopped = false;
	}
}
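
/* The two wrappers below run the stop/wake check under net_queue_lock; the
 * _bh variant additionally disables bottom halves while holding it.
 */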
void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
			   bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
			      bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}
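
/* Netdev transmit entry point (ndo_start_xmit): drop the frame if FW is not
 * ready/connected or the interface is in monitor mode; otherwise pick a Tx
 * VRING for the destination, queue the skb on it and, on success, check
 * whether the net queues must be stopped.
 */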
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
		/* in STA mode (ESS), all to same VRING */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else { /* direct communication, find matching VRING */
		vring = bcast ? wil_find_tx_bcast(wil, skb) :
				wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
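
/* true when the socket asked for wifi ACK status reporting on this unicast
 * frame (SKBTX_WIFI_STATUS)
 */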
static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}
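
/* Release the skb, reporting the ACK status to the socket when requested */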
static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}

/**
 * Clean up transmitted skbs from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/* For a fragmented skb, HW sets the DU bit only on the last
		 * fragment; look for it.
		 * In TSO the first DU will include the hdr desc.
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_vring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();
			/* There is no need to touch HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this descriptor,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vring, false);

	return done;
}