i40e_txrx.c

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
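
/* Illustrative example (not from the datasheet): for a single 60-byte
 * buffer sent with EOP and RS set and no offsets or VLAN tag,
 * build_ctob(I40E_TXD_CMD, 0, 60, 0) packs the DATA descriptor type,
 * the command bits, and the buffer size into one little-endian
 * quadword, ready to be stored in tx_desc->cmd_type_offset_bsz.
 */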

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10

/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The pf pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* set the timestamp */
	tx_buf->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42

/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}
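
/* Layout note (derived from the constants above): the 42-byte dummy
 * frame is a 14-byte Ethernet header (hence IP_HEADER_OFFSET of 14),
 * a 20-byte IPv4 header (0x45 = version 4, IHL 5; protocol 0x11 = UDP)
 * and an 8-byte UDP header: 14 + 20 + 8 = I40E_UDPIP_DUMMY_PACKET_LEN.
 * Only the addresses and ports are patched in before programming.
 */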

#define I40E_TCPIP_DUMMY_PACKET_LEN 54

/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34

/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		} else {
			dev_info(&pdev->dev,
				 "FD filter programming failed due to incorrect filter parameters\n");
		}
	} else if (error ==
		   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
		   ? ring->next_to_use
		   : ring->next_to_use + ring->count);

	return ntu - ring->next_to_clean;
}
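
/* Worked example (illustrative): with a 512-entry ring, next_to_clean
 * = 500 and next_to_use = 10, the producer has wrapped, so ntu becomes
 * 10 + 512 = 522 and 522 - 500 = 22 descriptors are still pending.
 */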

/**
 * i40e_check_tx_hang - Is there a hang in the Tx queue
 * @tx_ring: the ring of descriptors
 **/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
	u32 tx_pending = i40e_get_tx_pending(tx_ring);
	struct i40e_pf *pf = tx_ring->vsi->back;
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * PFC clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
	    (tx_pending >= I40E_MIN_DESC_PENDING)) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
		   (tx_pending < I40E_MIN_DESC_PENDING) &&
		   (tx_pending > 0)) {
		if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
			dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
				 tx_pending, tx_ring->queue_index);
		pf->tx_sluggish_count++;
	} else {
		/* update completed stats and disarm the hang check */
		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
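
/* The head write-back slot lives immediately after the last descriptor:
 * i40e_setup_tx_descriptors() below reserves sizeof(u32) past
 * count * sizeof(struct i40e_tx_desc) for exactly this purpose, so the
 * pointer arithmetic above lands on the word the hardware updates with
 * the most recently completed descriptor index.
 */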

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
			 "  VSI                  <%d>\n"
			 "  Tx Queue             <%d>\n"
			 "  next_to_use          <%x>\n"
			 "  next_to_clean        <%x>\n",
			 tx_ring->vsi->seid,
			 tx_ring->queue_index,
			 tx_ring->next_to_use, i);
		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
			 "  time_stamp           <%lx>\n"
			 "  jiffies              <%lx>\n",
			 tx_ring->tx_bi[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		dev_info(tx_ring->dev,
			 "tx hang detected on queue %d, resetting adapter\n",
			 tx_ring->queue_index);

		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return budget > 0;
}
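
/* Note on the cleanup loop above: budget is decremented once per
 * completed frame (each next_to_watch/EOP descriptor), not once per
 * descriptor, and the index arithmetic keeps i offset by -count so
 * the wrap check is a cheap "!i" test; the true ring index is
 * restored by adding count back before writing next_to_clean.
 */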

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;
	int bytes_per_int;

	if (rc->total_packets == 0 || !rc->itr)
		return;

	/* simple throttle rate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
	bytes_per_int = rc->total_bytes / rc->itr;
	switch (rc->itr) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
		if (bytes_per_int <= 20)
			rc->latency_range = I40E_LOW_LATENCY;
		break;
	}

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	if (new_itr != rc->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * rc->itr) /
			  ((9 * new_itr) + rc->itr);
		rc->itr = new_itr & I40E_MAX_ITR;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
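
/* Smoothing example (illustrative values): with rc->itr = 200 and a
 * target new_itr = 40, the blend (10 * 40 * 200) / ((9 * 40) + 200) =
 * 80000 / 560 = 142 (integer division), so the ITR moves part of the
 * way toward the target each pass instead of jumping straight to it.
 */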

/**
 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
 * @q_vector: the vector to adjust
 **/
static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
{
	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
	struct i40e_hw *hw = &q_vector->vsi->back->hw;
	u32 reg_addr;
	u16 old_itr;

	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
	old_itr = q_vector->rx.itr;
	i40e_set_new_dynamic_itr(&q_vector->rx);
	if (old_itr != q_vector->rx.itr)
		wr32(hw, reg_addr, q_vector->rx.itr);

	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
	old_itr = q_vector->tx.itr;
	i40e_set_new_dynamic_itr(&q_vector->tx);
	if (old_itr != q_vector->tx.itr)
		wr32(hw, reg_addr, q_vector->tx.itr);
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info.
			 */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
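
/* Half-page reuse note (assuming 4 KiB pages): page_offset ^= PAGE_SIZE / 2
 * toggles between offsets 0 and 2048, so each allocated page is carved
 * into two receive buffers that are mapped and handed to hardware in
 * turn rather than allocating a fresh page per descriptor.
 */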

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;
	struct i40e_vsi *vsi = rx_ring->vsi;
	u64 flags = vsi->back->flags;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	if (flags & I40E_FLAG_IN_NETPOLL)
		netif_rx(skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes.
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (ipv4_tunnel &&
	    (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);
		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					 ? VLAN_HLEN : 0;

		rx_udp_csum = udp_csum(skb);
		iph = ip_hdr(skb);
		csum = csum_tcpudp_magic(
				iph->saddr, iph->daddr,
				(skb->len - skb_transport_offset(skb)),
				IPPROTO_UDP, rx_udp_csum);

		if (udp_hdr(skb)->check != csum)
			goto checksum_fail;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
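
/* Offset arithmetic for the tunnel case above (illustrative): with no
 * VLAN tag and a minimal IHL of 5, the outer transport header starts
 * at 14 (Ethernet) + 5 * 4 (IPv4) = byte 34 of the frame; a single
 * VLAN tag shifts that by VLAN_HLEN (4) to byte 38.
 */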

/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}

/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
  1193. /**
  1194. * i40e_clean_rx_irq - Reclaim resources after receive completes
  1195. * @rx_ring: rx ring to clean
  1196. * @budget: how many cleans we're allowed
  1197. *
  1198. * Returns true if there's any budget left (e.g. the clean is finished)
  1199. **/
  1200. static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
  1201. {
  1202. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  1203. u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
  1204. u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
  1205. const int current_node = numa_node_id();
  1206. struct i40e_vsi *vsi = rx_ring->vsi;
  1207. u16 i = rx_ring->next_to_clean;
  1208. union i40e_rx_desc *rx_desc;
  1209. u32 rx_error, rx_status;
  1210. u8 rx_ptype;
  1211. u64 qword;
  1212. if (budget <= 0)
  1213. return 0;
  1214. rx_desc = I40E_RX_DESC(rx_ring, i);
  1215. qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
  1216. rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
  1217. I40E_RXD_QW1_STATUS_SHIFT;
  1218. while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
  1219. union i40e_rx_desc *next_rxd;
  1220. struct i40e_rx_buffer *rx_bi;
  1221. struct sk_buff *skb;
  1222. u16 vlan_tag;
  1223. if (i40e_rx_is_programming_status(qword)) {
  1224. i40e_clean_programming_status(rx_ring, rx_desc);
  1225. I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
  1226. goto next_desc;
  1227. }
  1228. rx_bi = &rx_ring->rx_bi[i];
  1229. skb = rx_bi->skb;
  1230. prefetch(skb->data);
  1231. rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
  1232. I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
  1233. rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
  1234. I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
  1235. rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
  1236. I40E_RXD_QW1_LENGTH_SPH_SHIFT;
  1237. rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
  1238. I40E_RXD_QW1_ERROR_SHIFT;
  1239. rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
  1240. rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
  1241. rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
  1242. I40E_RXD_QW1_PTYPE_SHIFT;
  1243. rx_bi->skb = NULL;
  1244. /* This memory barrier is needed to keep us from reading
  1245. * any other fields out of the rx_desc until we know the
  1246. * STATUS_DD bit is set
  1247. */
  1248. rmb();
  1249. /* Get the header and possibly the whole packet
  1250. * If this is an skb from previous receive dma will be 0
  1251. */
  1252. if (rx_bi->dma) {
  1253. u16 len;
  1254. if (rx_hbo)
  1255. len = I40E_RX_HDR_SIZE;
  1256. else if (rx_sph)
  1257. len = rx_header_len;
  1258. else if (rx_packet_len)
  1259. len = rx_packet_len; /* 1buf/no split found */
  1260. else
  1261. len = rx_header_len; /* split always mode */
  1262. skb_put(skb, len);
  1263. dma_unmap_single(rx_ring->dev,
  1264. rx_bi->dma,
  1265. rx_ring->rx_buf_len,
  1266. DMA_FROM_DEVICE);
  1267. rx_bi->dma = 0;
  1268. }
  1269. /* Get the rest of the data if this was a header split */
  1270. if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
  1271. skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
  1272. rx_bi->page,
  1273. rx_bi->page_offset,
  1274. rx_packet_len);
  1275. skb->len += rx_packet_len;
  1276. skb->data_len += rx_packet_len;
  1277. skb->truesize += rx_packet_len;
			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
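
		/* A frame can span multiple descriptors; if EOF is not set
		 * yet, pass the in-progress skb along to the next buffer so
		 * the following iteration keeps appending to it.
		 */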
		if (unlikely(
		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];

			if (ring_is_ps_enabled(rx_ring)) {
				rx_bi->skb = next_buffer->skb;
				rx_bi->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			/* TODO: shouldn't we increment a counter indicating the
			 * drop?
			 */
			goto next_desc;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			goto next_desc;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_ring->netdev->last_rx = jiffies;
		budget--;
next_desc:
		rx_desc->wb.qword1.status_error_len = 0;
		if (!budget)
			break;

		cleaned_count++;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;
	}

	rx_ring->next_to_clean = i;
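
	/* Publish the totals under the ring's u64_stats sync so the 64-bit
	 * counters read consistently on 32-bit hosts.
	 */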
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	if (cleaned_count)
		i40e_alloc_rx_buffers(rx_ring, cleaned_count);

	return budget > 0;
}

/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	int budget_per_ring;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx)
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx)
		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete(napi);
	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
		i40e_update_dynamic_itr(q_vector);

	if (!test_bit(__I40E_DOWN, &vsi->state)) {
		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
			i40e_irq_dynamic_enable(vsi,
					q_vector->v_idx + vsi->base_vector);
		} else {
			struct i40e_hw *hw = &vsi->back->hw;
			/* We re-enable the queue 0 cause, but
			 * don't worry about dynamic_enable
			 * because we left it on for the other
			 * possible interrupts during napi
			 */
			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_RQCTL(0), qval);

			qval = rd32(hw, I40E_QINT_TQCTL(0));
			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
			wr32(hw, I40E_QINT_TQCTL(0), qval);

			i40e_irq_dynamic_enable_icr0(vsi->back);
		}
	}

	return 0;
}

/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @skb: send buffer
 * @flags: send flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if (protocol == htons(ETH_P_IP)) {
		if (hdr.ipv4->protocol != IPPROTO_TCP)
			return;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
	} else if (protocol == htons(ETH_P_IPV6)) {
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return;

		hlen = sizeof(struct ipv6hdr);
	} else {
		return;
	}

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
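
	/* Build the two halves of the programming descriptor: the queue
	 * index, PCTYPE and destination VSI go in qindex_flex_ptype_vsi,
	 * while the add/remove command, destination, FD status reporting
	 * and counter index go in dtype_cmd_cntindex.
	 */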
	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	dtype_cmd |=
		((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}

/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and sets up the transmit flags related to VLAN tagging for
 * the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * and otherwise returns 0 to indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
			       struct i40e_ring *tx_ring,
			       u32 *flags)
#else
static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}
	*flags = tx_flags;
	return 0;
}

/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring: ptr to the ring to send
 * @skb: ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @protocol: the send protocol
 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;
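
	/* Zero the IP length/checksum fields and seed the TCP checksum with
	 * the pseudo-header (length excluded); the hardware fills in the
	 * final per-segment values as it performs the segmentation.
	 */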
	if (protocol == htons(ETH_P_IP)) {
		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (skb_is_gso_v6(skb)) {
		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
					   : ipv6_hdr(skb);
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}

/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring: ptr to the ring to send
 * @skb: ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;
	return 1;
}

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
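
	/* For encapsulated frames the context descriptor carries the outer
	 * header offloads while the data descriptor covers the inner ones,
	 * so point the this_* header pointers at the inner headers here.
	 */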
	if (skb->encapsulation) {
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (tx_flags & I40E_TX_FLAGS_IPV4) {
			if (tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
			/* the outer IPv6 header has no checksum field, so
			 * only the extended IP type needs to be flagged
			 */
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				  I40E_TXD_CTX_UDP_TUNNELING |
				  ((skb_inner_network_offset(skb) -
				    skb_transport_offset(skb)) >> 1) <<
				  I40E_TXD_CTX_QW0_NATLEN_SHIFT;

	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}
	/* Enable IP checksum offloads */
	if (tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* MACLEN is counted in 2-byte words, IPLEN and L4LEN in 4-byte
	 * dwords, hence the >> 1 and >> 2 shifts
	 */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;
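
	/* If nothing beyond the bare CONTEXT dtype was requested, there is
	 * no information to convey, so don't burn a descriptor on it.
	 */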
	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
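	/* Fast path: most of the time there is room, so skip the queue
	 * stop/restart dance and the memory barrier in the 2nd level check.
	 */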
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
		 struct i40e_tx_buffer *first, u32 tx_flags,
		 const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;
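
	/* Map the linear head first, then walk the paged frags; any chunk
	 * larger than I40E_MAX_DATA_PER_TXD is split across several
	 * descriptors.
	 */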
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}
	/* Place RS bit on last descriptor of any packet that spans across the
	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
	 */
#define WB_STRIDE 0x3
	if (((i & WB_STRIDE) != WB_STRIDE) &&
	    (first <= &tx_ring->tx_bi[i]) &&
	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
					 I40E_TXD_QW1_CMD_SHIFT);
	} else {
		tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)I40E_TXD_CMD <<
					 I40E_TXD_QW1_CMD_SHIFT);
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
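
	/* Defer the MMIO tail write while the stack reports more frames
	 * queued for this ring (skb->xmit_more); batching doorbell writes
	 * cuts per-packet overhead.
	 */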
	/* notify HW of packet */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						   tx_ring->queue_index)))
		writel(i, tx_ring->tail);

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
 **/
#ifdef I40E_FCOE
int i40e_xmit_descriptor_count(struct sk_buff *skb,
			       struct i40e_ring *tx_ring)
#else
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;
		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
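		/* skb_pad() frees the skb on failure, so returning
		 * NETDEV_TX_OK without another kfree is correct here
		 */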
		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = I40E_MIN_TX_LEN;
		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
	}

	return i40e_xmit_frame_ring(skb, tx_ring);
}