/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
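/* build_ctob() packs the command, header offset, buffer size and L2 tag
 * fields into the single 64-bit quadword that the hardware reads from a
 * data descriptor; callers below pass the per-field values and write the
 * result straight into tx_desc->cmd_type_offset_bsz.
 */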
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10

/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
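/* Note that programming one Flow Director rule consumes two Tx descriptors:
 * the filter programming descriptor built above and a "dummy" data
 * descriptor that carries the raw packet used as the filter template.
 * The raw_packet buffer is not freed here; it is tagged with
 * I40E_TX_FLAGS_FD_SB and released later by the Tx cleanup path
 * (see i40e_unmap_and_free_tx_resource()).
 */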
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
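	/* The 42-byte template above is a minimal Ethernet + IPv4 + UDP
	 * frame: 14 bytes of Ethernet header with EtherType 0x0800 (IPv4),
	 * a 20-byte IPv4 header (version/IHL 0x45, total length 0x1c = 28,
	 * TTL 0x40, protocol 0x11 = UDP) and an 8-byte UDP header. The
	 * addresses and ports are filled in from fd_data below.
	 */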
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};
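	/* Like the UDP template, this 54-byte frame is Ethernet + IPv4 +
	 * TCP: EtherType 0x0800, an IPv4 header with total length 0x28
	 * (40 bytes, i.e. 20-byte IP header plus 20-byte TCP header) and
	 * protocol 0x6 (TCP); the addresses and ports are patched in from
	 * fd_data before the filter is programmed.
	 */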
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};
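	/* This 34-byte template is just an Ethernet header plus a bare
	 * 20-byte IPv4 header (total length 0x14 = 20). ip->protocol is
	 * cleared below, and the loop that follows programs one copy of
	 * the filter for each IPv4 "other"/fragment PCTYPE so plain IPv4
	 * traffic matches regardless of the L4 protocol.
	 */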
	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: flow director filter entry to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

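/* Example: with a 512-entry ring, head = 500 and tail = 10 means the tail
 * has wrapped past the end of the ring, so the number of descriptors the
 * hardware still has to process is tail + count - head = 10 + 512 - 500 = 22.
 */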
#define WB_STRIDE 0x3
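/* WB_STRIDE + 1 = 4: in the WB_ON_ITR path below, a descriptor write-back
 * is forced only when between one and three descriptors are still pending,
 * i.e. when (pending / (WB_STRIDE + 1)) == 0 and pending != 0.
 */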
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;
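	/* Worked example (assuming the current ITR works out to 20 usec per
	 * interrupt and ITR_COUNTDOWN_START is 100): usecs is roughly the
	 * length of the sampling window, here 2000 usec, so 2,000,000 bytes
	 * over that window gives bytes_per_int = 1000 bytes/usec, i.e. about
	 * 1000 MB/s, which the switch below classifies as bulk latency.
	 */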
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}

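/* The header buffers above come from a single coherent DMA allocation that
 * is carved into rx_ring->count slices of buf_size (rx_hdr_len rounded up
 * to 256 bytes), so descriptor i simply points at offset i * buf_size of
 * that block; this is also why i40e_clean_rx_ring() frees the whole block
 * in one call through rx_bi[0].
 */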
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns true if any errors on allocation
 **/
bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	const int current_node = numa_node_id();

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;

		/* If we've been moved to a different NUMA node, release the
		 * page so we can get a new one on the current node.
		 */
		if (bi->page && page_to_nid(bi->page) != current_node) {
			dma_unmap_page(rx_ring->dev,
				       bi->page_dma,
				       PAGE_SIZE,
				       DMA_FROM_DEVICE);
			__free_page(bi->page);
			bi->page = NULL;
			bi->page_dma = 0;
			rx_ring->rx_stats.realloc_count++;
		} else if (bi->page) {
			rx_ring->rx_stats.page_reuse_count++;
		}

		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    0,
						    PAGE_SIZE,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				__free_page(bi->page);
				bi->page = NULL;
				bi->page_dma = 0;
				bi->page_offset = 0;
				goto no_buffers;
			}
			bi->page_offset = 0;
		}

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr =
			cpu_to_le64(bi->page_dma + bi->page_offset);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	return false;

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns true if any errors on allocation
 **/
bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
							  rx_ring->rx_buf_len,
							  GFP_ATOMIC |
							  __GFP_NOWARN);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				dev_kfree_skb(bi->skb);
				bi->skb = NULL;
				goto no_buffers;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	return false;

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

  1199. /**
  1200. * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
  1201. * @vsi: the VSI we care about
  1202. * @skb: skb currently being received and modified
  1203. * @rx_status: status value of last descriptor in packet
  1204. * @rx_error: error value of last descriptor in packet
  1205. * @rx_ptype: ptype value of last descriptor in packet
  1206. **/
  1207. static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
  1208. struct sk_buff *skb,
  1209. u32 rx_status,
  1210. u32 rx_error,
  1211. u16 rx_ptype)
  1212. {
  1213. struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
  1214. bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
  1215. skb->ip_summed = CHECKSUM_NONE;
  1216. /* Rx csum enabled and ip headers found? */
  1217. if (!(vsi->netdev->features & NETIF_F_RXCSUM))
  1218. return;
  1219. /* did the hardware decode the packet and checksum? */
  1220. if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
  1221. return;
  1222. /* both known and outer_ip must be set for the below code to work */
  1223. if (!(decoded.known && decoded.outer_ip))
  1224. return;
  1225. ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
  1226. (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
  1227. ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
  1228. (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
  1229. if (ipv4 &&
  1230. (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
  1231. BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
  1232. goto checksum_fail;
  1233. /* likely incorrect csum if alternate IP extension headers found */
  1234. if (ipv6 &&
  1235. rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
  1236. /* don't increment checksum err here, non-fatal err */
  1237. return;
  1238. /* there was some L4 error, count error and punt packet to the stack */
  1239. if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
  1240. goto checksum_fail;
  1241. /* handle packets that were not able to be checksummed due
  1242. * to arrival speed, in this case the stack can compute
  1243. * the csum.
  1244. */
  1245. if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
  1246. return;
1247. /* The hardware supported by this driver does not validate outer
1248. * checksums for tunneled VXLAN or GENEVE frames. The specification
1249. * only states that you "MAY validate" them, which makes it a soft
1250. * requirement, so if we have validated the inner checksum, report
1251. * CHECKSUM_UNNECESSARY.
1252. */
  1253. ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
  1254. (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
  1255. ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
  1256. (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
  1257. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1258. skb->csum_level = ipv4_tunnel || ipv6_tunnel;
  1259. return;
  1260. checksum_fail:
  1261. vsi->back->hw_csum_rx_error++;
  1262. }
  1263. /**
  1264. * i40e_ptype_to_htype - get a hash type
  1265. * @ptype: the ptype value from the descriptor
  1266. *
  1267. * Returns a hash type to be used by skb_set_hash
  1268. **/
  1269. static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
  1270. {
  1271. struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
  1272. if (!decoded.known)
  1273. return PKT_HASH_TYPE_NONE;
  1274. if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
  1275. decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
  1276. return PKT_HASH_TYPE_L4;
  1277. else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
  1278. decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
  1279. return PKT_HASH_TYPE_L3;
  1280. else
  1281. return PKT_HASH_TYPE_L2;
  1282. }
  1283. /**
  1284. * i40e_rx_hash - set the hash value in the skb
  1285. * @ring: descriptor ring
  1286. * @rx_desc: specific descriptor
  1287. **/
  1288. static inline void i40e_rx_hash(struct i40e_ring *ring,
  1289. union i40e_rx_desc *rx_desc,
  1290. struct sk_buff *skb,
  1291. u8 rx_ptype)
  1292. {
  1293. u32 hash;
  1294. const __le64 rss_mask =
  1295. cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
  1296. I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
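/* The FLTSTAT field of the status/error qword must read "RSS hash" before
 * the rss word in qword0 is valid, hence the rss_mask test below.
 */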
1297. if (!(ring->netdev->features & NETIF_F_RXHASH))
1298. return;
  1299. if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
  1300. hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
  1301. skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
  1302. }
  1303. }
  1304. /**
  1305. * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  1306. * @rx_ring: rx ring to clean
  1307. * @budget: how many cleans we're allowed
  1308. *
1309. * Returns the number of packets cleaned
  1310. **/
  1311. static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
  1312. {
  1313. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  1314. u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
  1315. u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
  1316. struct i40e_vsi *vsi = rx_ring->vsi;
  1317. u16 i = rx_ring->next_to_clean;
  1318. union i40e_rx_desc *rx_desc;
  1319. u32 rx_error, rx_status;
  1320. bool failure = false;
  1321. u8 rx_ptype;
  1322. u64 qword;
  1323. u32 copysize;
  1324. if (budget <= 0)
  1325. return 0;
  1326. do {
  1327. struct i40e_rx_buffer *rx_bi;
  1328. struct sk_buff *skb;
  1329. u16 vlan_tag;
  1330. /* return some buffers to hardware, one at a time is too slow */
  1331. if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
  1332. failure = failure ||
  1333. i40e_alloc_rx_buffers_ps(rx_ring,
  1334. cleaned_count);
  1335. cleaned_count = 0;
  1336. }
  1337. i = rx_ring->next_to_clean;
  1338. rx_desc = I40E_RX_DESC(rx_ring, i);
  1339. qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
  1340. rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
  1341. I40E_RXD_QW1_STATUS_SHIFT;
  1342. if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
  1343. break;
  1344. /* This memory barrier is needed to keep us from reading
  1345. * any other fields out of the rx_desc until we know the
  1346. * DD bit is set.
  1347. */
  1348. dma_rmb();
  1349. /* sync header buffer for reading */
  1350. dma_sync_single_range_for_cpu(rx_ring->dev,
  1351. rx_ring->rx_bi[0].dma,
  1352. i * rx_ring->rx_hdr_len,
  1353. rx_ring->rx_hdr_len,
  1354. DMA_FROM_DEVICE);
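/* Note: the header buffers for the whole ring share one contiguous DMA
 * region anchored at rx_bi[0].dma, so descriptor i's header bytes sit at
 * offset i * rx_hdr_len within it, as the sync call above reflects.
 */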
  1355. if (i40e_rx_is_programming_status(qword)) {
  1356. i40e_clean_programming_status(rx_ring, rx_desc);
  1357. I40E_RX_INCREMENT(rx_ring, i);
  1358. continue;
  1359. }
  1360. rx_bi = &rx_ring->rx_bi[i];
  1361. skb = rx_bi->skb;
  1362. if (likely(!skb)) {
  1363. skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
  1364. rx_ring->rx_hdr_len,
  1365. GFP_ATOMIC |
  1366. __GFP_NOWARN);
  1367. if (!skb) {
  1368. rx_ring->rx_stats.alloc_buff_failed++;
  1369. failure = true;
  1370. break;
  1371. }
  1372. /* initialize queue mapping */
  1373. skb_record_rx_queue(skb, rx_ring->queue_index);
  1374. /* we are reusing so sync this buffer for CPU use */
  1375. dma_sync_single_range_for_cpu(rx_ring->dev,
  1376. rx_ring->rx_bi[0].dma,
  1377. i * rx_ring->rx_hdr_len,
  1378. rx_ring->rx_hdr_len,
  1379. DMA_FROM_DEVICE);
  1380. }
  1381. rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
  1382. I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
  1383. rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
  1384. I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
  1385. rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
  1386. I40E_RXD_QW1_LENGTH_SPH_SHIFT;
  1387. rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
  1388. I40E_RXD_QW1_ERROR_SHIFT;
  1389. rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
  1390. rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
  1391. rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
  1392. I40E_RXD_QW1_PTYPE_SHIFT;
  1393. /* sync half-page for reading */
  1394. dma_sync_single_range_for_cpu(rx_ring->dev,
  1395. rx_bi->page_dma,
  1396. rx_bi->page_offset,
  1397. PAGE_SIZE / 2,
  1398. DMA_FROM_DEVICE);
  1399. prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
  1400. rx_bi->skb = NULL;
  1401. cleaned_count++;
  1402. copysize = 0;
  1403. if (rx_hbo || rx_sph) {
  1404. int len;
  1405. if (rx_hbo)
  1406. len = I40E_RX_HDR_SIZE;
  1407. else
  1408. len = rx_header_len;
  1409. memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
  1410. } else if (skb->len == 0) {
  1411. int len;
  1412. unsigned char *va = page_address(rx_bi->page) +
  1413. rx_bi->page_offset;
  1414. len = min(rx_packet_len, rx_ring->rx_hdr_len);
  1415. memcpy(__skb_put(skb, len), va, len);
  1416. copysize = len;
  1417. rx_packet_len -= len;
  1418. }
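/* copysize now holds the bytes already copied from the page into the skb
 * head, so the page fragment attached below starts at page_offset + copysize
 * and covers only the remaining rx_packet_len bytes.
 */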
  1419. /* Get the rest of the data if this was a header split */
  1420. if (rx_packet_len) {
  1421. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
  1422. rx_bi->page,
  1423. rx_bi->page_offset + copysize,
  1424. rx_packet_len, I40E_RXBUFFER_2048);
  1425. /* If the page count is more than 2, then both halves
  1426. * of the page are used and we need to free it. Do it
  1427. * here instead of in the alloc code. Otherwise one
  1428. * of the half-pages might be released between now and
  1429. * then, and we wouldn't know which one to use.
  1430. * Don't call get_page and free_page since those are
  1431. * both expensive atomic operations that just change
  1432. * the refcount in opposite directions. Just give the
  1433. * page to the stack; he can have our refcount.
  1434. */
  1435. if (page_count(rx_bi->page) > 2) {
  1436. dma_unmap_page(rx_ring->dev,
  1437. rx_bi->page_dma,
  1438. PAGE_SIZE,
  1439. DMA_FROM_DEVICE);
  1440. rx_bi->page = NULL;
  1441. rx_bi->page_dma = 0;
  1442. rx_ring->rx_stats.realloc_count++;
  1443. } else {
  1444. get_page(rx_bi->page);
  1445. /* switch to the other half-page here; the
  1446. * allocation code programs the right addr
  1447. * into HW. If we haven't used this half-page,
  1448. * the address won't be changed, and HW can
  1449. * just use it next time through.
  1450. */
  1451. rx_bi->page_offset ^= PAGE_SIZE / 2;
  1452. }
  1453. }
  1454. I40E_RX_INCREMENT(rx_ring, i);
  1455. if (unlikely(
  1456. !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
  1457. struct i40e_rx_buffer *next_buffer;
  1458. next_buffer = &rx_ring->rx_bi[i];
  1459. next_buffer->skb = skb;
  1460. rx_ring->rx_stats.non_eop_descs++;
  1461. continue;
  1462. }
  1463. /* ERR_MASK will only have valid bits if EOP set */
  1464. if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
  1465. dev_kfree_skb_any(skb);
  1466. continue;
  1467. }
  1468. i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
  1469. if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
  1470. i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
  1471. I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
  1472. I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
  1473. rx_ring->last_rx_timestamp = jiffies;
  1474. }
  1475. /* probably a little skewed due to removing CRC */
  1476. total_rx_bytes += skb->len;
  1477. total_rx_packets++;
  1478. skb->protocol = eth_type_trans(skb, rx_ring->netdev);
  1479. i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
  1480. vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
  1481. ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
  1482. : 0;
  1483. #ifdef I40E_FCOE
  1484. if (unlikely(
  1485. i40e_rx_is_fcoe(rx_ptype) &&
  1486. !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
  1487. dev_kfree_skb_any(skb);
  1488. continue;
  1489. }
  1490. #endif
  1491. i40e_receive_skb(rx_ring, skb, vlan_tag);
  1492. rx_desc->wb.qword1.status_error_len = 0;
  1493. } while (likely(total_rx_packets < budget));
  1494. u64_stats_update_begin(&rx_ring->syncp);
  1495. rx_ring->stats.packets += total_rx_packets;
  1496. rx_ring->stats.bytes += total_rx_bytes;
  1497. u64_stats_update_end(&rx_ring->syncp);
  1498. rx_ring->q_vector->rx.total_packets += total_rx_packets;
  1499. rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
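/* On a buffer allocation failure claim the full budget so the NAPI poll
 * loop treats this clean as unfinished and comes back to us.
 */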
  1500. return failure ? budget : total_rx_packets;
  1501. }
  1502. /**
  1503. * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
  1504. * @rx_ring: rx ring to clean
  1505. * @budget: how many cleans we're allowed
  1506. *
  1507. * Returns number of packets cleaned
  1508. **/
  1509. static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
  1510. {
  1511. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  1512. u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
  1513. struct i40e_vsi *vsi = rx_ring->vsi;
  1514. union i40e_rx_desc *rx_desc;
  1515. u32 rx_error, rx_status;
  1516. u16 rx_packet_len;
  1517. bool failure = false;
  1518. u8 rx_ptype;
  1519. u64 qword;
  1520. u16 i;
  1521. do {
  1522. struct i40e_rx_buffer *rx_bi;
  1523. struct sk_buff *skb;
  1524. u16 vlan_tag;
  1525. /* return some buffers to hardware, one at a time is too slow */
  1526. if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
  1527. failure = failure ||
  1528. i40e_alloc_rx_buffers_1buf(rx_ring,
  1529. cleaned_count);
  1530. cleaned_count = 0;
  1531. }
  1532. i = rx_ring->next_to_clean;
  1533. rx_desc = I40E_RX_DESC(rx_ring, i);
  1534. qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
  1535. rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
  1536. I40E_RXD_QW1_STATUS_SHIFT;
  1537. if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
  1538. break;
  1539. /* This memory barrier is needed to keep us from reading
  1540. * any other fields out of the rx_desc until we know the
  1541. * DD bit is set.
  1542. */
  1543. dma_rmb();
  1544. if (i40e_rx_is_programming_status(qword)) {
  1545. i40e_clean_programming_status(rx_ring, rx_desc);
  1546. I40E_RX_INCREMENT(rx_ring, i);
  1547. continue;
  1548. }
  1549. rx_bi = &rx_ring->rx_bi[i];
  1550. skb = rx_bi->skb;
  1551. prefetch(skb->data);
  1552. rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
  1553. I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
  1554. rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
  1555. I40E_RXD_QW1_ERROR_SHIFT;
  1556. rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
  1557. rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
  1558. I40E_RXD_QW1_PTYPE_SHIFT;
  1559. rx_bi->skb = NULL;
  1560. cleaned_count++;
1561. /* Get the header and possibly the whole packet.
1562. * If this is an skb from a previous receive, dma will be 0.
1563. */
  1564. skb_put(skb, rx_packet_len);
  1565. dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
  1566. DMA_FROM_DEVICE);
  1567. rx_bi->dma = 0;
  1568. I40E_RX_INCREMENT(rx_ring, i);
  1569. if (unlikely(
  1570. !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
  1571. rx_ring->rx_stats.non_eop_descs++;
  1572. continue;
  1573. }
  1574. /* ERR_MASK will only have valid bits if EOP set */
  1575. if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
  1576. dev_kfree_skb_any(skb);
  1577. continue;
  1578. }
  1579. i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
  1580. if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
  1581. i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
  1582. I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
  1583. I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
  1584. rx_ring->last_rx_timestamp = jiffies;
  1585. }
  1586. /* probably a little skewed due to removing CRC */
  1587. total_rx_bytes += skb->len;
  1588. total_rx_packets++;
  1589. skb->protocol = eth_type_trans(skb, rx_ring->netdev);
  1590. i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
  1591. vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
  1592. ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
  1593. : 0;
  1594. #ifdef I40E_FCOE
  1595. if (unlikely(
  1596. i40e_rx_is_fcoe(rx_ptype) &&
  1597. !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
  1598. dev_kfree_skb_any(skb);
  1599. continue;
  1600. }
  1601. #endif
  1602. i40e_receive_skb(rx_ring, skb, vlan_tag);
  1603. rx_desc->wb.qword1.status_error_len = 0;
  1604. } while (likely(total_rx_packets < budget));
  1605. u64_stats_update_begin(&rx_ring->syncp);
  1606. rx_ring->stats.packets += total_rx_packets;
  1607. rx_ring->stats.bytes += total_rx_bytes;
  1608. u64_stats_update_end(&rx_ring->syncp);
  1609. rx_ring->q_vector->rx.total_packets += total_rx_packets;
  1610. rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
  1611. return failure ? budget : total_rx_packets;
  1612. }
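/**
 * i40e_buildreg_itr - build a value for the dynamic interrupt control register
 * @type: ITR index to select (I40E_RX_ITR, I40E_TX_ITR or I40E_ITR_NONE)
 * @itr: ITR interval to program
 **/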
  1613. static u32 i40e_buildreg_itr(const int type, const u16 itr)
  1614. {
  1615. u32 val;
  1616. val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
  1617. /* Don't clear PBA because that can cause lost interrupts that
  1618. * came in while we were cleaning/polling
  1619. */
  1620. (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
  1621. (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
  1622. return val;
  1623. }
  1624. /* a small macro to shorten up some long lines */
  1625. #define INTREG I40E_PFINT_DYN_CTLN
  1626. /**
  1627. * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
  1628. * @vsi: the VSI we care about
  1629. * @q_vector: q_vector for which itr is being updated and interrupt enabled
  1630. *
  1631. **/
  1632. static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
  1633. struct i40e_q_vector *q_vector)
  1634. {
  1635. struct i40e_hw *hw = &vsi->back->hw;
  1636. bool rx = false, tx = false;
  1637. u32 rxval, txval;
  1638. int vector;
  1639. int idx = q_vector->v_idx;
  1640. vector = (q_vector->v_idx + vsi->base_vector);
  1641. /* avoid dynamic calculation if in countdown mode OR if
  1642. * all dynamic is disabled
  1643. */
  1644. rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
  1645. if (q_vector->itr_countdown > 0 ||
  1646. (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
  1647. !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
  1648. goto enable_int;
  1649. }
  1650. if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
  1651. rx = i40e_set_new_dynamic_itr(&q_vector->rx);
  1652. rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
  1653. }
  1654. if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
  1655. tx = i40e_set_new_dynamic_itr(&q_vector->tx);
  1656. txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
  1657. }
  1658. if (rx || tx) {
  1659. /* get the higher of the two ITR adjustments and
  1660. * use the same value for both ITR registers
  1661. * when in adaptive mode (Rx and/or Tx)
  1662. */
  1663. u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
  1664. q_vector->tx.itr = q_vector->rx.itr = itr;
  1665. txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
  1666. tx = true;
  1667. rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
  1668. rx = true;
  1669. }
  1670. /* only need to enable the interrupt once, but need
  1671. * to possibly update both ITR values
  1672. */
  1673. if (rx) {
  1674. /* set the INTENA_MSK_MASK so that this first write
  1675. * won't actually enable the interrupt, instead just
  1676. * updating the ITR (it's bit 31 PF and VF)
  1677. */
  1678. rxval |= BIT(31);
  1679. /* don't check _DOWN because interrupt isn't being enabled */
  1680. wr32(hw, INTREG(vector - 1), rxval);
  1681. }
  1682. enable_int:
  1683. if (!test_bit(__I40E_DOWN, &vsi->state))
  1684. wr32(hw, INTREG(vector - 1), txval);
  1685. if (q_vector->itr_countdown)
  1686. q_vector->itr_countdown--;
  1687. else
  1688. q_vector->itr_countdown = ITR_COUNTDOWN_START;
  1689. }
  1690. /**
  1691. * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1692. * @napi: napi struct with our device's info in it
  1693. * @budget: amount of work driver is allowed to do this pass, in packets
  1694. *
  1695. * This function will clean all queues associated with a q_vector.
  1696. *
  1697. * Returns the amount of work done
  1698. **/
  1699. int i40e_napi_poll(struct napi_struct *napi, int budget)
  1700. {
  1701. struct i40e_q_vector *q_vector =
  1702. container_of(napi, struct i40e_q_vector, napi);
  1703. struct i40e_vsi *vsi = q_vector->vsi;
  1704. struct i40e_ring *ring;
  1705. bool clean_complete = true;
  1706. bool arm_wb = false;
  1707. int budget_per_ring;
  1708. int work_done = 0;
  1709. if (test_bit(__I40E_DOWN, &vsi->state)) {
  1710. napi_complete(napi);
  1711. return 0;
  1712. }
  1713. /* Clear hung_detected bit */
  1714. clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
  1715. /* Since the actual Tx work is minimal, we can give the Tx a larger
  1716. * budget and be more aggressive about cleaning up the Tx descriptors.
  1717. */
  1718. i40e_for_each_ring(ring, q_vector->tx) {
  1719. if (!i40e_clean_tx_irq(vsi, ring, budget)) {
  1720. clean_complete = false;
  1721. continue;
  1722. }
  1723. arm_wb |= ring->arm_wb;
  1724. ring->arm_wb = false;
  1725. }
  1726. /* Handle case where we are called by netpoll with a budget of 0 */
  1727. if (budget <= 0)
  1728. goto tx_only;
  1729. /* We attempt to distribute budget to each Rx queue fairly, but don't
  1730. * allow the budget to go below 1 because that would exit polling early.
  1731. */
  1732. budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
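/* For example, with the default NAPI weight of 64 and two ring pairs on
 * this vector, each Rx ring gets a budget of 32 packets per poll.
 */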
  1733. i40e_for_each_ring(ring, q_vector->rx) {
  1734. int cleaned;
  1735. if (ring_is_ps_enabled(ring))
  1736. cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
  1737. else
  1738. cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
  1739. work_done += cleaned;
  1740. /* if we clean as many as budgeted, we must not be done */
  1741. if (cleaned >= budget_per_ring)
  1742. clean_complete = false;
  1743. }
  1744. /* If work not completed, return budget and polling will return */
  1745. if (!clean_complete) {
  1746. tx_only:
  1747. if (arm_wb) {
  1748. q_vector->tx.ring[0].tx_stats.tx_force_wb++;
  1749. i40e_enable_wb_on_itr(vsi, q_vector);
  1750. }
  1751. return budget;
  1752. }
  1753. if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
  1754. q_vector->arm_wb_state = false;
  1755. /* Work is done so exit the polling mode and re-enable the interrupt */
  1756. napi_complete_done(napi, work_done);
  1757. if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
  1758. i40e_update_enable_itr(vsi, q_vector);
  1759. } else { /* Legacy mode */
  1760. i40e_irq_dynamic_enable_icr0(vsi->back, false);
  1761. }
  1762. return 0;
  1763. }
  1764. /**
  1765. * i40e_atr - Add a Flow Director ATR filter
  1766. * @tx_ring: ring to add programming descriptor to
  1767. * @skb: send buffer
  1768. * @tx_flags: send tx flags
  1769. **/
  1770. static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  1771. u32 tx_flags)
  1772. {
  1773. struct i40e_filter_program_desc *fdir_desc;
  1774. struct i40e_pf *pf = tx_ring->vsi->back;
  1775. union {
  1776. unsigned char *network;
  1777. struct iphdr *ipv4;
  1778. struct ipv6hdr *ipv6;
  1779. } hdr;
  1780. struct tcphdr *th;
  1781. unsigned int hlen;
  1782. u32 flex_ptype, dtype_cmd;
  1783. int l4_proto;
  1784. u16 i;
  1785. /* make sure ATR is enabled */
  1786. if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
  1787. return;
  1788. if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
  1789. return;
  1790. /* if sampling is disabled do nothing */
  1791. if (!tx_ring->atr_sample_rate)
  1792. return;
  1793. /* Currently only IPv4/IPv6 with TCP is supported */
  1794. if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
  1795. return;
  1796. /* snag network header to get L4 type and address */
  1797. hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
  1798. skb_inner_network_header(skb) : skb_network_header(skb);
  1799. /* Note: tx_flags gets modified to reflect inner protocols in
1800. * the tx_enable_csum function if encap is enabled.
  1801. */
  1802. if (tx_flags & I40E_TX_FLAGS_IPV4) {
  1803. /* access ihl as u8 to avoid unaligned access on ia64 */
  1804. hlen = (hdr.network[0] & 0x0F) << 2;
  1805. l4_proto = hdr.ipv4->protocol;
  1806. } else {
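/* ipv6_find_hdr() works with offsets from skb->data: pass in the offset of
 * the IPv6 header via hlen, get back the offset of the TCP header, then
 * convert that back into a length relative to the network header.
 */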
  1807. hlen = hdr.network - skb->data;
  1808. l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
  1809. hlen -= hdr.network - skb->data;
  1810. }
  1811. if (l4_proto != IPPROTO_TCP)
  1812. return;
  1813. th = (struct tcphdr *)(hdr.network + hlen);
  1814. /* Due to lack of space, no more new filters can be programmed */
  1815. if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
  1816. return;
  1817. if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
  1818. (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
  1819. /* HW ATR eviction will take care of removing filters on FIN
  1820. * and RST packets.
  1821. */
  1822. if (th->fin || th->rst)
  1823. return;
  1824. }
  1825. tx_ring->atr_count++;
  1826. /* sample on all syn/fin/rst packets or once every atr sample rate */
  1827. if (!th->fin &&
  1828. !th->syn &&
  1829. !th->rst &&
  1830. (tx_ring->atr_count < tx_ring->atr_sample_rate))
  1831. return;
  1832. tx_ring->atr_count = 0;
  1833. /* grab the next descriptor */
  1834. i = tx_ring->next_to_use;
  1835. fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
  1836. i++;
  1837. tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  1838. flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
  1839. I40E_TXD_FLTR_QW0_QINDEX_MASK;
  1840. flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
  1841. (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
  1842. I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
  1843. (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
  1844. I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
  1845. flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
  1846. dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
  1847. dtype_cmd |= (th->fin || th->rst) ?
  1848. (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
  1849. I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
  1850. (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
  1851. I40E_TXD_FLTR_QW1_PCMD_SHIFT);
  1852. dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
  1853. I40E_TXD_FLTR_QW1_DEST_SHIFT;
  1854. dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
  1855. I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
  1856. dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
  1857. if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
  1858. dtype_cmd |=
  1859. ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
  1860. I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
  1861. I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
  1862. else
  1863. dtype_cmd |=
  1864. ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
  1865. I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
  1866. I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
  1867. if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
  1868. (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
  1869. dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
  1870. fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
  1871. fdir_desc->rsvd = cpu_to_le32(0);
  1872. fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
  1873. fdir_desc->fd_id = cpu_to_le32(0);
  1874. }
  1875. /**
  1876. * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  1877. * @skb: send buffer
  1878. * @tx_ring: ring to send buffer on
  1879. * @flags: the tx flags to be set
  1880. *
  1881. * Checks the skb and set up correspondingly several generic transmit flags
  1882. * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
  1883. *
1884. * Returns an error code to indicate the frame should be dropped upon error
1885. * and otherwise returns 0 to indicate the flags have been set properly.
  1886. **/
  1887. #ifdef I40E_FCOE
  1888. inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
  1889. struct i40e_ring *tx_ring,
  1890. u32 *flags)
  1891. #else
  1892. static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
  1893. struct i40e_ring *tx_ring,
  1894. u32 *flags)
  1895. #endif
  1896. {
  1897. __be16 protocol = skb->protocol;
  1898. u32 tx_flags = 0;
  1899. if (protocol == htons(ETH_P_8021Q) &&
  1900. !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
  1901. /* When HW VLAN acceleration is turned off by the user the
  1902. * stack sets the protocol to 8021q so that the driver
  1903. * can take any steps required to support the SW only
  1904. * VLAN handling. In our case the driver doesn't need
  1905. * to take any further steps so just set the protocol
  1906. * to the encapsulated ethertype.
  1907. */
  1908. skb->protocol = vlan_get_protocol(skb);
  1909. goto out;
  1910. }
  1911. /* if we have a HW VLAN tag being added, default to the HW one */
  1912. if (skb_vlan_tag_present(skb)) {
  1913. tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
  1914. tx_flags |= I40E_TX_FLAGS_HW_VLAN;
  1915. /* else if it is a SW VLAN, check the next protocol and store the tag */
  1916. } else if (protocol == htons(ETH_P_8021Q)) {
  1917. struct vlan_hdr *vhdr, _vhdr;
  1918. vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
  1919. if (!vhdr)
  1920. return -EINVAL;
  1921. protocol = vhdr->h_vlan_encapsulated_proto;
  1922. tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
  1923. tx_flags |= I40E_TX_FLAGS_SW_VLAN;
  1924. }
  1925. if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
  1926. goto out;
  1927. /* Insert 802.1p priority into VLAN header */
  1928. if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
  1929. (skb->priority != TC_PRIO_CONTROL)) {
  1930. tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
  1931. tx_flags |= (skb->priority & 0x7) <<
  1932. I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
  1933. if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
  1934. struct vlan_ethhdr *vhdr;
  1935. int rc;
  1936. rc = skb_cow_head(skb, 0);
  1937. if (rc < 0)
  1938. return rc;
  1939. vhdr = (struct vlan_ethhdr *)skb->data;
  1940. vhdr->h_vlan_TCI = htons(tx_flags >>
  1941. I40E_TX_FLAGS_VLAN_SHIFT);
  1942. } else {
  1943. tx_flags |= I40E_TX_FLAGS_HW_VLAN;
  1944. }
  1945. }
  1946. out:
  1947. *flags = tx_flags;
  1948. return 0;
  1949. }
  1950. /**
  1951. * i40e_tso - set up the tso context descriptor
  1952. * @skb: ptr to the skb we're sending
  1953. * @hdr_len: ptr to the size of the packet header
  1954. * @cd_type_cmd_tso_mss: Quad Word 1
  1955. *
1956. * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error
  1957. **/
  1958. static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
  1959. {
  1960. u64 cd_cmd, cd_tso_len, cd_mss;
  1961. union {
  1962. struct iphdr *v4;
  1963. struct ipv6hdr *v6;
  1964. unsigned char *hdr;
  1965. } ip;
  1966. union {
  1967. struct tcphdr *tcp;
  1968. struct udphdr *udp;
  1969. unsigned char *hdr;
  1970. } l4;
  1971. u32 paylen, l4_offset;
  1972. int err;
  1973. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1974. return 0;
  1975. if (!skb_is_gso(skb))
  1976. return 0;
  1977. err = skb_cow_head(skb, 0);
  1978. if (err < 0)
  1979. return err;
  1980. ip.hdr = skb_network_header(skb);
  1981. l4.hdr = skb_transport_header(skb);
  1982. /* initialize outer IP header fields */
  1983. if (ip.v4->version == 4) {
  1984. ip.v4->tot_len = 0;
  1985. ip.v4->check = 0;
  1986. } else {
  1987. ip.v6->payload_len = 0;
  1988. }
  1989. if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
  1990. SKB_GSO_UDP_TUNNEL_CSUM)) {
  1991. if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
  1992. /* determine offset of outer transport header */
  1993. l4_offset = l4.hdr - skb->data;
  1994. /* remove payload length from outer checksum */
  1995. paylen = skb->len - l4_offset;
  1996. csum_replace_by_diff(&l4.udp->check, htonl(paylen));
  1997. }
  1998. /* reset pointers to inner headers */
  1999. ip.hdr = skb_inner_network_header(skb);
  2000. l4.hdr = skb_inner_transport_header(skb);
  2001. /* initialize inner IP header fields */
  2002. if (ip.v4->version == 4) {
  2003. ip.v4->tot_len = 0;
  2004. ip.v4->check = 0;
  2005. } else {
  2006. ip.v6->payload_len = 0;
  2007. }
  2008. }
  2009. /* determine offset of inner transport header */
  2010. l4_offset = l4.hdr - skb->data;
  2011. /* remove payload length from inner checksum */
  2012. paylen = skb->len - l4_offset;
  2013. csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
  2014. /* compute length of segmentation header */
  2015. *hdr_len = (l4.tcp->doff * 4) + l4_offset;
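/* For example, a non-tunneled IPv4/TCP frame with a 14-byte Ethernet header
 * and a 20-byte IP header gives l4_offset = 34, and a 20-byte TCP header
 * (doff == 5) makes *hdr_len = 54.
 */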
  2016. /* find the field values */
  2017. cd_cmd = I40E_TX_CTX_DESC_TSO;
  2018. cd_tso_len = skb->len - *hdr_len;
  2019. cd_mss = skb_shinfo(skb)->gso_size;
  2020. *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
  2021. (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
  2022. (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
  2023. return 1;
  2024. }
  2025. /**
  2026. * i40e_tsyn - set up the tsyn context descriptor
  2027. * @tx_ring: ptr to the ring to send
  2028. * @skb: ptr to the skb we're sending
  2029. * @tx_flags: the collected send information
  2030. * @cd_type_cmd_tso_mss: Quad Word 1
  2031. *
  2032. * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
  2033. **/
  2034. static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
  2035. u32 tx_flags, u64 *cd_type_cmd_tso_mss)
  2036. {
  2037. struct i40e_pf *pf;
  2038. if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
  2039. return 0;
  2040. /* Tx timestamps cannot be sampled when doing TSO */
  2041. if (tx_flags & I40E_TX_FLAGS_TSO)
  2042. return 0;
  2043. /* only timestamp the outbound packet if the user has requested it and
  2044. * we are not already transmitting a packet to be timestamped
  2045. */
  2046. pf = i40e_netdev_to_pf(tx_ring->netdev);
  2047. if (!(pf->flags & I40E_FLAG_PTP))
  2048. return 0;
  2049. if (pf->ptp_tx &&
  2050. !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
  2051. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  2052. pf->ptp_tx_skb = skb_get(skb);
  2053. } else {
  2054. return 0;
  2055. }
  2056. *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
  2057. I40E_TXD_CTX_QW1_CMD_SHIFT;
  2058. return 1;
  2059. }
  2060. /**
  2061. * i40e_tx_enable_csum - Enable Tx checksum offloads
  2062. * @skb: send buffer
  2063. * @tx_flags: pointer to Tx flags currently set
  2064. * @td_cmd: Tx descriptor command bits to set
  2065. * @td_offset: Tx descriptor header offsets to set
  2066. * @tx_ring: Tx descriptor ring
  2067. * @cd_tunneling: ptr to context desc bits
  2068. **/
  2069. static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
  2070. u32 *td_cmd, u32 *td_offset,
  2071. struct i40e_ring *tx_ring,
  2072. u32 *cd_tunneling)
  2073. {
  2074. union {
  2075. struct iphdr *v4;
  2076. struct ipv6hdr *v6;
  2077. unsigned char *hdr;
  2078. } ip;
  2079. union {
  2080. struct tcphdr *tcp;
  2081. struct udphdr *udp;
  2082. unsigned char *hdr;
  2083. } l4;
  2084. unsigned char *exthdr;
  2085. u32 offset, cmd = 0;
  2086. __be16 frag_off;
  2087. u8 l4_proto = 0;
  2088. if (skb->ip_summed != CHECKSUM_PARTIAL)
  2089. return 0;
  2090. ip.hdr = skb_network_header(skb);
  2091. l4.hdr = skb_transport_header(skb);
  2092. /* compute outer L2 header size */
  2093. offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
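/* MACLEN is expressed in 2-byte words, so a plain 14-byte Ethernet header
 * yields 7 here; a VLAN tag would add another 2.
 */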
  2094. if (skb->encapsulation) {
  2095. u32 tunnel = 0;
  2096. /* define outer network header type */
  2097. if (*tx_flags & I40E_TX_FLAGS_IPV4) {
  2098. tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
  2099. I40E_TX_CTX_EXT_IP_IPV4 :
  2100. I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
  2101. l4_proto = ip.v4->protocol;
  2102. } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
  2103. tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
  2104. exthdr = ip.hdr + sizeof(*ip.v6);
  2105. l4_proto = ip.v6->nexthdr;
  2106. if (l4.hdr != exthdr)
  2107. ipv6_skip_exthdr(skb, exthdr - skb->data,
  2108. &l4_proto, &frag_off);
  2109. }
  2110. /* compute outer L3 header size */
  2111. tunnel |= ((l4.hdr - ip.hdr) / 4) <<
  2112. I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
  2113. /* switch IP header pointer from outer to inner header */
  2114. ip.hdr = skb_inner_network_header(skb);
  2115. /* define outer transport */
  2116. switch (l4_proto) {
  2117. case IPPROTO_UDP:
  2118. tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
  2119. *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
  2120. break;
  2121. case IPPROTO_GRE:
  2122. tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
  2123. *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
  2124. break;
  2125. default:
  2126. if (*tx_flags & I40E_TX_FLAGS_TSO)
  2127. return -1;
  2128. skb_checksum_help(skb);
  2129. return 0;
  2130. }
  2131. /* compute tunnel header size */
  2132. tunnel |= ((ip.hdr - l4.hdr) / 2) <<
  2133. I40E_TXD_CTX_QW0_NATLEN_SHIFT;
  2134. /* indicate if we need to offload outer UDP header */
  2135. if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
  2136. (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
  2137. tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
  2138. /* record tunnel offload values */
  2139. *cd_tunneling |= tunnel;
  2140. /* switch L4 header pointer from outer to inner */
  2141. l4.hdr = skb_inner_transport_header(skb);
  2142. l4_proto = 0;
  2143. /* reset type as we transition from outer to inner headers */
  2144. *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
  2145. if (ip.v4->version == 4)
  2146. *tx_flags |= I40E_TX_FLAGS_IPV4;
  2147. if (ip.v6->version == 6)
  2148. *tx_flags |= I40E_TX_FLAGS_IPV6;
  2149. }
  2150. /* Enable IP checksum offloads */
  2151. if (*tx_flags & I40E_TX_FLAGS_IPV4) {
  2152. l4_proto = ip.v4->protocol;
  2153. /* the stack computes the IP header already, the only time we
  2154. * need the hardware to recompute it is in the case of TSO.
  2155. */
  2156. cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
  2157. I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
  2158. I40E_TX_DESC_CMD_IIPT_IPV4;
  2159. } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
  2160. cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
  2161. exthdr = ip.hdr + sizeof(*ip.v6);
  2162. l4_proto = ip.v6->nexthdr;
  2163. if (l4.hdr != exthdr)
  2164. ipv6_skip_exthdr(skb, exthdr - skb->data,
  2165. &l4_proto, &frag_off);
  2166. }
  2167. /* compute inner L3 header size */
  2168. offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
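/* IPLEN is in 4-byte words: a 20-byte IPv4 header with no options gives 5,
 * a bare 40-byte IPv6 header gives 10.
 */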
  2169. /* Enable L4 checksum offloads */
  2170. switch (l4_proto) {
  2171. case IPPROTO_TCP:
  2172. /* enable checksum offloads */
  2173. cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
  2174. offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
  2175. break;
  2176. case IPPROTO_SCTP:
  2177. /* enable SCTP checksum offload */
  2178. cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
  2179. offset |= (sizeof(struct sctphdr) >> 2) <<
  2180. I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
  2181. break;
  2182. case IPPROTO_UDP:
  2183. /* enable UDP checksum offload */
  2184. cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
  2185. offset |= (sizeof(struct udphdr) >> 2) <<
  2186. I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
  2187. break;
  2188. default:
  2189. if (*tx_flags & I40E_TX_FLAGS_TSO)
  2190. return -1;
  2191. skb_checksum_help(skb);
  2192. return 0;
  2193. }
  2194. *td_cmd |= cmd;
  2195. *td_offset |= offset;
  2196. return 1;
  2197. }
  2198. /**
2199. * i40e_create_tx_ctx - Build the Tx context descriptor
  2200. * @tx_ring: ring to create the descriptor on
  2201. * @cd_type_cmd_tso_mss: Quad Word 1
  2202. * @cd_tunneling: Quad Word 0 - bits 0-31
  2203. * @cd_l2tag2: Quad Word 0 - bits 32-63
  2204. **/
  2205. static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
  2206. const u64 cd_type_cmd_tso_mss,
  2207. const u32 cd_tunneling, const u32 cd_l2tag2)
  2208. {
  2209. struct i40e_tx_context_desc *context_desc;
  2210. int i = tx_ring->next_to_use;
  2211. if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
  2212. !cd_tunneling && !cd_l2tag2)
  2213. return;
  2214. /* grab the next descriptor */
  2215. context_desc = I40E_TX_CTXTDESC(tx_ring, i);
  2216. i++;
  2217. tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  2218. /* cpu_to_le32 and assign to struct fields */
  2219. context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
  2220. context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
  2221. context_desc->rsvd = cpu_to_le16(0);
  2222. context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
  2223. }
  2224. /**
  2225. * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
  2226. * @tx_ring: the ring to be checked
  2227. * @size: the size buffer we want to assure is available
  2228. *
  2229. * Returns -EBUSY if a stop is needed, else 0
  2230. **/
  2231. int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  2232. {
  2233. netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
  2234. /* Memory barrier before checking head and tail */
  2235. smp_mb();
  2236. /* Check again in a case another CPU has just made room available. */
  2237. if (likely(I40E_DESC_UNUSED(tx_ring) < size))
  2238. return -EBUSY;
  2239. /* A reprieve! - use start_queue because it doesn't call schedule */
  2240. netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
  2241. ++tx_ring->tx_stats.restart_queue;
  2242. return 0;
  2243. }
  2244. /**
  2245. * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
  2246. * @skb: send buffer
  2247. *
  2248. * Note: Our HW can't scatter-gather more than 8 fragments to build
  2249. * a packet on the wire and so we need to figure out the cases where we
  2250. * need to linearize the skb.
  2251. **/
  2252. bool __i40e_chk_linearize(struct sk_buff *skb)
  2253. {
  2254. const struct skb_frag_struct *frag, *stale;
  2255. int gso_size, nr_frags, sum;
2256. /* check to see if TSO is enabled, if so we may get a reprieve */
  2257. gso_size = skb_shinfo(skb)->gso_size;
  2258. if (unlikely(!gso_size))
  2259. return true;
  2260. /* no need to check if number of frags is less than 8 */
  2261. nr_frags = skb_shinfo(skb)->nr_frags;
  2262. if (nr_frags < I40E_MAX_BUFFER_TXD)
  2263. return false;
  2264. /* We need to walk through the list and validate that each group
  2265. * of 6 fragments totals at least gso_size. However we don't need
  2266. * to perform such validation on the first or last 6 since the first
  2267. * 6 cannot inherit any data from a descriptor before them, and the
  2268. * last 6 cannot inherit any data from a descriptor after them.
  2269. */
  2270. nr_frags -= I40E_MAX_BUFFER_TXD - 1;
  2271. frag = &skb_shinfo(skb)->frags[0];
  2272. /* Initialize size to the negative value of gso_size minus 1. We
2273. * use this as the worst case scenario in which the frag ahead
  2274. * of us only provides one byte which is why we are limited to 6
  2275. * descriptors for a single transmit as the header and previous
  2276. * fragment are already consuming 2 descriptors.
  2277. */
  2278. sum = 1 - gso_size;
  2279. /* Add size of frags 1 through 5 to create our initial sum */
  2280. sum += skb_frag_size(++frag);
  2281. sum += skb_frag_size(++frag);
  2282. sum += skb_frag_size(++frag);
  2283. sum += skb_frag_size(++frag);
  2284. sum += skb_frag_size(++frag);
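/* Illustration: with a typical gso_size of 1448, sum starts at -1447, so
 * every window of six consecutive frags must supply at least 1447 bytes or
 * the loop below goes negative and we ask for linearization.
 */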
  2285. /* Walk through fragments adding latest fragment, testing it, and
  2286. * then removing stale fragments from the sum.
  2287. */
  2288. stale = &skb_shinfo(skb)->frags[0];
  2289. for (;;) {
  2290. sum += skb_frag_size(++frag);
  2291. /* if sum is negative we failed to make sufficient progress */
  2292. if (sum < 0)
  2293. return true;
  2294. /* use pre-decrement to avoid processing last fragment */
  2295. if (!--nr_frags)
  2296. break;
  2297. sum -= skb_frag_size(++stale);
  2298. }
  2299. return false;
  2300. }
  2301. /**
  2302. * i40e_tx_map - Build the Tx descriptor
  2303. * @tx_ring: ring to send buffer on
  2304. * @skb: send buffer
  2305. * @first: first buffer info buffer to use
  2306. * @tx_flags: collected send information
  2307. * @hdr_len: size of the packet header
  2308. * @td_cmd: the command field in the descriptor
  2309. * @td_offset: offset for checksum or crc
  2310. **/
  2311. #ifdef I40E_FCOE
  2312. inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
  2313. struct i40e_tx_buffer *first, u32 tx_flags,
  2314. const u8 hdr_len, u32 td_cmd, u32 td_offset)
  2315. #else
  2316. static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
  2317. struct i40e_tx_buffer *first, u32 tx_flags,
  2318. const u8 hdr_len, u32 td_cmd, u32 td_offset)
  2319. #endif
  2320. {
  2321. unsigned int data_len = skb->data_len;
  2322. unsigned int size = skb_headlen(skb);
  2323. struct skb_frag_struct *frag;
  2324. struct i40e_tx_buffer *tx_bi;
  2325. struct i40e_tx_desc *tx_desc;
  2326. u16 i = tx_ring->next_to_use;
  2327. u32 td_tag = 0;
  2328. dma_addr_t dma;
  2329. u16 gso_segs;
  2330. u16 desc_count = 0;
  2331. bool tail_bump = true;
  2332. bool do_rs = false;
  2333. if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
  2334. td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
  2335. td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
  2336. I40E_TX_FLAGS_VLAN_SHIFT;
  2337. }
  2338. if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
  2339. gso_segs = skb_shinfo(skb)->gso_segs;
  2340. else
  2341. gso_segs = 1;
  2342. /* multiply data chunks by size of headers */
  2343. first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
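/* Example: a TSO skb of 2950 bytes with 54 bytes of headers and
 * gso_size = 1448 splits into two segments, so bytecount becomes
 * 2950 - 54 + 2 * 54 = 3004, the total bytes that will go on the wire.
 */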
  2344. first->gso_segs = gso_segs;
  2345. first->skb = skb;
  2346. first->tx_flags = tx_flags;
  2347. dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
  2348. tx_desc = I40E_TX_DESC(tx_ring, i);
  2349. tx_bi = first;
  2350. for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
  2351. unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
  2352. if (dma_mapping_error(tx_ring->dev, dma))
  2353. goto dma_error;
  2354. /* record length, and DMA address */
  2355. dma_unmap_len_set(tx_bi, len, size);
  2356. dma_unmap_addr_set(tx_bi, dma, dma);
  2357. /* align size to end of page */
  2358. max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
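/* (-dma & (I40E_MAX_READ_REQ_SIZE - 1)) is the distance from dma up to the
 * next read-request-size boundary, so, assuming the ALIGNED constant is a
 * multiple of that size, the first oversized chunk ends exactly on such a
 * boundary and the following chunks stay aligned.
 */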
  2359. tx_desc->buffer_addr = cpu_to_le64(dma);
  2360. while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
  2361. tx_desc->cmd_type_offset_bsz =
  2362. build_ctob(td_cmd, td_offset,
  2363. max_data, td_tag);
  2364. tx_desc++;
  2365. i++;
  2366. desc_count++;
  2367. if (i == tx_ring->count) {
  2368. tx_desc = I40E_TX_DESC(tx_ring, 0);
  2369. i = 0;
  2370. }
  2371. dma += max_data;
  2372. size -= max_data;
  2373. max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
  2374. tx_desc->buffer_addr = cpu_to_le64(dma);
  2375. }
  2376. if (likely(!data_len))
  2377. break;
  2378. tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
  2379. size, td_tag);
  2380. tx_desc++;
  2381. i++;
  2382. desc_count++;
  2383. if (i == tx_ring->count) {
  2384. tx_desc = I40E_TX_DESC(tx_ring, 0);
  2385. i = 0;
  2386. }
  2387. size = skb_frag_size(frag);
  2388. data_len -= size;
  2389. dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
  2390. DMA_TO_DEVICE);
  2391. tx_bi = &tx_ring->tx_bi[i];
  2392. }
  2393. /* set next_to_watch value indicating a packet is present */
  2394. first->next_to_watch = tx_desc;
  2395. i++;
  2396. if (i == tx_ring->count)
  2397. i = 0;
  2398. tx_ring->next_to_use = i;
  2399. netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
  2400. tx_ring->queue_index),
  2401. first->bytecount);
  2402. i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
  2403. /* Algorithm to optimize tail and RS bit setting:
  2404. * if xmit_more is supported
  2405. * if xmit_more is true
  2406. * do not update tail and do not mark RS bit.
  2407. * if xmit_more is false and last xmit_more was false
  2408. * if every packet spanned less than 4 desc
  2409. * then set RS bit on 4th packet and update tail
  2410. * on every packet
  2411. * else
  2412. * update tail and set RS bit on every packet.
  2413. * if xmit_more is false and last_xmit_more was true
  2414. * update tail and set RS bit.
  2415. *
  2416. * Optimization: wmb to be issued only in case of tail update.
  2417. * Also optimize the Descriptor WB path for RS bit with the same
  2418. * algorithm.
  2419. *
  2420. * Note: If there are less than 4 packets
  2421. * pending and interrupts were disabled the service task will
  2422. * trigger a force WB.
  2423. */
  2424. if (skb->xmit_more &&
  2425. !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
  2426. tx_ring->queue_index))) {
  2427. tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
  2428. tail_bump = false;
  2429. } else if (!skb->xmit_more &&
  2430. !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
  2431. tx_ring->queue_index)) &&
  2432. (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
  2433. (tx_ring->packet_stride < WB_STRIDE) &&
  2434. (desc_count < WB_STRIDE)) {
  2435. tx_ring->packet_stride++;
  2436. } else {
  2437. tx_ring->packet_stride = 0;
  2438. tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
  2439. do_rs = true;
  2440. }
  2441. if (do_rs)
  2442. tx_ring->packet_stride = 0;
  2443. tx_desc->cmd_type_offset_bsz =
  2444. build_ctob(td_cmd, td_offset, size, td_tag) |
  2445. cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
  2446. I40E_TX_DESC_CMD_EOP) <<
  2447. I40E_TXD_QW1_CMD_SHIFT);
  2448. /* notify HW of packet */
  2449. if (!tail_bump)
  2450. prefetchw(tx_desc + 1);
  2451. if (tail_bump) {
  2452. /* Force memory writes to complete before letting h/w
  2453. * know there are new descriptors to fetch. (Only
  2454. * applicable for weak-ordered memory model archs,
  2455. * such as IA-64).
  2456. */
  2457. wmb();
  2458. writel(i, tx_ring->tail);
  2459. }
  2460. return;
  2461. dma_error:
  2462. dev_info(tx_ring->dev, "TX DMA map failed\n");
  2463. /* clear dma mappings for failed tx_bi map */
  2464. for (;;) {
  2465. tx_bi = &tx_ring->tx_bi[i];
  2466. i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
  2467. if (tx_bi == first)
  2468. break;
  2469. if (i == 0)
  2470. i = tx_ring->count;
  2471. i--;
  2472. }
  2473. tx_ring->next_to_use = i;
  2474. }
  2475. /**
  2476. * i40e_xmit_frame_ring - Sends buffer on Tx ring
  2477. * @skb: send buffer
  2478. * @tx_ring: ring to send buffer on
  2479. *
  2480. * Returns NETDEV_TX_OK if sent, else an error code
  2481. **/
  2482. static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
  2483. struct i40e_ring *tx_ring)
  2484. {
  2485. u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
  2486. u32 cd_tunneling = 0, cd_l2tag2 = 0;
  2487. struct i40e_tx_buffer *first;
  2488. u32 td_offset = 0;
  2489. u32 tx_flags = 0;
  2490. __be16 protocol;
  2491. u32 td_cmd = 0;
  2492. u8 hdr_len = 0;
  2493. int tso, count;
  2494. int tsyn;
  2495. /* prefetch the data, we'll need it later */
  2496. prefetch(skb->data);
  2497. count = i40e_xmit_descriptor_count(skb);
  2498. if (i40e_chk_linearize(skb, count)) {
  2499. if (__skb_linearize(skb))
  2500. goto out_drop;
  2501. count = i40e_txd_use_count(skb->len);
  2502. tx_ring->tx_stats.tx_linearize++;
  2503. }
  2504. /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
  2505. * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
  2506. * + 4 desc gap to avoid the cache line where head is,
  2507. * + 1 desc for context descriptor,
  2508. * otherwise try next time
  2509. */
  2510. if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
  2511. tx_ring->tx_stats.tx_busy++;
  2512. return NETDEV_TX_BUSY;
  2513. }
  2514. /* prepare the xmit flags */
  2515. if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
  2516. goto out_drop;
  2517. /* obtain protocol of skb */
  2518. protocol = vlan_get_protocol(skb);
  2519. /* record the location of the first descriptor for this packet */
  2520. first = &tx_ring->tx_bi[tx_ring->next_to_use];
  2521. /* setup IPv4/IPv6 offloads */
  2522. if (protocol == htons(ETH_P_IP))
  2523. tx_flags |= I40E_TX_FLAGS_IPV4;
  2524. else if (protocol == htons(ETH_P_IPV6))
  2525. tx_flags |= I40E_TX_FLAGS_IPV6;
  2526. tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
  2527. if (tso < 0)
  2528. goto out_drop;
  2529. else if (tso)
  2530. tx_flags |= I40E_TX_FLAGS_TSO;
  2531. /* Always offload the checksum, since it's in the data descriptor */
  2532. tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
  2533. tx_ring, &cd_tunneling);
  2534. if (tso < 0)
  2535. goto out_drop;
  2536. tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
  2537. if (tsyn)
  2538. tx_flags |= I40E_TX_FLAGS_TSYN;
  2539. skb_tx_timestamp(skb);
  2540. /* always enable CRC insertion offload */
  2541. td_cmd |= I40E_TX_DESC_CMD_ICRC;
  2542. i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
  2543. cd_tunneling, cd_l2tag2);
  2544. /* Add Flow Director ATR if it's enabled.
  2545. *
  2546. * NOTE: this must always be directly before the data descriptor.
  2547. */
  2548. i40e_atr(tx_ring, skb, tx_flags);
  2549. i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
  2550. td_cmd, td_offset);
  2551. return NETDEV_TX_OK;
  2552. out_drop:
  2553. dev_kfree_skb_any(skb);
  2554. return NETDEV_TX_OK;
  2555. }
  2556. /**
  2557. * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
  2558. * @skb: send buffer
  2559. * @netdev: network interface device structure
  2560. *
  2561. * Returns NETDEV_TX_OK if sent, else an error code
  2562. **/
  2563. netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  2564. {
  2565. struct i40e_netdev_priv *np = netdev_priv(netdev);
  2566. struct i40e_vsi *vsi = np->vsi;
  2567. struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
  2568. /* hardware can't handle really short frames, hardware padding works
  2569. * beyond this point
  2570. */
  2571. if (skb_put_padto(skb, I40E_MIN_TX_LEN))
  2572. return NETDEV_TX_OK;
  2573. return i40e_xmit_frame_ring(skb, tx_ring);
  2574. }