hns3_enet.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. // Copyright (c) 2016-2017 Hisilicon Limited.
  3. #include <linux/dma-mapping.h>
  4. #include <linux/etherdevice.h>
  5. #include <linux/interrupt.h>
  6. #include <linux/if_vlan.h>
  7. #include <linux/ip.h>
  8. #include <linux/ipv6.h>
  9. #include <linux/module.h>
  10. #include <linux/pci.h>
  11. #include <linux/skbuff.h>
  12. #include <linux/sctp.h>
  13. #include <linux/vermagic.h>
  14. #include <net/gre.h>
  15. #include <net/pkt_cls.h>
  16. #include <net/vxlan.h>
  17. #include "hnae3.h"
  18. #include "hns3_enet.h"
  19. static void hns3_clear_all_ring(struct hnae3_handle *h);
  20. static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
  21. static const char hns3_driver_name[] = "hns3";
  22. const char hns3_driver_version[] = VERMAGIC_STRING;
  23. static const char hns3_driver_string[] =
  24. "Hisilicon Ethernet Network Driver for Hip08 Family";
  25. static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
  26. static struct hnae3_client client;
  27. /* hns3_pci_tbl - PCI Device ID Table
  28. *
  29. * Last entry must be all 0s
  30. *
  31. * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  32. * Class, Class Mask, private data (not used) }
  33. */
  34. static const struct pci_device_id hns3_pci_tbl[] = {
  35. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
  36. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
  37. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
  38. HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  39. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
  40. HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  41. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
  42. HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  43. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
  44. HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  45. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
  46. HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  47. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
  48. {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
  49. HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  50. /* required last entry */
  51. {0, }
  52. };
  53. MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
  54. static irqreturn_t hns3_irq_handle(int irq, void *vector)
  55. {
  56. struct hns3_enet_tqp_vector *tqp_vector = vector;
  57. napi_schedule(&tqp_vector->napi);
  58. return IRQ_HANDLED;
  59. }
  60. static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
  61. {
  62. struct hns3_enet_tqp_vector *tqp_vectors;
  63. unsigned int i;
  64. for (i = 0; i < priv->vector_num; i++) {
  65. tqp_vectors = &priv->tqp_vector[i];
  66. if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
  67. continue;
  68. /* release the irq resource */
  69. free_irq(tqp_vectors->vector_irq, tqp_vectors);
  70. tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
  71. }
  72. }
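/* Request one IRQ per initialized TQP vector. Each IRQ is named after the
 * netdev and the vector's role ("TxRx", "Rx" or "Tx"); vectors without any
 * ring attached are skipped.
 */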
  73. static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
  74. {
  75. struct hns3_enet_tqp_vector *tqp_vectors;
  76. int txrx_int_idx = 0;
  77. int rx_int_idx = 0;
  78. int tx_int_idx = 0;
  79. unsigned int i;
  80. int ret;
  81. for (i = 0; i < priv->vector_num; i++) {
  82. tqp_vectors = &priv->tqp_vector[i];
  83. if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
  84. continue;
  85. if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
  86. snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
  87. "%s-%s-%d", priv->netdev->name, "TxRx",
  88. txrx_int_idx++);
  89. txrx_int_idx++;
  90. } else if (tqp_vectors->rx_group.ring) {
  91. snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
  92. "%s-%s-%d", priv->netdev->name, "Rx",
  93. rx_int_idx++);
  94. } else if (tqp_vectors->tx_group.ring) {
  95. snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
  96. "%s-%s-%d", priv->netdev->name, "Tx",
  97. tx_int_idx++);
  98. } else {
  99. /* Skip this unused q_vector */
  100. continue;
  101. }
  102. tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
  103. ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
  104. tqp_vectors->name,
  105. tqp_vectors);
  106. if (ret) {
  107. netdev_err(priv->netdev, "request irq(%d) fail\n",
  108. tqp_vectors->vector_irq);
  109. return ret;
  110. }
  111. tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
  112. }
  113. return 0;
  114. }
  115. static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
  116. u32 mask_en)
  117. {
  118. writel(mask_en, tqp_vector->mask_addr);
  119. }
  120. static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
  121. {
  122. napi_enable(&tqp_vector->napi);
  123. /* enable vector */
  124. hns3_mask_vector_irq(tqp_vector, 1);
  125. }
  126. static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
  127. {
  128. /* disable vector */
  129. hns3_mask_vector_irq(tqp_vector, 0);
  130. disable_irq(tqp_vector->vector_irq);
  131. napi_disable(&tqp_vector->napi);
  132. }
  133. void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
  134. u32 rl_value)
  135. {
  136. u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
  137. /* This defines the configuration for RL (Interrupt Rate Limiter).
  138. * RL defines the rate of interrupts, i.e. the number of interrupts per second.
  139. * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
  140. */
  141. if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
  142. !tqp_vector->rx_group.coal.gl_adapt_enable)
  143. /* According to the hardware, the range of rl_reg is
  144. * 0-59 and the unit is 4.
  145. */
  146. rl_reg |= HNS3_INT_RL_ENABLE_MASK;
  147. writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
  148. }
  149. void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
  150. u32 gl_value)
  151. {
  152. u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
  153. writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
  154. }
  155. void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
  156. u32 gl_value)
  157. {
  158. u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
  159. writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
  160. }
  161. static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
  162. struct hns3_nic_priv *priv)
  163. {
  164. struct hnae3_handle *h = priv->ae_handle;
  165. /* initialize the configuration for interrupt coalescing.
  166. * 1. GL (Interrupt Gap Limiter)
  167. * 2. RL (Interrupt Rate Limiter)
  168. */
  169. /* Default: enable self-adaptive interrupt coalescing and set the GL values */
  170. tqp_vector->tx_group.coal.gl_adapt_enable = 1;
  171. tqp_vector->rx_group.coal.gl_adapt_enable = 1;
  172. tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
  173. tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
  174. /* Default: disable RL */
  175. h->kinfo.int_rl_setting = 0;
  176. tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
  177. tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
  178. tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
  179. }
  180. static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
  181. struct hns3_nic_priv *priv)
  182. {
  183. struct hnae3_handle *h = priv->ae_handle;
  184. hns3_set_vector_coalesce_tx_gl(tqp_vector,
  185. tqp_vector->tx_group.coal.int_gl);
  186. hns3_set_vector_coalesce_rx_gl(tqp_vector,
  187. tqp_vector->rx_group.coal.int_gl);
  188. hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
  189. }
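/* Map the enabled TCs to netdev TC queue ranges and set the real number of
 * Tx/Rx queues to rss_size * num_tc.
 */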
  190. static int hns3_nic_set_real_num_queue(struct net_device *netdev)
  191. {
  192. struct hnae3_handle *h = hns3_get_handle(netdev);
  193. struct hnae3_knic_private_info *kinfo = &h->kinfo;
  194. unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
  195. int i, ret;
  196. if (kinfo->num_tc <= 1) {
  197. netdev_reset_tc(netdev);
  198. } else {
  199. ret = netdev_set_num_tc(netdev, kinfo->num_tc);
  200. if (ret) {
  201. netdev_err(netdev,
  202. "netdev_set_num_tc fail, ret=%d!\n", ret);
  203. return ret;
  204. }
  205. for (i = 0; i < HNAE3_MAX_TC; i++) {
  206. if (!kinfo->tc_info[i].enable)
  207. continue;
  208. netdev_set_tc_queue(netdev,
  209. kinfo->tc_info[i].tc,
  210. kinfo->tc_info[i].tqp_count,
  211. kinfo->tc_info[i].tqp_offset);
  212. }
  213. }
  214. ret = netif_set_real_num_tx_queues(netdev, queue_size);
  215. if (ret) {
  216. netdev_err(netdev,
  217. "netif_set_real_num_tx_queues fail, ret=%d!\n",
  218. ret);
  219. return ret;
  220. }
  221. ret = netif_set_real_num_rx_queues(netdev, queue_size);
  222. if (ret) {
  223. netdev_err(netdev,
  224. "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
  225. return ret;
  226. }
  227. return 0;
  228. }
  229. static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
  230. {
  231. u16 free_tqps, max_rss_size, max_tqps;
  232. h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
  233. max_tqps = h->kinfo.num_tc * max_rss_size;
  234. return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
  235. }
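/* Bring the NIC up: reset all rings, request the vector IRQs, enable the
 * vectors and start the ae_dev. On failure, the vectors and IRQs that were
 * already set up are rolled back.
 */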
  236. static int hns3_nic_net_up(struct net_device *netdev)
  237. {
  238. struct hns3_nic_priv *priv = netdev_priv(netdev);
  239. struct hnae3_handle *h = priv->ae_handle;
  240. int i, j;
  241. int ret;
  242. ret = hns3_nic_reset_all_ring(h);
  243. if (ret)
  244. return ret;
  245. /* get irq resource for all vectors */
  246. ret = hns3_nic_init_irq(priv);
  247. if (ret) {
  248. netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
  249. return ret;
  250. }
  251. /* enable the vectors */
  252. for (i = 0; i < priv->vector_num; i++)
  253. hns3_vector_enable(&priv->tqp_vector[i]);
  254. /* start the ae_dev */
  255. ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
  256. if (ret)
  257. goto out_start_err;
  258. clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
  259. return 0;
  260. out_start_err:
  261. for (j = i - 1; j >= 0; j--)
  262. hns3_vector_disable(&priv->tqp_vector[j]);
  263. hns3_nic_uninit_irq(priv);
  264. return ret;
  265. }
  266. static int hns3_nic_net_open(struct net_device *netdev)
  267. {
  268. struct hns3_nic_priv *priv = netdev_priv(netdev);
  269. struct hnae3_handle *h = hns3_get_handle(netdev);
  270. struct hnae3_knic_private_info *kinfo;
  271. int i, ret;
  272. netif_carrier_off(netdev);
  273. ret = hns3_nic_set_real_num_queue(netdev);
  274. if (ret)
  275. return ret;
  276. ret = hns3_nic_net_up(netdev);
  277. if (ret) {
  278. netdev_err(netdev,
  279. "hns net up fail, ret=%d!\n", ret);
  280. return ret;
  281. }
  282. kinfo = &h->kinfo;
  283. for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
  284. netdev_set_prio_tc_map(netdev, i,
  285. kinfo->prio_tc[i]);
  286. }
  287. priv->ae_handle->last_reset_time = jiffies;
  288. return 0;
  289. }
  290. static void hns3_nic_net_down(struct net_device *netdev)
  291. {
  292. struct hns3_nic_priv *priv = netdev_priv(netdev);
  293. const struct hnae3_ae_ops *ops;
  294. int i;
  295. if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
  296. return;
  297. /* disable vectors */
  298. for (i = 0; i < priv->vector_num; i++)
  299. hns3_vector_disable(&priv->tqp_vector[i]);
  300. /* stop ae_dev */
  301. ops = priv->ae_handle->ae_algo->ops;
  302. if (ops->stop)
  303. ops->stop(priv->ae_handle);
  304. /* free irq resources */
  305. hns3_nic_uninit_irq(priv);
  306. hns3_clear_all_ring(priv->ae_handle);
  307. }
  308. static int hns3_nic_net_stop(struct net_device *netdev)
  309. {
  310. netif_tx_stop_all_queues(netdev);
  311. netif_carrier_off(netdev);
  312. hns3_nic_net_down(netdev);
  313. return 0;
  314. }
  315. static int hns3_nic_uc_sync(struct net_device *netdev,
  316. const unsigned char *addr)
  317. {
  318. struct hnae3_handle *h = hns3_get_handle(netdev);
  319. if (h->ae_algo->ops->add_uc_addr)
  320. return h->ae_algo->ops->add_uc_addr(h, addr);
  321. return 0;
  322. }
  323. static int hns3_nic_uc_unsync(struct net_device *netdev,
  324. const unsigned char *addr)
  325. {
  326. struct hnae3_handle *h = hns3_get_handle(netdev);
  327. if (h->ae_algo->ops->rm_uc_addr)
  328. return h->ae_algo->ops->rm_uc_addr(h, addr);
  329. return 0;
  330. }
  331. static int hns3_nic_mc_sync(struct net_device *netdev,
  332. const unsigned char *addr)
  333. {
  334. struct hnae3_handle *h = hns3_get_handle(netdev);
  335. if (h->ae_algo->ops->add_mc_addr)
  336. return h->ae_algo->ops->add_mc_addr(h, addr);
  337. return 0;
  338. }
  339. static int hns3_nic_mc_unsync(struct net_device *netdev,
  340. const unsigned char *addr)
  341. {
  342. struct hnae3_handle *h = hns3_get_handle(netdev);
  343. if (h->ae_algo->ops->rm_mc_addr)
  344. return h->ae_algo->ops->rm_mc_addr(h, addr);
  345. return 0;
  346. }
  347. static void hns3_nic_set_rx_mode(struct net_device *netdev)
  348. {
  349. struct hnae3_handle *h = hns3_get_handle(netdev);
  350. if (h->ae_algo->ops->set_promisc_mode) {
  351. if (netdev->flags & IFF_PROMISC)
  352. h->ae_algo->ops->set_promisc_mode(h, true, true);
  353. else if (netdev->flags & IFF_ALLMULTI)
  354. h->ae_algo->ops->set_promisc_mode(h, false, true);
  355. else
  356. h->ae_algo->ops->set_promisc_mode(h, false, false);
  357. }
  358. if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
  359. netdev_err(netdev, "sync uc address fail\n");
  360. if (netdev->flags & IFF_MULTICAST) {
  361. if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
  362. netdev_err(netdev, "sync mc address fail\n");
  363. if (h->ae_algo->ops->update_mta_status)
  364. h->ae_algo->ops->update_mta_status(h);
  365. }
  366. }
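/* Prepare a GSO skb for hardware TSO: clear the checksum fields that the
 * hardware recomputes, remove the payload length from the TCP pseudo
 * checksum, and provide the TSO bit, paylen and MSS for the Tx BD.
 */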
  367. static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
  368. u16 *mss, u32 *type_cs_vlan_tso)
  369. {
  370. u32 l4_offset, hdr_len;
  371. union l3_hdr_info l3;
  372. union l4_hdr_info l4;
  373. u32 l4_paylen;
  374. int ret;
  375. if (!skb_is_gso(skb))
  376. return 0;
  377. ret = skb_cow_head(skb, 0);
  378. if (ret)
  379. return ret;
  380. l3.hdr = skb_network_header(skb);
  381. l4.hdr = skb_transport_header(skb);
  382. /* Software should clear the IPv4's checksum field when tso is
  383. * needed.
  384. */
  385. if (l3.v4->version == 4)
  386. l3.v4->check = 0;
  387. /* tunnel packet.*/
  388. if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
  389. SKB_GSO_GRE_CSUM |
  390. SKB_GSO_UDP_TUNNEL |
  391. SKB_GSO_UDP_TUNNEL_CSUM)) {
  392. if ((!(skb_shinfo(skb)->gso_type &
  393. SKB_GSO_PARTIAL)) &&
  394. (skb_shinfo(skb)->gso_type &
  395. SKB_GSO_UDP_TUNNEL_CSUM)) {
  396. /* Software should clear the udp's checksum
  397. * field when tso is needed.
  398. */
  399. l4.udp->check = 0;
  400. }
  401. /* reset l3&l4 pointers from outer to inner headers */
  402. l3.hdr = skb_inner_network_header(skb);
  403. l4.hdr = skb_inner_transport_header(skb);
  404. /* Software should clear the IPv4's checksum field when
  405. * tso is needed.
  406. */
  407. if (l3.v4->version == 4)
  408. l3.v4->check = 0;
  409. }
  410. /* normal or tunnel packet*/
  411. l4_offset = l4.hdr - skb->data;
  412. hdr_len = (l4.tcp->doff * 4) + l4_offset;
  413. /* remove the payload length from the inner pseudo checksum when doing TSO */
  414. l4_paylen = skb->len - l4_offset;
  415. csum_replace_by_diff(&l4.tcp->check,
  416. (__force __wsum)htonl(l4_paylen));
  417. /* find the txbd field values */
  418. *paylen = skb->len - hdr_len;
  419. hnae3_set_bit(*type_cs_vlan_tso,
  420. HNS3_TXD_TSO_B, 1);
  421. /* get MSS for TSO */
  422. *mss = skb_shinfo(skb)->gso_size;
  423. return 0;
  424. }
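/* Report the outer and, for encapsulated skbs, inner L4 protocol numbers,
 * skipping IPv6 extension headers where present.
 */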
  425. static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
  426. u8 *il4_proto)
  427. {
  428. union {
  429. struct iphdr *v4;
  430. struct ipv6hdr *v6;
  431. unsigned char *hdr;
  432. } l3;
  433. unsigned char *l4_hdr;
  434. unsigned char *exthdr;
  435. u8 l4_proto_tmp;
  436. __be16 frag_off;
  437. /* find the outer header pointers */
  438. l3.hdr = skb_network_header(skb);
  439. l4_hdr = skb_transport_header(skb);
  440. if (skb->protocol == htons(ETH_P_IPV6)) {
  441. exthdr = l3.hdr + sizeof(*l3.v6);
  442. l4_proto_tmp = l3.v6->nexthdr;
  443. if (l4_hdr != exthdr)
  444. ipv6_skip_exthdr(skb, exthdr - skb->data,
  445. &l4_proto_tmp, &frag_off);
  446. } else if (skb->protocol == htons(ETH_P_IP)) {
  447. l4_proto_tmp = l3.v4->protocol;
  448. } else {
  449. return -EINVAL;
  450. }
  451. *ol4_proto = l4_proto_tmp;
  452. /* tunnel packet */
  453. if (!skb->encapsulation) {
  454. *il4_proto = 0;
  455. return 0;
  456. }
  457. /* find the inner header pointers */
  458. l3.hdr = skb_inner_network_header(skb);
  459. l4_hdr = skb_inner_transport_header(skb);
  460. if (l3.v6->version == 6) {
  461. exthdr = l3.hdr + sizeof(*l3.v6);
  462. l4_proto_tmp = l3.v6->nexthdr;
  463. if (l4_hdr != exthdr)
  464. ipv6_skip_exthdr(skb, exthdr - skb->data,
  465. &l4_proto_tmp, &frag_off);
  466. } else if (l3.v4->version == 4) {
  467. l4_proto_tmp = l3.v4->protocol;
  468. }
  469. *il4_proto = l4_proto_tmp;
  470. return 0;
  471. }
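/* Fill the L2/L3/L4 (and, for tunnel packets, the outer OL2/OL3/OL4) header
 * length fields of the Tx BD from the skb header offsets.
 */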
  472. static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
  473. u8 il4_proto, u32 *type_cs_vlan_tso,
  474. u32 *ol_type_vlan_len_msec)
  475. {
  476. union {
  477. struct iphdr *v4;
  478. struct ipv6hdr *v6;
  479. unsigned char *hdr;
  480. } l3;
  481. union {
  482. struct tcphdr *tcp;
  483. struct udphdr *udp;
  484. struct gre_base_hdr *gre;
  485. unsigned char *hdr;
  486. } l4;
  487. unsigned char *l2_hdr;
  488. u8 l4_proto = ol4_proto;
  489. u32 ol2_len;
  490. u32 ol3_len;
  491. u32 ol4_len;
  492. u32 l2_len;
  493. u32 l3_len;
  494. l3.hdr = skb_network_header(skb);
  495. l4.hdr = skb_transport_header(skb);
  496. /* compute L2 header size for normal packet, defined in 2 Bytes */
  497. l2_len = l3.hdr - skb->data;
  498. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
  499. HNS3_TXD_L2LEN_S, l2_len >> 1);
  500. /* tunnel packet*/
  501. if (skb->encapsulation) {
  502. /* compute OL2 header size, defined in 2 Bytes */
  503. ol2_len = l2_len;
  504. hnae3_set_field(*ol_type_vlan_len_msec,
  505. HNS3_TXD_L2LEN_M,
  506. HNS3_TXD_L2LEN_S, ol2_len >> 1);
  507. /* compute OL3 header size, defined in 4 Bytes */
  508. ol3_len = l4.hdr - l3.hdr;
  509. hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
  510. HNS3_TXD_L3LEN_S, ol3_len >> 2);
  511. /* MAC in UDP, MAC in GRE (0x6558)*/
  512. if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
  513. /* switch MAC header ptr from outer to inner header.*/
  514. l2_hdr = skb_inner_mac_header(skb);
  515. /* compute OL4 header size, defined in 4 Bytes. */
  516. ol4_len = l2_hdr - l4.hdr;
  517. hnae3_set_field(*ol_type_vlan_len_msec,
  518. HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
  519. ol4_len >> 2);
  520. /* switch IP header ptr from outer to inner header */
  521. l3.hdr = skb_inner_network_header(skb);
  522. /* compute inner l2 header size, defined in 2 Bytes. */
  523. l2_len = l3.hdr - l2_hdr;
  524. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
  525. HNS3_TXD_L2LEN_S, l2_len >> 1);
  526. } else {
  527. /* skb packet types not supported by the hardware;
  528. * the txbd len field is not filled.
  529. */
  530. return;
  531. }
  532. /* switch L4 header pointer from outer to inner */
  533. l4.hdr = skb_inner_transport_header(skb);
  534. l4_proto = il4_proto;
  535. }
  536. /* compute inner(/normal) L3 header size, defined in 4 Bytes */
  537. l3_len = l4.hdr - l3.hdr;
  538. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
  539. HNS3_TXD_L3LEN_S, l3_len >> 2);
  540. /* compute inner(/normal) L4 header size, defined in 4 Bytes */
  541. switch (l4_proto) {
  542. case IPPROTO_TCP:
  543. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
  544. HNS3_TXD_L4LEN_S, l4.tcp->doff);
  545. break;
  546. case IPPROTO_SCTP:
  547. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
  548. HNS3_TXD_L4LEN_S,
  549. (sizeof(struct sctphdr) >> 2));
  550. break;
  551. case IPPROTO_UDP:
  552. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
  553. HNS3_TXD_L4LEN_S,
  554. (sizeof(struct udphdr) >> 2));
  555. break;
  556. default:
  557. /* skb packet types not supported by the hardware;
  558. * the txbd len field is not filled.
  559. */
  560. return;
  561. }
  562. }
  563. /* When skb->encapsulation is 0 and skb->ip_summed is CHECKSUM_PARTIAL
  564. * for a UDP packet whose destination port is the IANA-assigned VXLAN
  565. * port, the hardware is expected to do the checksum offload, but it
  566. * will not do so when the UDP destination port is 4789. Fall back to
  567. * software checksumming in that case.
  568. */
  569. static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
  570. {
  571. #define IANA_VXLAN_PORT 4789
  572. union {
  573. struct tcphdr *tcp;
  574. struct udphdr *udp;
  575. struct gre_base_hdr *gre;
  576. unsigned char *hdr;
  577. } l4;
  578. l4.hdr = skb_transport_header(skb);
  579. if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
  580. return false;
  581. skb_checksum_help(skb);
  582. return true;
  583. }
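/* Fill the L3/L4 type and checksum offload bits of the Tx BD. Packet types
 * the hardware cannot offload fall back to skb_checksum_help(), or are
 * rejected with -EDOM when TSO is requested.
 */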
  584. static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
  585. u8 il4_proto, u32 *type_cs_vlan_tso,
  586. u32 *ol_type_vlan_len_msec)
  587. {
  588. union {
  589. struct iphdr *v4;
  590. struct ipv6hdr *v6;
  591. unsigned char *hdr;
  592. } l3;
  593. u32 l4_proto = ol4_proto;
  594. l3.hdr = skb_network_header(skb);
  595. /* define OL3 type and tunnel type(OL4).*/
  596. if (skb->encapsulation) {
  597. /* define outer network header type.*/
  598. if (skb->protocol == htons(ETH_P_IP)) {
  599. if (skb_is_gso(skb))
  600. hnae3_set_field(*ol_type_vlan_len_msec,
  601. HNS3_TXD_OL3T_M,
  602. HNS3_TXD_OL3T_S,
  603. HNS3_OL3T_IPV4_CSUM);
  604. else
  605. hnae3_set_field(*ol_type_vlan_len_msec,
  606. HNS3_TXD_OL3T_M,
  607. HNS3_TXD_OL3T_S,
  608. HNS3_OL3T_IPV4_NO_CSUM);
  609. } else if (skb->protocol == htons(ETH_P_IPV6)) {
  610. hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
  611. HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
  612. }
  613. /* define tunnel type(OL4).*/
  614. switch (l4_proto) {
  615. case IPPROTO_UDP:
  616. hnae3_set_field(*ol_type_vlan_len_msec,
  617. HNS3_TXD_TUNTYPE_M,
  618. HNS3_TXD_TUNTYPE_S,
  619. HNS3_TUN_MAC_IN_UDP);
  620. break;
  621. case IPPROTO_GRE:
  622. hnae3_set_field(*ol_type_vlan_len_msec,
  623. HNS3_TXD_TUNTYPE_M,
  624. HNS3_TXD_TUNTYPE_S,
  625. HNS3_TUN_NVGRE);
  626. break;
  627. default:
  628. /* drop the skb if the hardware doesn't support this tunnel type,
  629. * because the hardware can't calculate the checksum when doing TSO.
  630. */
  631. if (skb_is_gso(skb))
  632. return -EDOM;
  633. /* the stack has already computed the IP checksum; the driver
  634. * calculates the L4 checksum when TSO is not used.
  635. */
  636. skb_checksum_help(skb);
  637. return 0;
  638. }
  639. l3.hdr = skb_inner_network_header(skb);
  640. l4_proto = il4_proto;
  641. }
  642. if (l3.v4->version == 4) {
  643. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
  644. HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
  645. /* the stack computes the IP header already, the only time we
  646. * need the hardware to recompute it is in the case of TSO.
  647. */
  648. if (skb_is_gso(skb))
  649. hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
  650. } else if (l3.v6->version == 6) {
  651. hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
  652. HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
  653. }
  654. switch (l4_proto) {
  655. case IPPROTO_TCP:
  656. hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
  657. hnae3_set_field(*type_cs_vlan_tso,
  658. HNS3_TXD_L4T_M,
  659. HNS3_TXD_L4T_S,
  660. HNS3_L4T_TCP);
  661. break;
  662. case IPPROTO_UDP:
  663. if (hns3_tunnel_csum_bug(skb))
  664. break;
  665. hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
  666. hnae3_set_field(*type_cs_vlan_tso,
  667. HNS3_TXD_L4T_M,
  668. HNS3_TXD_L4T_S,
  669. HNS3_L4T_UDP);
  670. break;
  671. case IPPROTO_SCTP:
  672. hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
  673. hnae3_set_field(*type_cs_vlan_tso,
  674. HNS3_TXD_L4T_M,
  675. HNS3_TXD_L4T_S,
  676. HNS3_L4T_SCTP);
  677. break;
  678. default:
  679. /* drop the skb if the hardware doesn't support this packet type,
  680. * because the hardware can't calculate the checksum when doing TSO.
  681. */
  682. if (skb_is_gso(skb))
  683. return -EDOM;
  684. /* the stack has already computed the IP checksum; the driver
  685. * calculates the L4 checksum when TSO is not used.
  686. */
  687. skb_checksum_help(skb);
  688. return 0;
  689. }
  690. return 0;
  691. }
  692. static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
  693. {
  694. /* Config bd buffer end */
  695. hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
  696. HNS3_TXD_BDTYPE_S, 0);
  697. hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
  698. hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
  699. hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
  700. }
  701. static int hns3_fill_desc_vtags(struct sk_buff *skb,
  702. struct hns3_enet_ring *tx_ring,
  703. u32 *inner_vlan_flag,
  704. u32 *out_vlan_flag,
  705. u16 *inner_vtag,
  706. u16 *out_vtag)
  707. {
  708. #define HNS3_TX_VLAN_PRIO_SHIFT 13
  709. if (skb->protocol == htons(ETH_P_8021Q) &&
  710. !(tx_ring->tqp->handle->kinfo.netdev->features &
  711. NETIF_F_HW_VLAN_CTAG_TX)) {
  712. /* When HW VLAN acceleration is turned off, and the stack
  713. * sets the protocol to 802.1Q, the driver just needs to
  714. * set the protocol to the encapsulated ethertype.
  715. */
  716. skb->protocol = vlan_get_protocol(skb);
  717. return 0;
  718. }
  719. if (skb_vlan_tag_present(skb)) {
  720. u16 vlan_tag;
  721. vlan_tag = skb_vlan_tag_get(skb);
  722. vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
  723. /* Based on the hardware strategy, use out_vtag in the double-tag
  724. * case and inner_vtag in the single-tag case.
  725. */
  726. if (skb->protocol == htons(ETH_P_8021Q)) {
  727. hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
  728. *out_vtag = vlan_tag;
  729. } else {
  730. hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
  731. *inner_vtag = vlan_tag;
  732. }
  733. } else if (skb->protocol == htons(ETH_P_8021Q)) {
  734. struct vlan_ethhdr *vhdr;
  735. int rc;
  736. rc = skb_cow_head(skb, 0);
  737. if (rc < 0)
  738. return rc;
  739. vhdr = (struct vlan_ethhdr *)skb->data;
  740. vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
  741. << HNS3_TX_VLAN_PRIO_SHIFT);
  742. }
  743. skb->protocol = vlan_get_protocol(skb);
  744. return 0;
  745. }
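/* Fill one Tx BD. For the head descriptor (DESC_TYPE_SKB) this also fills
 * the VLAN tags, checksum/TSO fields, paylen and MSS before advancing
 * next_to_use.
 */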
  746. static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
  747. int size, dma_addr_t dma, int frag_end,
  748. enum hns_desc_type type)
  749. {
  750. struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
  751. struct hns3_desc *desc = &ring->desc[ring->next_to_use];
  752. u32 ol_type_vlan_len_msec = 0;
  753. u16 bdtp_fe_sc_vld_ra_ri = 0;
  754. u32 type_cs_vlan_tso = 0;
  755. struct sk_buff *skb;
  756. u16 inner_vtag = 0;
  757. u16 out_vtag = 0;
  758. u32 paylen = 0;
  759. u16 mss = 0;
  760. u8 ol4_proto;
  761. u8 il4_proto;
  762. int ret;
  763. /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
  764. desc_cb->priv = priv;
  765. desc_cb->length = size;
  766. desc_cb->dma = dma;
  767. desc_cb->type = type;
  768. /* now, fill the descriptor */
  769. desc->addr = cpu_to_le64(dma);
  770. desc->tx.send_size = cpu_to_le16((u16)size);
  771. hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
  772. desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
  773. if (type == DESC_TYPE_SKB) {
  774. skb = (struct sk_buff *)priv;
  775. paylen = skb->len;
  776. ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
  777. &ol_type_vlan_len_msec,
  778. &inner_vtag, &out_vtag);
  779. if (unlikely(ret))
  780. return ret;
  781. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  782. skb_reset_mac_len(skb);
  783. ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
  784. if (ret)
  785. return ret;
  786. hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
  787. &type_cs_vlan_tso,
  788. &ol_type_vlan_len_msec);
  789. ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
  790. &type_cs_vlan_tso,
  791. &ol_type_vlan_len_msec);
  792. if (ret)
  793. return ret;
  794. ret = hns3_set_tso(skb, &paylen, &mss,
  795. &type_cs_vlan_tso);
  796. if (ret)
  797. return ret;
  798. }
  799. /* Set txbd */
  800. desc->tx.ol_type_vlan_len_msec =
  801. cpu_to_le32(ol_type_vlan_len_msec);
  802. desc->tx.type_cs_vlan_tso_len =
  803. cpu_to_le32(type_cs_vlan_tso);
  804. desc->tx.paylen = cpu_to_le32(paylen);
  805. desc->tx.mss = cpu_to_le16(mss);
  806. desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
  807. desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
  808. }
  809. /* move ring pointer to next.*/
  810. ring_ptr_move_fw(ring, next_to_use);
  811. return 0;
  812. }
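/* TSO variant of fill_desc: split a buffer larger than HNS3_MAX_BD_SIZE
 * into multiple BDs, marking only the last one as the fragment end.
 */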
  813. static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
  814. int size, dma_addr_t dma, int frag_end,
  815. enum hns_desc_type type)
  816. {
  817. unsigned int frag_buf_num;
  818. unsigned int k;
  819. int sizeoflast;
  820. int ret;
  821. frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
  822. sizeoflast = size % HNS3_MAX_BD_SIZE;
  823. sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
  824. /* When the frag size is bigger than the hardware limit, split this frag */
  825. for (k = 0; k < frag_buf_num; k++) {
  826. ret = hns3_fill_desc(ring, priv,
  827. (k == frag_buf_num - 1) ?
  828. sizeoflast : HNS3_MAX_BD_SIZE,
  829. dma + HNS3_MAX_BD_SIZE * k,
  830. frag_end && (k == frag_buf_num - 1) ? 1 : 0,
  831. (type == DESC_TYPE_SKB && !k) ?
  832. DESC_TYPE_SKB : DESC_TYPE_PAGE);
  833. if (ret)
  834. return ret;
  835. }
  836. return 0;
  837. }
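/* Estimate how many BDs the skb needs, splitting oversized fragments.
 * Returns -ENOMEM if a single fragment needs too many BDs and -EBUSY if
 * the ring does not have enough free descriptors.
 */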
  838. static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
  839. struct hns3_enet_ring *ring)
  840. {
  841. struct sk_buff *skb = *out_skb;
  842. struct skb_frag_struct *frag;
  843. int bdnum_for_frag;
  844. int frag_num;
  845. int buf_num;
  846. int size;
  847. int i;
  848. size = skb_headlen(skb);
  849. buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
  850. frag_num = skb_shinfo(skb)->nr_frags;
  851. for (i = 0; i < frag_num; i++) {
  852. frag = &skb_shinfo(skb)->frags[i];
  853. size = skb_frag_size(frag);
  854. bdnum_for_frag =
  855. (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
  856. if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
  857. return -ENOMEM;
  858. buf_num += bdnum_for_frag;
  859. }
  860. if (buf_num > ring_space(ring))
  861. return -EBUSY;
  862. *bnum = buf_num;
  863. return 0;
  864. }
  865. static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
  866. struct hns3_enet_ring *ring)
  867. {
  868. struct sk_buff *skb = *out_skb;
  869. int buf_num;
  870. /* No. of segments (plus a header) */
  871. buf_num = skb_shinfo(skb)->nr_frags + 1;
  872. if (buf_num > ring_space(ring))
  873. return -EBUSY;
  874. *bnum = buf_num;
  875. return 0;
  876. }
  877. static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
  878. {
  879. struct device *dev = ring_to_dev(ring);
  880. unsigned int i;
  881. for (i = 0; i < ring->desc_num; i++) {
  882. /* check if this is where we started */
  883. if (ring->next_to_use == next_to_use_orig)
  884. break;
  885. /* unmap the descriptor dma address */
  886. if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
  887. dma_unmap_single(dev,
  888. ring->desc_cb[ring->next_to_use].dma,
  889. ring->desc_cb[ring->next_to_use].length,
  890. DMA_TO_DEVICE);
  891. else
  892. dma_unmap_page(dev,
  893. ring->desc_cb[ring->next_to_use].dma,
  894. ring->desc_cb[ring->next_to_use].length,
  895. DMA_TO_DEVICE);
  896. /* rollback one */
  897. ring_ptr_move_bw(ring, next_to_use);
  898. }
  899. }
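/* Main transmit path: map the linear part and each fragment for DMA, fill
 * the Tx BDs and ring the doorbell. DMA mappings are unwound on error.
 */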
  900. netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
  901. {
  902. struct hns3_nic_priv *priv = netdev_priv(netdev);
  903. struct hns3_nic_ring_data *ring_data =
  904. &tx_ring_data(priv, skb->queue_mapping);
  905. struct hns3_enet_ring *ring = ring_data->ring;
  906. struct device *dev = priv->dev;
  907. struct netdev_queue *dev_queue;
  908. struct skb_frag_struct *frag;
  909. int next_to_use_head;
  910. int next_to_use_frag;
  911. dma_addr_t dma;
  912. int buf_num;
  913. int seg_num;
  914. int size;
  915. int ret;
  916. int i;
  917. /* Prefetch the data used later */
  918. prefetch(skb->data);
  919. switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
  920. case -EBUSY:
  921. u64_stats_update_begin(&ring->syncp);
  922. ring->stats.tx_busy++;
  923. u64_stats_update_end(&ring->syncp);
  924. goto out_net_tx_busy;
  925. case -ENOMEM:
  926. u64_stats_update_begin(&ring->syncp);
  927. ring->stats.sw_err_cnt++;
  928. u64_stats_update_end(&ring->syncp);
  929. netdev_err(netdev, "no memory to xmit!\n");
  930. goto out_err_tx_ok;
  931. default:
  932. break;
  933. }
  934. /* No. of segments (plus a header) */
  935. seg_num = skb_shinfo(skb)->nr_frags + 1;
  936. /* Fill the first part */
  937. size = skb_headlen(skb);
  938. next_to_use_head = ring->next_to_use;
  939. dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
  940. if (dma_mapping_error(dev, dma)) {
  941. netdev_err(netdev, "TX head DMA map failed\n");
  942. ring->stats.sw_err_cnt++;
  943. goto out_err_tx_ok;
  944. }
  945. ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
  946. DESC_TYPE_SKB);
  947. if (ret)
  948. goto head_dma_map_err;
  949. next_to_use_frag = ring->next_to_use;
  950. /* Fill the fragments */
  951. for (i = 1; i < seg_num; i++) {
  952. frag = &skb_shinfo(skb)->frags[i - 1];
  953. size = skb_frag_size(frag);
  954. dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
  955. if (dma_mapping_error(dev, dma)) {
  956. netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
  957. ring->stats.sw_err_cnt++;
  958. goto frag_dma_map_err;
  959. }
  960. ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
  961. seg_num - 1 == i ? 1 : 0,
  962. DESC_TYPE_PAGE);
  963. if (ret)
  964. goto frag_dma_map_err;
  965. }
  966. /* All BDs for this skb are filled; account the bytes and ring the doorbell */
  967. dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
  968. netdev_tx_sent_queue(dev_queue, skb->len);
  969. wmb(); /* Commit all data before submit */
  970. hnae3_queue_xmit(ring->tqp, buf_num);
  971. return NETDEV_TX_OK;
  972. frag_dma_map_err:
  973. hns_nic_dma_unmap(ring, next_to_use_frag);
  974. head_dma_map_err:
  975. hns_nic_dma_unmap(ring, next_to_use_head);
  976. out_err_tx_ok:
  977. dev_kfree_skb_any(skb);
  978. return NETDEV_TX_OK;
  979. out_net_tx_busy:
  980. netif_stop_subqueue(netdev, ring_data->queue_index);
  981. smp_mb(); /* Commit all data before submit */
  982. return NETDEV_TX_BUSY;
  983. }
  984. static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
  985. {
  986. struct hnae3_handle *h = hns3_get_handle(netdev);
  987. struct sockaddr *mac_addr = p;
  988. int ret;
  989. if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
  990. return -EADDRNOTAVAIL;
  991. if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
  992. netdev_info(netdev, "already using mac address %pM\n",
  993. mac_addr->sa_data);
  994. return 0;
  995. }
  996. ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
  997. if (ret) {
  998. netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
  999. return ret;
  1000. }
  1001. ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
  1002. return 0;
  1003. }
  1004. static int hns3_nic_set_features(struct net_device *netdev,
  1005. netdev_features_t features)
  1006. {
  1007. netdev_features_t changed = netdev->features ^ features;
  1008. struct hns3_nic_priv *priv = netdev_priv(netdev);
  1009. struct hnae3_handle *h = priv->ae_handle;
  1010. int ret;
  1011. if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
  1012. if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
  1013. priv->ops.fill_desc = hns3_fill_desc_tso;
  1014. priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
  1015. } else {
  1016. priv->ops.fill_desc = hns3_fill_desc;
  1017. priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
  1018. }
  1019. }
  1020. if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
  1021. h->ae_algo->ops->enable_vlan_filter) {
  1022. if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
  1023. h->ae_algo->ops->enable_vlan_filter(h, true);
  1024. else
  1025. h->ae_algo->ops->enable_vlan_filter(h, false);
  1026. }
  1027. if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
  1028. h->ae_algo->ops->enable_hw_strip_rxvtag) {
  1029. if (features & NETIF_F_HW_VLAN_CTAG_RX)
  1030. ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
  1031. else
  1032. ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
  1033. if (ret)
  1034. return ret;
  1035. }
  1036. netdev->features = features;
  1037. return 0;
  1038. }
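/* Aggregate the per-ring Tx/Rx counters under u64_stats retry loops and
 * combine them with the counters updated into netdev->stats by the ae_dev.
 */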
  1039. static void hns3_nic_get_stats64(struct net_device *netdev,
  1040. struct rtnl_link_stats64 *stats)
  1041. {
  1042. struct hns3_nic_priv *priv = netdev_priv(netdev);
  1043. int queue_num = priv->ae_handle->kinfo.num_tqps;
  1044. struct hnae3_handle *handle = priv->ae_handle;
  1045. struct hns3_enet_ring *ring;
  1046. unsigned int start;
  1047. unsigned int idx;
  1048. u64 tx_bytes = 0;
  1049. u64 rx_bytes = 0;
  1050. u64 tx_pkts = 0;
  1051. u64 rx_pkts = 0;
  1052. u64 tx_drop = 0;
  1053. u64 rx_drop = 0;
  1054. if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
  1055. return;
  1056. handle->ae_algo->ops->update_stats(handle, &netdev->stats);
  1057. for (idx = 0; idx < queue_num; idx++) {
  1058. /* fetch the tx stats */
  1059. ring = priv->ring_data[idx].ring;
  1060. do {
  1061. start = u64_stats_fetch_begin_irq(&ring->syncp);
  1062. tx_bytes += ring->stats.tx_bytes;
  1063. tx_pkts += ring->stats.tx_pkts;
  1064. tx_drop += ring->stats.tx_busy;
  1065. tx_drop += ring->stats.sw_err_cnt;
  1066. } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
  1067. /* fetch the rx stats */
  1068. ring = priv->ring_data[idx + queue_num].ring;
  1069. do {
  1070. start = u64_stats_fetch_begin_irq(&ring->syncp);
  1071. rx_bytes += ring->stats.rx_bytes;
  1072. rx_pkts += ring->stats.rx_pkts;
  1073. rx_drop += ring->stats.non_vld_descs;
  1074. rx_drop += ring->stats.err_pkt_len;
  1075. rx_drop += ring->stats.l2_err;
  1076. } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
  1077. }
  1078. stats->tx_bytes = tx_bytes;
  1079. stats->tx_packets = tx_pkts;
  1080. stats->rx_bytes = rx_bytes;
  1081. stats->rx_packets = rx_pkts;
  1082. stats->rx_errors = netdev->stats.rx_errors;
  1083. stats->multicast = netdev->stats.multicast;
  1084. stats->rx_length_errors = netdev->stats.rx_length_errors;
  1085. stats->rx_crc_errors = netdev->stats.rx_crc_errors;
  1086. stats->rx_missed_errors = netdev->stats.rx_missed_errors;
  1087. stats->tx_errors = netdev->stats.tx_errors;
  1088. stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
  1089. stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
  1090. stats->collisions = netdev->stats.collisions;
  1091. stats->rx_over_errors = netdev->stats.rx_over_errors;
  1092. stats->rx_frame_errors = netdev->stats.rx_frame_errors;
  1093. stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
  1094. stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
  1095. stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
  1096. stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
  1097. stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
  1098. stats->tx_window_errors = netdev->stats.tx_window_errors;
  1099. stats->rx_compressed = netdev->stats.rx_compressed;
  1100. stats->tx_compressed = netdev->stats.tx_compressed;
  1101. }
  1102. static int hns3_setup_tc(struct net_device *netdev, void *type_data)
  1103. {
  1104. struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
  1105. struct hnae3_handle *h = hns3_get_handle(netdev);
  1106. struct hnae3_knic_private_info *kinfo = &h->kinfo;
  1107. u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
  1108. u8 tc = mqprio_qopt->qopt.num_tc;
  1109. u16 mode = mqprio_qopt->mode;
  1110. u8 hw = mqprio_qopt->qopt.hw;
  1111. bool if_running;
  1112. int ret;
  1113. if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
  1114. mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
  1115. return -EOPNOTSUPP;
  1116. if (tc > HNAE3_MAX_TC)
  1117. return -EINVAL;
  1118. if (!netdev)
  1119. return -EINVAL;
  1120. if_running = netif_running(netdev);
  1121. if (if_running) {
  1122. hns3_nic_net_stop(netdev);
  1123. msleep(100);
  1124. }
  1125. ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
  1126. kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
  1127. if (ret)
  1128. goto out;
  1129. ret = hns3_nic_set_real_num_queue(netdev);
  1130. out:
  1131. if (if_running)
  1132. hns3_nic_net_open(netdev);
  1133. return ret;
  1134. }
  1135. static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
  1136. void *type_data)
  1137. {
  1138. if (type != TC_SETUP_QDISC_MQPRIO)
  1139. return -EOPNOTSUPP;
  1140. return hns3_setup_tc(dev, type_data);
  1141. }
  1142. static int hns3_vlan_rx_add_vid(struct net_device *netdev,
  1143. __be16 proto, u16 vid)
  1144. {
  1145. struct hnae3_handle *h = hns3_get_handle(netdev);
  1146. struct hns3_nic_priv *priv = netdev_priv(netdev);
  1147. int ret = -EIO;
  1148. if (h->ae_algo->ops->set_vlan_filter)
  1149. ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
  1150. if (!ret)
  1151. set_bit(vid, priv->active_vlans);
  1152. return ret;
  1153. }
  1154. static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
  1155. __be16 proto, u16 vid)
  1156. {
  1157. struct hnae3_handle *h = hns3_get_handle(netdev);
  1158. struct hns3_nic_priv *priv = netdev_priv(netdev);
  1159. int ret = -EIO;
  1160. if (h->ae_algo->ops->set_vlan_filter)
  1161. ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
  1162. if (!ret)
  1163. clear_bit(vid, priv->active_vlans);
  1164. return ret;
  1165. }
  1166. static void hns3_restore_vlan(struct net_device *netdev)
  1167. {
  1168. struct hns3_nic_priv *priv = netdev_priv(netdev);
  1169. u16 vid;
  1170. int ret;
  1171. for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
  1172. ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
  1173. if (ret)
  1174. netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
  1175. vid, ret);
  1176. }
  1177. }
  1178. static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
  1179. u8 qos, __be16 vlan_proto)
  1180. {
  1181. struct hnae3_handle *h = hns3_get_handle(netdev);
  1182. int ret = -EIO;
  1183. if (h->ae_algo->ops->set_vf_vlan_filter)
  1184. ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
  1185. qos, vlan_proto);
  1186. return ret;
  1187. }
  1188. static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
  1189. {
  1190. struct hnae3_handle *h = hns3_get_handle(netdev);
  1191. bool if_running = netif_running(netdev);
  1192. int ret;
  1193. if (!h->ae_algo->ops->set_mtu)
  1194. return -EOPNOTSUPP;
  1195. /* if this was called with netdev up then bring netdevice down */
  1196. if (if_running) {
  1197. (void)hns3_nic_net_stop(netdev);
  1198. msleep(100);
  1199. }
  1200. ret = h->ae_algo->ops->set_mtu(h, new_mtu);
  1201. if (ret) {
  1202. netdev_err(netdev, "failed to change MTU in hardware %d\n",
  1203. ret);
1204. goto out;
1205. }
1206. netdev->mtu = new_mtu;
out:
1207. /* if the netdev was running earlier, bring it up again */
  1208. if (if_running && hns3_nic_net_open(netdev))
  1209. ret = -EINVAL;
  1210. return ret;
  1211. }
  1212. static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
  1213. {
  1214. struct hns3_nic_priv *priv = netdev_priv(ndev);
  1215. struct hns3_enet_ring *tx_ring = NULL;
  1216. int timeout_queue = 0;
  1217. int hw_head, hw_tail;
  1218. int i;
  1219. /* Find the stopped queue the same way the stack does */
  1220. for (i = 0; i < ndev->real_num_tx_queues; i++) {
  1221. struct netdev_queue *q;
  1222. unsigned long trans_start;
  1223. q = netdev_get_tx_queue(ndev, i);
  1224. trans_start = q->trans_start;
  1225. if (netif_xmit_stopped(q) &&
  1226. time_after(jiffies,
  1227. (trans_start + ndev->watchdog_timeo))) {
  1228. timeout_queue = i;
  1229. break;
  1230. }
  1231. }
1232. if (i == ndev->real_num_tx_queues) {
  1233. netdev_info(ndev,
  1234. "no netdev TX timeout queue found, timeout count: %llu\n",
  1235. priv->tx_timeout_count);
  1236. return false;
  1237. }
  1238. tx_ring = priv->ring_data[timeout_queue].ring;
  1239. hw_head = readl_relaxed(tx_ring->tqp->io_base +
  1240. HNS3_RING_TX_RING_HEAD_REG);
  1241. hw_tail = readl_relaxed(tx_ring->tqp->io_base +
  1242. HNS3_RING_TX_RING_TAIL_REG);
  1243. netdev_info(ndev,
  1244. "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
  1245. priv->tx_timeout_count,
  1246. timeout_queue,
  1247. tx_ring->next_to_use,
  1248. tx_ring->next_to_clean,
  1249. hw_head,
  1250. hw_tail,
  1251. readl(tx_ring->tqp_vector->mask_addr));
  1252. return true;
  1253. }
  1254. static void hns3_nic_net_timeout(struct net_device *ndev)
  1255. {
  1256. struct hns3_nic_priv *priv = netdev_priv(ndev);
  1257. struct hnae3_handle *h = priv->ae_handle;
  1258. if (!hns3_get_tx_timeo_queue_info(ndev))
  1259. return;
  1260. priv->tx_timeout_count++;
  1261. if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
  1262. return;
  1263. /* request the reset */
  1264. if (h->ae_algo->ops->reset_event)
  1265. h->ae_algo->ops->reset_event(h);
  1266. }
  1267. static const struct net_device_ops hns3_nic_netdev_ops = {
  1268. .ndo_open = hns3_nic_net_open,
  1269. .ndo_stop = hns3_nic_net_stop,
  1270. .ndo_start_xmit = hns3_nic_net_xmit,
  1271. .ndo_tx_timeout = hns3_nic_net_timeout,
  1272. .ndo_set_mac_address = hns3_nic_net_set_mac_address,
  1273. .ndo_change_mtu = hns3_nic_change_mtu,
  1274. .ndo_set_features = hns3_nic_set_features,
  1275. .ndo_get_stats64 = hns3_nic_get_stats64,
  1276. .ndo_setup_tc = hns3_nic_setup_tc,
  1277. .ndo_set_rx_mode = hns3_nic_set_rx_mode,
  1278. .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
  1279. .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
  1280. .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
  1281. };
  1282. static bool hns3_is_phys_func(struct pci_dev *pdev)
  1283. {
  1284. u32 dev_id = pdev->device;
  1285. switch (dev_id) {
  1286. case HNAE3_DEV_ID_GE:
  1287. case HNAE3_DEV_ID_25GE:
  1288. case HNAE3_DEV_ID_25GE_RDMA:
  1289. case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
  1290. case HNAE3_DEV_ID_50GE_RDMA:
  1291. case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
  1292. case HNAE3_DEV_ID_100G_RDMA_MACSEC:
  1293. return true;
  1294. case HNAE3_DEV_ID_100G_VF:
  1295. case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
  1296. return false;
  1297. default:
1298. dev_warn(&pdev->dev, "unrecognized pci device-id %d\n",
  1299. dev_id);
  1300. }
  1301. return false;
  1302. }
  1303. static void hns3_disable_sriov(struct pci_dev *pdev)
  1304. {
  1305. /* If our VFs are assigned we cannot shut down SR-IOV
  1306. * without causing issues, so just leave the hardware
  1307. * available but disabled
  1308. */
  1309. if (pci_vfs_assigned(pdev)) {
  1310. dev_warn(&pdev->dev,
  1311. "disabling driver while VFs are assigned\n");
  1312. return;
  1313. }
  1314. pci_disable_sriov(pdev);
  1315. }
  1316. /* hns3_probe - Device initialization routine
  1317. * @pdev: PCI device information struct
  1318. * @ent: entry in hns3_pci_tbl
  1319. *
  1320. * hns3_probe initializes a PF identified by a pci_dev structure.
  1321. * The OS initialization, configuring of the PF private structure,
  1322. * and a hardware reset occur.
  1323. *
  1324. * Returns 0 on success, negative on failure
  1325. */
  1326. static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  1327. {
  1328. struct hnae3_ae_dev *ae_dev;
  1329. int ret;
  1330. ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
  1331. GFP_KERNEL);
  1332. if (!ae_dev) {
  1333. ret = -ENOMEM;
  1334. return ret;
  1335. }
  1336. ae_dev->pdev = pdev;
  1337. ae_dev->flag = ent->driver_data;
  1338. ae_dev->dev_type = HNAE3_DEV_KNIC;
  1339. pci_set_drvdata(pdev, ae_dev);
  1340. hnae3_register_ae_dev(ae_dev);
  1341. return 0;
  1342. }
  1343. /* hns3_remove - Device removal routine
  1344. * @pdev: PCI device information struct
  1345. */
  1346. static void hns3_remove(struct pci_dev *pdev)
  1347. {
  1348. struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
  1349. if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
  1350. hns3_disable_sriov(pdev);
  1351. hnae3_unregister_ae_dev(ae_dev);
  1352. }
  1353. /**
  1354. * hns3_pci_sriov_configure
  1355. * @pdev: pointer to a pci_dev structure
  1356. * @num_vfs: number of VFs to allocate
  1357. *
  1358. * Enable or change the number of VFs. Called when the user updates the number
  1359. * of VFs in sysfs.
  1360. **/
  1361. static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
  1362. {
  1363. int ret;
  1364. if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
  1365. dev_warn(&pdev->dev, "Can not config SRIOV\n");
  1366. return -EINVAL;
  1367. }
  1368. if (num_vfs) {
  1369. ret = pci_enable_sriov(pdev, num_vfs);
  1370. if (ret)
  1371. dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
  1372. else
  1373. return num_vfs;
  1374. } else if (!pci_vfs_assigned(pdev)) {
  1375. pci_disable_sriov(pdev);
  1376. } else {
  1377. dev_warn(&pdev->dev,
  1378. "Unable to free VFs because some are assigned to VMs.\n");
  1379. }
  1380. return 0;
  1381. }
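/* hns3_pci_sriov_configure() above is wired up through the .sriov_configure
 * hook below and runs when userspace writes a VF count to the standard PCI
 * sysfs attribute, for example:
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 * Writing 0 releases the VFs again, provided none are currently assigned
 * to a VM.
 */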
  1382. static struct pci_driver hns3_driver = {
  1383. .name = hns3_driver_name,
  1384. .id_table = hns3_pci_tbl,
  1385. .probe = hns3_probe,
  1386. .remove = hns3_remove,
  1387. .sriov_configure = hns3_pci_sriov_configure,
  1388. };
1389. /* set the default features for the hns3 netdev */
  1390. static void hns3_set_default_feature(struct net_device *netdev)
  1391. {
  1392. struct hnae3_handle *h = hns3_get_handle(netdev);
  1393. struct pci_dev *pdev = h->pdev;
  1394. netdev->priv_flags |= IFF_UNICAST_FLT;
  1395. netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1396. NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
  1397. NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
  1398. NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
  1399. NETIF_F_GSO_UDP_TUNNEL_CSUM;
  1400. netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
  1401. netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
  1402. netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1403. NETIF_F_HW_VLAN_CTAG_FILTER |
  1404. NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
  1405. NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
  1406. NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
  1407. NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
  1408. NETIF_F_GSO_UDP_TUNNEL_CSUM;
  1409. netdev->vlan_features |=
  1410. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
  1411. NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
  1412. NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
  1413. NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
  1414. NETIF_F_GSO_UDP_TUNNEL_CSUM;
  1415. netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  1416. NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
  1417. NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
  1418. NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
  1419. NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
  1420. NETIF_F_GSO_UDP_TUNNEL_CSUM;
  1421. if (pdev->revision != 0x20)
  1422. netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
  1423. }
  1424. static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
  1425. struct hns3_desc_cb *cb)
  1426. {
  1427. unsigned int order = hnae3_page_order(ring);
  1428. struct page *p;
  1429. p = dev_alloc_pages(order);
  1430. if (!p)
  1431. return -ENOMEM;
  1432. cb->priv = p;
  1433. cb->page_offset = 0;
  1434. cb->reuse_flag = 0;
  1435. cb->buf = page_address(p);
  1436. cb->length = hnae3_page_size(ring);
  1437. cb->type = DESC_TYPE_PAGE;
  1438. return 0;
  1439. }
  1440. static void hns3_free_buffer(struct hns3_enet_ring *ring,
  1441. struct hns3_desc_cb *cb)
  1442. {
  1443. if (cb->type == DESC_TYPE_SKB)
  1444. dev_kfree_skb_any((struct sk_buff *)cb->priv);
  1445. else if (!HNAE3_IS_TX_RING(ring))
  1446. put_page((struct page *)cb->priv);
  1447. memset(cb, 0, sizeof(*cb));
  1448. }
  1449. static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
  1450. {
  1451. cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
  1452. cb->length, ring_to_dma_dir(ring));
  1453. if (dma_mapping_error(ring_to_dev(ring), cb->dma))
  1454. return -EIO;
  1455. return 0;
  1456. }
  1457. static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
  1458. struct hns3_desc_cb *cb)
  1459. {
  1460. if (cb->type == DESC_TYPE_SKB)
  1461. dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
  1462. ring_to_dma_dir(ring));
  1463. else
  1464. dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
  1465. ring_to_dma_dir(ring));
  1466. }
  1467. static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
  1468. {
  1469. hns3_unmap_buffer(ring, &ring->desc_cb[i]);
  1470. ring->desc[i].addr = 0;
  1471. }
  1472. static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
  1473. {
  1474. struct hns3_desc_cb *cb = &ring->desc_cb[i];
  1475. if (!ring->desc_cb[i].dma)
  1476. return;
  1477. hns3_buffer_detach(ring, i);
  1478. hns3_free_buffer(ring, cb);
  1479. }
  1480. static void hns3_free_buffers(struct hns3_enet_ring *ring)
  1481. {
  1482. int i;
  1483. for (i = 0; i < ring->desc_num; i++)
  1484. hns3_free_buffer_detach(ring, i);
  1485. }
  1486. /* free desc along with its attached buffer */
  1487. static void hns3_free_desc(struct hns3_enet_ring *ring)
  1488. {
  1489. int size = ring->desc_num * sizeof(ring->desc[0]);
  1490. hns3_free_buffers(ring);
  1491. if (ring->desc) {
  1492. dma_free_coherent(ring_to_dev(ring), size,
  1493. ring->desc, ring->desc_dma_addr);
  1494. ring->desc = NULL;
  1495. }
  1496. }
  1497. static int hns3_alloc_desc(struct hns3_enet_ring *ring)
  1498. {
  1499. int size = ring->desc_num * sizeof(ring->desc[0]);
  1500. ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
  1501. &ring->desc_dma_addr,
  1502. GFP_KERNEL);
  1503. if (!ring->desc)
  1504. return -ENOMEM;
  1505. return 0;
  1506. }
  1507. static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
  1508. struct hns3_desc_cb *cb)
  1509. {
  1510. int ret;
  1511. ret = hns3_alloc_buffer(ring, cb);
  1512. if (ret)
  1513. goto out;
  1514. ret = hns3_map_buffer(ring, cb);
  1515. if (ret)
  1516. goto out_with_buf;
  1517. return 0;
  1518. out_with_buf:
  1519. hns3_free_buffer(ring, cb);
  1520. out:
  1521. return ret;
  1522. }
  1523. static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
  1524. {
  1525. int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
  1526. if (ret)
  1527. return ret;
  1528. ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
  1529. return 0;
  1530. }
1531. /* Allocate memory for the raw packet buffers and map them for DMA */
  1532. static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
  1533. {
  1534. int i, j, ret;
  1535. for (i = 0; i < ring->desc_num; i++) {
  1536. ret = hns3_alloc_buffer_attach(ring, i);
  1537. if (ret)
  1538. goto out_buffer_fail;
  1539. }
  1540. return 0;
  1541. out_buffer_fail:
  1542. for (j = i - 1; j >= 0; j--)
  1543. hns3_free_buffer_detach(ring, j);
  1544. return ret;
  1545. }
1546. /* detach an in-use buffer and replace it with a reserved one */
  1547. static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
  1548. struct hns3_desc_cb *res_cb)
  1549. {
  1550. hns3_unmap_buffer(ring, &ring->desc_cb[i]);
  1551. ring->desc_cb[i] = *res_cb;
  1552. ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
  1553. ring->desc[i].rx.bd_base_info = 0;
  1554. }
  1555. static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
  1556. {
  1557. ring->desc_cb[i].reuse_flag = 0;
  1558. ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
  1559. + ring->desc_cb[i].page_offset);
  1560. ring->desc[i].rx.bd_base_info = 0;
  1561. }
  1562. static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
  1563. int *pkts)
  1564. {
  1565. struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
  1566. (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
  1567. (*bytes) += desc_cb->length;
1568. /* desc_cb will be cleaned after hns3_free_buffer_detach */
  1569. hns3_free_buffer_detach(ring, ring->next_to_clean);
  1570. ring_ptr_move_fw(ring, next_to_clean);
  1571. }
  1572. static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
  1573. {
  1574. int u = ring->next_to_use;
  1575. int c = ring->next_to_clean;
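/* The hardware head is valid when it lies in the wrapped range
 * (next_to_clean, next_to_use]; e.g. with desc_num = 8, c = 6 and u = 2,
 * heads 7, 0, 1 and 2 are accepted.
 */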
  1576. if (unlikely(h > ring->desc_num))
  1577. return 0;
  1578. return u > c ? (h > c && h <= u) : (h > c || h <= u);
  1579. }
  1580. bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
  1581. {
  1582. struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
  1583. struct netdev_queue *dev_queue;
  1584. int bytes, pkts;
  1585. int head;
  1586. head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1587. rmb(); /* Make sure head is ready before touching any data */
  1588. if (is_ring_empty(ring) || head == ring->next_to_clean)
  1589. return true; /* no data to poll */
  1590. if (unlikely(!is_valid_clean_head(ring, head))) {
  1591. netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
  1592. ring->next_to_use, ring->next_to_clean);
  1593. u64_stats_update_begin(&ring->syncp);
  1594. ring->stats.io_err_cnt++;
  1595. u64_stats_update_end(&ring->syncp);
  1596. return true;
  1597. }
  1598. bytes = 0;
  1599. pkts = 0;
  1600. while (head != ring->next_to_clean && budget) {
  1601. hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
  1602. /* Issue prefetch for next Tx descriptor */
  1603. prefetch(&ring->desc_cb[ring->next_to_clean]);
  1604. budget--;
  1605. }
  1606. ring->tqp_vector->tx_group.total_bytes += bytes;
  1607. ring->tqp_vector->tx_group.total_packets += pkts;
  1608. u64_stats_update_begin(&ring->syncp);
  1609. ring->stats.tx_bytes += bytes;
  1610. ring->stats.tx_pkts += pkts;
  1611. u64_stats_update_end(&ring->syncp);
  1612. dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
  1613. netdev_tx_completed_queue(dev_queue, pkts, bytes);
  1614. if (unlikely(pkts && netif_carrier_ok(netdev) &&
  1615. (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
  1616. /* Make sure that anybody stopping the queue after this
  1617. * sees the new next_to_clean.
  1618. */
  1619. smp_mb();
  1620. if (netif_tx_queue_stopped(dev_queue)) {
  1621. netif_tx_wake_queue(dev_queue);
  1622. ring->stats.restart_queue++;
  1623. }
  1624. }
  1625. return !!budget;
  1626. }
  1627. static int hns3_desc_unused(struct hns3_enet_ring *ring)
  1628. {
  1629. int ntc = ring->next_to_clean;
  1630. int ntu = ring->next_to_use;
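/* Count descriptors already cleaned but not yet refilled, taking
 * wrap-around into account; e.g. with desc_num = 8, ntc = 2 and ntu = 6
 * the result is (8 + 2) - 6 = 4 (descriptors 6, 7, 0 and 1).
 */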
  1631. return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
  1632. }
  1633. static void
1634. hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
  1635. {
  1636. struct hns3_desc_cb *desc_cb;
  1637. struct hns3_desc_cb res_cbs;
  1638. int i, ret;
1639. for (i = 0; i < cleaned_count; i++) {
  1640. desc_cb = &ring->desc_cb[ring->next_to_use];
  1641. if (desc_cb->reuse_flag) {
  1642. u64_stats_update_begin(&ring->syncp);
  1643. ring->stats.reuse_pg_cnt++;
  1644. u64_stats_update_end(&ring->syncp);
  1645. hns3_reuse_buffer(ring, ring->next_to_use);
  1646. } else {
  1647. ret = hns3_reserve_buffer_map(ring, &res_cbs);
  1648. if (ret) {
  1649. u64_stats_update_begin(&ring->syncp);
  1650. ring->stats.sw_err_cnt++;
  1651. u64_stats_update_end(&ring->syncp);
  1652. netdev_err(ring->tqp->handle->kinfo.netdev,
  1653. "hnae reserve buffer map failed.\n");
  1654. break;
  1655. }
  1656. hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
  1657. }
  1658. ring_ptr_move_fw(ring, next_to_use);
  1659. }
1660. wmb(); /* Make sure all data has been written before submit */
  1661. writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
  1662. }
  1663. static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
  1664. struct hns3_enet_ring *ring, int pull_len,
  1665. struct hns3_desc_cb *desc_cb)
  1666. {
  1667. struct hns3_desc *desc;
  1668. u32 truesize;
  1669. int size;
  1670. int last_offset;
  1671. bool twobufs;
  1672. twobufs = ((PAGE_SIZE < 8192) &&
  1673. hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
  1674. desc = &ring->desc[ring->next_to_clean];
  1675. size = le16_to_cpu(desc->rx.size);
  1676. truesize = hnae3_buf_size(ring);
  1677. if (!twobufs)
  1678. last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
  1679. skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
  1680. size - pull_len, truesize);
1681. /* Avoid re-using pages from a remote NUMA node; reuse_flag stays cleared by default */
  1682. if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
  1683. return;
  1684. if (twobufs) {
  1685. /* If we are only owner of page we can reuse it */
  1686. if (likely(page_count(desc_cb->priv) == 1)) {
  1687. /* Flip page offset to other buffer */
  1688. desc_cb->page_offset ^= truesize;
  1689. desc_cb->reuse_flag = 1;
1690. /* bump ref count on page before it is given */
  1691. get_page(desc_cb->priv);
  1692. }
  1693. return;
  1694. }
1695. /* Move offset up to the next buffer */
  1696. desc_cb->page_offset += truesize;
  1697. if (desc_cb->page_offset <= last_offset) {
  1698. desc_cb->reuse_flag = 1;
1699. /* Bump ref count on page before it is given */
  1700. get_page(desc_cb->priv);
  1701. }
  1702. }
  1703. static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
  1704. struct hns3_desc *desc)
  1705. {
  1706. struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
  1707. int l3_type, l4_type;
  1708. u32 bd_base_info;
  1709. int ol4_type;
  1710. u32 l234info;
  1711. bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
  1712. l234info = le32_to_cpu(desc->rx.l234_info);
  1713. skb->ip_summed = CHECKSUM_NONE;
  1714. skb_checksum_none_assert(skb);
  1715. if (!(netdev->features & NETIF_F_RXCSUM))
  1716. return;
  1717. /* check if hardware has done checksum */
  1718. if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
  1719. return;
  1720. if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
  1721. hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
  1722. hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
  1723. hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
  1724. netdev_err(netdev, "L3/L4 error pkt\n");
  1725. u64_stats_update_begin(&ring->syncp);
  1726. ring->stats.l3l4_csum_err++;
  1727. u64_stats_update_end(&ring->syncp);
  1728. return;
  1729. }
  1730. l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
  1731. HNS3_RXD_L3ID_S);
  1732. l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
  1733. HNS3_RXD_L4ID_S);
  1734. ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
  1735. HNS3_RXD_OL4ID_S);
  1736. switch (ol4_type) {
  1737. case HNS3_OL4_TYPE_MAC_IN_UDP:
  1738. case HNS3_OL4_TYPE_NVGRE:
  1739. skb->csum_level = 1;
  1740. /* fall through */
  1741. case HNS3_OL4_TYPE_NO_TUN:
  1742. /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
  1743. if ((l3_type == HNS3_L3_TYPE_IPV4 ||
  1744. l3_type == HNS3_L3_TYPE_IPV6) &&
  1745. (l4_type == HNS3_L4_TYPE_UDP ||
  1746. l4_type == HNS3_L4_TYPE_TCP ||
  1747. l4_type == HNS3_L4_TYPE_SCTP))
  1748. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1749. break;
  1750. }
  1751. }
  1752. static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
  1753. {
  1754. napi_gro_receive(&ring->tqp_vector->napi, skb);
  1755. }
  1756. static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
  1757. struct hns3_desc *desc, u32 l234info)
  1758. {
  1759. struct pci_dev *pdev = ring->tqp->handle->pdev;
  1760. u16 vlan_tag;
  1761. if (pdev->revision == 0x20) {
  1762. vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
  1763. if (!(vlan_tag & VLAN_VID_MASK))
  1764. vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
  1765. return vlan_tag;
  1766. }
  1767. #define HNS3_STRP_OUTER_VLAN 0x1
  1768. #define HNS3_STRP_INNER_VLAN 0x2
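/* For later hardware revisions the HNS3_RXD_STRP_TAGP field reports whether
 * the stripped tag came from the outer (0x1) or inner (0x2) VLAN header;
 * any other value means no usable tag was stripped.
 */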
  1769. switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
  1770. HNS3_RXD_STRP_TAGP_S)) {
  1771. case HNS3_STRP_OUTER_VLAN:
  1772. vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
  1773. break;
  1774. case HNS3_STRP_INNER_VLAN:
  1775. vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
  1776. break;
  1777. default:
  1778. vlan_tag = 0;
  1779. break;
  1780. }
  1781. return vlan_tag;
  1782. }
  1783. static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
  1784. struct sk_buff **out_skb, int *out_bnum)
  1785. {
  1786. struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
  1787. struct hns3_desc_cb *desc_cb;
  1788. struct hns3_desc *desc;
  1789. struct sk_buff *skb;
  1790. unsigned char *va;
  1791. u32 bd_base_info;
  1792. int pull_len;
  1793. u32 l234info;
  1794. int length;
  1795. int bnum;
  1796. desc = &ring->desc[ring->next_to_clean];
  1797. desc_cb = &ring->desc_cb[ring->next_to_clean];
  1798. prefetch(desc);
  1799. length = le16_to_cpu(desc->rx.size);
  1800. bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
  1801. /* Check valid BD */
  1802. if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
  1803. return -EFAULT;
  1804. va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1805. /* Prefetch the first cache line of the first page.
1806. * The idea is to cache a few bytes of the packet header. Our L1 cache
1807. * line size is 64B, so we need to prefetch twice to cover 128B. Some
1808. * platforms have larger caches with 128B L1 cache lines; in such a
1809. * case a single prefetch would suffice to cache the relevant part of
1810. * the header.
1811. */
  1812. prefetch(va);
  1813. #if L1_CACHE_BYTES < 128
  1814. prefetch(va + L1_CACHE_BYTES);
  1815. #endif
  1816. skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
  1817. HNS3_RX_HEAD_SIZE);
  1818. if (unlikely(!skb)) {
  1819. netdev_err(netdev, "alloc rx skb fail\n");
  1820. u64_stats_update_begin(&ring->syncp);
  1821. ring->stats.sw_err_cnt++;
  1822. u64_stats_update_end(&ring->syncp);
  1823. return -ENOMEM;
  1824. }
  1825. prefetchw(skb->data);
  1826. bnum = 1;
  1827. if (length <= HNS3_RX_HEAD_SIZE) {
  1828. memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
  1829. /* We can reuse buffer as-is, just make sure it is local */
  1830. if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
  1831. desc_cb->reuse_flag = 1;
  1832. else /* This page cannot be reused so discard it */
  1833. put_page(desc_cb->priv);
  1834. ring_ptr_move_fw(ring, next_to_clean);
  1835. } else {
  1836. u64_stats_update_begin(&ring->syncp);
  1837. ring->stats.seg_pkt_cnt++;
  1838. u64_stats_update_end(&ring->syncp);
  1839. pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
  1840. memcpy(__skb_put(skb, pull_len), va,
  1841. ALIGN(pull_len, sizeof(long)));
  1842. hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
  1843. ring_ptr_move_fw(ring, next_to_clean);
  1844. while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
  1845. desc = &ring->desc[ring->next_to_clean];
  1846. desc_cb = &ring->desc_cb[ring->next_to_clean];
  1847. bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
  1848. hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
  1849. ring_ptr_move_fw(ring, next_to_clean);
  1850. bnum++;
  1851. }
  1852. }
  1853. *out_bnum = bnum;
  1854. l234info = le32_to_cpu(desc->rx.l234_info);
1855. /* Based on the hardware strategy, the offloaded tag is stored in
1856. * ot_vlan_tag in the double-tagged case and stored in vlan_tag
1857. * in the single-tagged case.
1858. */
  1859. if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
  1860. u16 vlan_tag;
  1861. vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
  1862. if (vlan_tag & VLAN_VID_MASK)
  1863. __vlan_hwaccel_put_tag(skb,
  1864. htons(ETH_P_8021Q),
  1865. vlan_tag);
  1866. }
  1867. if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
  1868. netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
  1869. ((u64 *)desc)[0], ((u64 *)desc)[1]);
  1870. u64_stats_update_begin(&ring->syncp);
  1871. ring->stats.non_vld_descs++;
  1872. u64_stats_update_end(&ring->syncp);
  1873. dev_kfree_skb_any(skb);
  1874. return -EINVAL;
  1875. }
  1876. if (unlikely((!desc->rx.pkt_len) ||
  1877. hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
  1878. netdev_err(netdev, "truncated pkt\n");
  1879. u64_stats_update_begin(&ring->syncp);
  1880. ring->stats.err_pkt_len++;
  1881. u64_stats_update_end(&ring->syncp);
  1882. dev_kfree_skb_any(skb);
  1883. return -EFAULT;
  1884. }
  1885. if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
  1886. netdev_err(netdev, "L2 error pkt\n");
  1887. u64_stats_update_begin(&ring->syncp);
  1888. ring->stats.l2_err++;
  1889. u64_stats_update_end(&ring->syncp);
  1890. dev_kfree_skb_any(skb);
  1891. return -EFAULT;
  1892. }
  1893. u64_stats_update_begin(&ring->syncp);
  1894. ring->stats.rx_pkts++;
  1895. ring->stats.rx_bytes += skb->len;
  1896. u64_stats_update_end(&ring->syncp);
  1897. ring->tqp_vector->rx_group.total_bytes += skb->len;
  1898. hns3_rx_checksum(ring, skb, desc);
  1899. return 0;
  1900. }
  1901. int hns3_clean_rx_ring(
  1902. struct hns3_enet_ring *ring, int budget,
  1903. void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
  1904. {
  1905. #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
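/* RX buffers are refilled in batches: the ring is only replenished (and the
 * head register written) once at least RCB_NOF_ALLOC_RX_BUFF_ONCE descriptors
 * can be reused or reallocated, keeping the number of register writes down.
 */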
  1906. struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
  1907. int recv_pkts, recv_bds, clean_count, err;
  1908. int unused_count = hns3_desc_unused(ring);
  1909. struct sk_buff *skb = NULL;
  1910. int num, bnum = 0;
  1911. num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
1912. rmb(); /* Make sure num takes effect before other data is touched */
  1913. recv_pkts = 0, recv_bds = 0, clean_count = 0;
  1914. num -= unused_count;
  1915. while (recv_pkts < budget && recv_bds < num) {
  1916. /* Reuse or realloc buffers */
  1917. if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
  1918. hns3_nic_alloc_rx_buffers(ring,
  1919. clean_count + unused_count);
  1920. clean_count = 0;
  1921. unused_count = hns3_desc_unused(ring);
  1922. }
  1923. /* Poll one pkt */
  1924. err = hns3_handle_rx_bd(ring, &skb, &bnum);
  1925. if (unlikely(!skb)) /* This fault cannot be repaired */
  1926. goto out;
  1927. recv_bds += bnum;
  1928. clean_count += bnum;
1929. if (unlikely(err)) { /* Skip the erroneous packet */
  1930. recv_pkts++;
  1931. continue;
  1932. }
1933. /* Hand the packet up to the network stack */
  1934. skb->protocol = eth_type_trans(skb, netdev);
  1935. rx_fn(ring, skb);
  1936. recv_pkts++;
  1937. }
  1938. out:
1939. /* Make sure all data has been written before submit */
  1940. if (clean_count + unused_count > 0)
  1941. hns3_nic_alloc_rx_buffers(ring,
  1942. clean_count + unused_count);
  1943. return recv_pkts;
  1944. }
  1945. static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
  1946. {
  1947. struct hns3_enet_tqp_vector *tqp_vector =
  1948. ring_group->ring->tqp_vector;
  1949. enum hns3_flow_level_range new_flow_level;
  1950. int packets_per_msecs;
  1951. int bytes_per_msecs;
  1952. u32 time_passed_ms;
  1953. u16 new_int_gl;
  1954. if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
  1955. return false;
  1956. if (ring_group->total_packets == 0) {
  1957. ring_group->coal.int_gl = HNS3_INT_GL_50K;
  1958. ring_group->coal.flow_level = HNS3_FLOW_LOW;
  1959. return true;
  1960. }
1961. /* Simple throttle rate management
  1962. * 0-10MB/s lower (50000 ints/s)
  1963. * 10-20MB/s middle (20000 ints/s)
  1964. * 20-1249MB/s high (18000 ints/s)
  1965. * > 40000pps ultra (8000 ints/s)
  1966. */
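/* Example: if this RX group averaged 15000 bytes/ms since the last update
 * while at HNS3_FLOW_LOW, the level moves to HNS3_FLOW_MID below and int_gl
 * is switched to HNS3_INT_GL_20K (20000 interrupts/s).
 */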
  1967. new_flow_level = ring_group->coal.flow_level;
  1968. new_int_gl = ring_group->coal.int_gl;
  1969. time_passed_ms =
  1970. jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
  1971. if (!time_passed_ms)
  1972. return false;
  1973. do_div(ring_group->total_packets, time_passed_ms);
  1974. packets_per_msecs = ring_group->total_packets;
  1975. do_div(ring_group->total_bytes, time_passed_ms);
  1976. bytes_per_msecs = ring_group->total_bytes;
  1977. #define HNS3_RX_LOW_BYTE_RATE 10000
  1978. #define HNS3_RX_MID_BYTE_RATE 20000
  1979. switch (new_flow_level) {
  1980. case HNS3_FLOW_LOW:
  1981. if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
  1982. new_flow_level = HNS3_FLOW_MID;
  1983. break;
  1984. case HNS3_FLOW_MID:
  1985. if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
  1986. new_flow_level = HNS3_FLOW_HIGH;
  1987. else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
  1988. new_flow_level = HNS3_FLOW_LOW;
  1989. break;
  1990. case HNS3_FLOW_HIGH:
  1991. case HNS3_FLOW_ULTRA:
  1992. default:
  1993. if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
  1994. new_flow_level = HNS3_FLOW_MID;
  1995. break;
  1996. }
  1997. #define HNS3_RX_ULTRA_PACKET_RATE 40
  1998. if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
  1999. &tqp_vector->rx_group == ring_group)
  2000. new_flow_level = HNS3_FLOW_ULTRA;
  2001. switch (new_flow_level) {
  2002. case HNS3_FLOW_LOW:
  2003. new_int_gl = HNS3_INT_GL_50K;
  2004. break;
  2005. case HNS3_FLOW_MID:
  2006. new_int_gl = HNS3_INT_GL_20K;
  2007. break;
  2008. case HNS3_FLOW_HIGH:
  2009. new_int_gl = HNS3_INT_GL_18K;
  2010. break;
  2011. case HNS3_FLOW_ULTRA:
  2012. new_int_gl = HNS3_INT_GL_8K;
  2013. break;
  2014. default:
  2015. break;
  2016. }
  2017. ring_group->total_bytes = 0;
  2018. ring_group->total_packets = 0;
  2019. ring_group->coal.flow_level = new_flow_level;
  2020. if (new_int_gl != ring_group->coal.int_gl) {
  2021. ring_group->coal.int_gl = new_int_gl;
  2022. return true;
  2023. }
  2024. return false;
  2025. }
  2026. static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
  2027. {
  2028. struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
  2029. struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
  2030. bool rx_update, tx_update;
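/* GL adaptation is held off for int_adapt_down more polls after each update
 * (the counter is reset to HNS3_INT_ADAPT_DOWN_START below), so a freshly
 * programmed GL value can take effect before it is re-evaluated.
 */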
  2031. if (tqp_vector->int_adapt_down > 0) {
  2032. tqp_vector->int_adapt_down--;
  2033. return;
  2034. }
  2035. if (rx_group->coal.gl_adapt_enable) {
  2036. rx_update = hns3_get_new_int_gl(rx_group);
  2037. if (rx_update)
  2038. hns3_set_vector_coalesce_rx_gl(tqp_vector,
  2039. rx_group->coal.int_gl);
  2040. }
  2041. if (tx_group->coal.gl_adapt_enable) {
  2042. tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
  2043. if (tx_update)
  2044. hns3_set_vector_coalesce_tx_gl(tqp_vector,
  2045. tx_group->coal.int_gl);
  2046. }
  2047. tqp_vector->last_jiffies = jiffies;
  2048. tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
  2049. }
  2050. static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
  2051. {
  2052. struct hns3_enet_ring *ring;
  2053. int rx_pkt_total = 0;
  2054. struct hns3_enet_tqp_vector *tqp_vector =
  2055. container_of(napi, struct hns3_enet_tqp_vector, napi);
  2056. bool clean_complete = true;
  2057. int rx_budget;
  2058. /* Since the actual Tx work is minimal, we can give the Tx a larger
  2059. * budget and be more aggressive about cleaning up the Tx descriptors.
  2060. */
  2061. hns3_for_each_ring(ring, tqp_vector->tx_group) {
  2062. if (!hns3_clean_tx_ring(ring, budget))
  2063. clean_complete = false;
  2064. }
2065. /* make sure the rx ring budget is not smaller than 1 */
  2066. rx_budget = max(budget / tqp_vector->num_tqps, 1);
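/* e.g. with the NAPI weight of 64 used at registration (NAPI_POLL_WEIGHT)
 * and four RX rings on this vector, each ring is polled with a budget of 16.
 */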
  2067. hns3_for_each_ring(ring, tqp_vector->rx_group) {
  2068. int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
  2069. hns3_rx_skb);
  2070. if (rx_cleaned >= rx_budget)
  2071. clean_complete = false;
  2072. rx_pkt_total += rx_cleaned;
  2073. }
  2074. tqp_vector->rx_group.total_packets += rx_pkt_total;
  2075. if (!clean_complete)
  2076. return budget;
  2077. napi_complete(napi);
  2078. hns3_update_new_int_gl(tqp_vector);
  2079. hns3_mask_vector_irq(tqp_vector, 1);
  2080. return rx_pkt_total;
  2081. }
  2082. static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
  2083. struct hnae3_ring_chain_node *head)
  2084. {
  2085. struct pci_dev *pdev = tqp_vector->handle->pdev;
  2086. struct hnae3_ring_chain_node *cur_chain = head;
  2087. struct hnae3_ring_chain_node *chain;
  2088. struct hns3_enet_ring *tx_ring;
  2089. struct hns3_enet_ring *rx_ring;
  2090. tx_ring = tqp_vector->tx_group.ring;
  2091. if (tx_ring) {
  2092. cur_chain->tqp_index = tx_ring->tqp->tqp_index;
  2093. hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
  2094. HNAE3_RING_TYPE_TX);
  2095. hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
  2096. HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
  2097. cur_chain->next = NULL;
  2098. while (tx_ring->next) {
  2099. tx_ring = tx_ring->next;
  2100. chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
  2101. GFP_KERNEL);
  2102. if (!chain)
  2103. return -ENOMEM;
  2104. cur_chain->next = chain;
  2105. chain->tqp_index = tx_ring->tqp->tqp_index;
  2106. hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
  2107. HNAE3_RING_TYPE_TX);
  2108. hnae3_set_field(chain->int_gl_idx,
  2109. HNAE3_RING_GL_IDX_M,
  2110. HNAE3_RING_GL_IDX_S,
  2111. HNAE3_RING_GL_TX);
  2112. cur_chain = chain;
  2113. }
  2114. }
  2115. rx_ring = tqp_vector->rx_group.ring;
  2116. if (!tx_ring && rx_ring) {
  2117. cur_chain->next = NULL;
  2118. cur_chain->tqp_index = rx_ring->tqp->tqp_index;
  2119. hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
  2120. HNAE3_RING_TYPE_RX);
  2121. hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
  2122. HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
  2123. rx_ring = rx_ring->next;
  2124. }
  2125. while (rx_ring) {
  2126. chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
  2127. if (!chain)
  2128. return -ENOMEM;
  2129. cur_chain->next = chain;
  2130. chain->tqp_index = rx_ring->tqp->tqp_index;
  2131. hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
  2132. HNAE3_RING_TYPE_RX);
  2133. hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
  2134. HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
  2135. cur_chain = chain;
  2136. rx_ring = rx_ring->next;
  2137. }
  2138. return 0;
  2139. }
  2140. static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
  2141. struct hnae3_ring_chain_node *head)
  2142. {
  2143. struct pci_dev *pdev = tqp_vector->handle->pdev;
  2144. struct hnae3_ring_chain_node *chain_tmp, *chain;
  2145. chain = head->next;
  2146. while (chain) {
  2147. chain_tmp = chain->next;
  2148. devm_kfree(&pdev->dev, chain);
  2149. chain = chain_tmp;
  2150. }
  2151. }
  2152. static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
  2153. struct hns3_enet_ring *ring)
  2154. {
  2155. ring->next = group->ring;
  2156. group->ring = ring;
  2157. group->count++;
  2158. }
  2159. static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
  2160. {
  2161. struct hnae3_ring_chain_node vector_ring_chain;
  2162. struct hnae3_handle *h = priv->ae_handle;
  2163. struct hns3_enet_tqp_vector *tqp_vector;
  2164. int ret = 0;
  2165. u16 i;
  2166. for (i = 0; i < priv->vector_num; i++) {
  2167. tqp_vector = &priv->tqp_vector[i];
  2168. hns3_vector_gl_rl_init_hw(tqp_vector, priv);
  2169. tqp_vector->num_tqps = 0;
  2170. }
  2171. for (i = 0; i < h->kinfo.num_tqps; i++) {
  2172. u16 vector_i = i % priv->vector_num;
  2173. u16 tqp_num = h->kinfo.num_tqps;
  2174. tqp_vector = &priv->tqp_vector[vector_i];
  2175. hns3_add_ring_to_group(&tqp_vector->tx_group,
  2176. priv->ring_data[i].ring);
  2177. hns3_add_ring_to_group(&tqp_vector->rx_group,
  2178. priv->ring_data[i + tqp_num].ring);
  2179. priv->ring_data[i].ring->tqp_vector = tqp_vector;
  2180. priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
  2181. tqp_vector->num_tqps++;
  2182. }
  2183. for (i = 0; i < priv->vector_num; i++) {
  2184. tqp_vector = &priv->tqp_vector[i];
  2185. tqp_vector->rx_group.total_bytes = 0;
  2186. tqp_vector->rx_group.total_packets = 0;
  2187. tqp_vector->tx_group.total_bytes = 0;
  2188. tqp_vector->tx_group.total_packets = 0;
  2189. tqp_vector->handle = h;
  2190. ret = hns3_get_vector_ring_chain(tqp_vector,
  2191. &vector_ring_chain);
  2192. if (ret)
  2193. return ret;
  2194. ret = h->ae_algo->ops->map_ring_to_vector(h,
  2195. tqp_vector->vector_irq, &vector_ring_chain);
  2196. hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
  2197. if (ret)
  2198. return ret;
  2199. netif_napi_add(priv->netdev, &tqp_vector->napi,
  2200. hns3_nic_common_poll, NAPI_POLL_WEIGHT);
  2201. }
  2202. return 0;
  2203. }
  2204. static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
  2205. {
  2206. struct hnae3_handle *h = priv->ae_handle;
  2207. struct hns3_enet_tqp_vector *tqp_vector;
  2208. struct hnae3_vector_info *vector;
  2209. struct pci_dev *pdev = h->pdev;
  2210. u16 tqp_num = h->kinfo.num_tqps;
  2211. u16 vector_num;
  2212. int ret = 0;
  2213. u16 i;
2214. /* The RSS size, the number of online CPUs and vector_num should match */
2215. /* Should consider 2P/4P systems later */
  2216. vector_num = min_t(u16, num_online_cpus(), tqp_num);
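/* e.g. with 16 online CPUs and 8 TQPs at most 8 vectors are requested here;
 * get_vector() below may still grant fewer if the hardware has fewer
 * interrupt vectors available.
 */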
  2217. vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
  2218. GFP_KERNEL);
  2219. if (!vector)
  2220. return -ENOMEM;
  2221. vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
  2222. priv->vector_num = vector_num;
  2223. priv->tqp_vector = (struct hns3_enet_tqp_vector *)
  2224. devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
  2225. GFP_KERNEL);
  2226. if (!priv->tqp_vector) {
  2227. ret = -ENOMEM;
  2228. goto out;
  2229. }
  2230. for (i = 0; i < priv->vector_num; i++) {
  2231. tqp_vector = &priv->tqp_vector[i];
  2232. tqp_vector->idx = i;
  2233. tqp_vector->mask_addr = vector[i].io_addr;
  2234. tqp_vector->vector_irq = vector[i].vector;
  2235. hns3_vector_gl_rl_init(tqp_vector, priv);
  2236. }
  2237. out:
  2238. devm_kfree(&pdev->dev, vector);
  2239. return ret;
  2240. }
  2241. static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
  2242. {
  2243. group->ring = NULL;
  2244. group->count = 0;
  2245. }
  2246. static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
  2247. {
  2248. struct hnae3_ring_chain_node vector_ring_chain;
  2249. struct hnae3_handle *h = priv->ae_handle;
  2250. struct hns3_enet_tqp_vector *tqp_vector;
  2251. int i, ret;
  2252. for (i = 0; i < priv->vector_num; i++) {
  2253. tqp_vector = &priv->tqp_vector[i];
  2254. ret = hns3_get_vector_ring_chain(tqp_vector,
  2255. &vector_ring_chain);
  2256. if (ret)
  2257. return ret;
  2258. ret = h->ae_algo->ops->unmap_ring_from_vector(h,
  2259. tqp_vector->vector_irq, &vector_ring_chain);
  2260. if (ret)
  2261. return ret;
  2262. hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
  2263. if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
  2264. (void)irq_set_affinity_hint(
  2265. priv->tqp_vector[i].vector_irq,
  2266. NULL);
  2267. free_irq(priv->tqp_vector[i].vector_irq,
  2268. &priv->tqp_vector[i]);
  2269. }
  2270. priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
  2271. hns3_clear_ring_group(&tqp_vector->rx_group);
  2272. hns3_clear_ring_group(&tqp_vector->tx_group);
  2273. netif_napi_del(&priv->tqp_vector[i].napi);
  2274. }
  2275. return 0;
  2276. }
  2277. static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
  2278. {
  2279. struct hnae3_handle *h = priv->ae_handle;
  2280. struct pci_dev *pdev = h->pdev;
  2281. int i, ret;
  2282. for (i = 0; i < priv->vector_num; i++) {
  2283. struct hns3_enet_tqp_vector *tqp_vector;
  2284. tqp_vector = &priv->tqp_vector[i];
  2285. ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
  2286. if (ret)
  2287. return ret;
  2288. }
  2289. devm_kfree(&pdev->dev, priv->tqp_vector);
  2290. return 0;
  2291. }
  2292. static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
  2293. int ring_type)
  2294. {
  2295. struct hns3_nic_ring_data *ring_data = priv->ring_data;
  2296. int queue_num = priv->ae_handle->kinfo.num_tqps;
  2297. struct pci_dev *pdev = priv->ae_handle->pdev;
  2298. struct hns3_enet_ring *ring;
  2299. ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
  2300. if (!ring)
  2301. return -ENOMEM;
  2302. if (ring_type == HNAE3_RING_TYPE_TX) {
  2303. ring_data[q->tqp_index].ring = ring;
  2304. ring_data[q->tqp_index].queue_index = q->tqp_index;
  2305. ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
  2306. } else {
  2307. ring_data[q->tqp_index + queue_num].ring = ring;
  2308. ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
  2309. ring->io_base = q->io_base;
  2310. }
  2311. hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
  2312. ring->tqp = q;
  2313. ring->desc = NULL;
  2314. ring->desc_cb = NULL;
  2315. ring->dev = priv->dev;
  2316. ring->desc_dma_addr = 0;
  2317. ring->buf_size = q->buf_size;
  2318. ring->desc_num = q->desc_num;
  2319. ring->next_to_use = 0;
  2320. ring->next_to_clean = 0;
  2321. return 0;
  2322. }
  2323. static int hns3_queue_to_ring(struct hnae3_queue *tqp,
  2324. struct hns3_nic_priv *priv)
  2325. {
  2326. int ret;
  2327. ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
  2328. if (ret)
  2329. return ret;
  2330. ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
  2331. if (ret)
  2332. return ret;
  2333. return 0;
  2334. }
  2335. static int hns3_get_ring_config(struct hns3_nic_priv *priv)
  2336. {
  2337. struct hnae3_handle *h = priv->ae_handle;
  2338. struct pci_dev *pdev = h->pdev;
  2339. int i, ret;
  2340. priv->ring_data = devm_kzalloc(&pdev->dev,
  2341. array3_size(h->kinfo.num_tqps,
  2342. sizeof(*priv->ring_data),
  2343. 2),
  2344. GFP_KERNEL);
  2345. if (!priv->ring_data)
  2346. return -ENOMEM;
  2347. for (i = 0; i < h->kinfo.num_tqps; i++) {
  2348. ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
  2349. if (ret)
  2350. goto err;
  2351. }
  2352. return 0;
  2353. err:
  2354. devm_kfree(&pdev->dev, priv->ring_data);
  2355. return ret;
  2356. }
  2357. static void hns3_put_ring_config(struct hns3_nic_priv *priv)
  2358. {
  2359. struct hnae3_handle *h = priv->ae_handle;
  2360. int i;
  2361. for (i = 0; i < h->kinfo.num_tqps; i++) {
  2362. devm_kfree(priv->dev, priv->ring_data[i].ring);
  2363. devm_kfree(priv->dev,
  2364. priv->ring_data[i + h->kinfo.num_tqps].ring);
  2365. }
  2366. devm_kfree(priv->dev, priv->ring_data);
  2367. }
  2368. static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
  2369. {
  2370. int ret;
  2371. if (ring->desc_num <= 0 || ring->buf_size <= 0)
  2372. return -EINVAL;
  2373. ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
  2374. GFP_KERNEL);
  2375. if (!ring->desc_cb) {
  2376. ret = -ENOMEM;
  2377. goto out;
  2378. }
  2379. ret = hns3_alloc_desc(ring);
  2380. if (ret)
  2381. goto out_with_desc_cb;
  2382. if (!HNAE3_IS_TX_RING(ring)) {
  2383. ret = hns3_alloc_ring_buffers(ring);
  2384. if (ret)
  2385. goto out_with_desc;
  2386. }
  2387. return 0;
  2388. out_with_desc:
  2389. hns3_free_desc(ring);
  2390. out_with_desc_cb:
  2391. kfree(ring->desc_cb);
  2392. ring->desc_cb = NULL;
  2393. out:
  2394. return ret;
  2395. }
  2396. static void hns3_fini_ring(struct hns3_enet_ring *ring)
  2397. {
  2398. hns3_free_desc(ring);
  2399. kfree(ring->desc_cb);
  2400. ring->desc_cb = NULL;
  2401. ring->next_to_clean = 0;
  2402. ring->next_to_use = 0;
  2403. }
  2404. static int hns3_buf_size2type(u32 buf_size)
  2405. {
  2406. int bd_size_type;
  2407. switch (buf_size) {
  2408. case 512:
  2409. bd_size_type = HNS3_BD_SIZE_512_TYPE;
  2410. break;
  2411. case 1024:
  2412. bd_size_type = HNS3_BD_SIZE_1024_TYPE;
  2413. break;
  2414. case 2048:
  2415. bd_size_type = HNS3_BD_SIZE_2048_TYPE;
  2416. break;
  2417. case 4096:
  2418. bd_size_type = HNS3_BD_SIZE_4096_TYPE;
  2419. break;
  2420. default:
  2421. bd_size_type = HNS3_BD_SIZE_2048_TYPE;
  2422. }
  2423. return bd_size_type;
  2424. }
  2425. static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
  2426. {
  2427. dma_addr_t dma = ring->desc_dma_addr;
  2428. struct hnae3_queue *q = ring->tqp;
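/* The descriptor base address is programmed as two 32-bit halves; the upper
 * half is written as (dma >> 31) >> 1 rather than dma >> 32 so the shift
 * stays well-defined even when dma_addr_t is only 32 bits wide.
 */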
  2429. if (!HNAE3_IS_TX_RING(ring)) {
  2430. hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
  2431. (u32)dma);
  2432. hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
  2433. (u32)((dma >> 31) >> 1));
  2434. hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
  2435. hns3_buf_size2type(ring->buf_size));
  2436. hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
  2437. ring->desc_num / 8 - 1);
  2438. } else {
  2439. hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
  2440. (u32)dma);
  2441. hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
  2442. (u32)((dma >> 31) >> 1));
  2443. hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
  2444. ring->desc_num / 8 - 1);
  2445. }
  2446. }
  2447. static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
  2448. {
  2449. struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
  2450. int i;
  2451. for (i = 0; i < HNAE3_MAX_TC; i++) {
  2452. struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
  2453. int j;
  2454. if (!tc_info->enable)
  2455. continue;
  2456. for (j = 0; j < tc_info->tqp_count; j++) {
  2457. struct hnae3_queue *q;
  2458. q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
  2459. hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
  2460. tc_info->tc);
  2461. }
  2462. }
  2463. }
  2464. int hns3_init_all_ring(struct hns3_nic_priv *priv)
  2465. {
  2466. struct hnae3_handle *h = priv->ae_handle;
  2467. int ring_num = h->kinfo.num_tqps * 2;
  2468. int i, j;
  2469. int ret;
  2470. for (i = 0; i < ring_num; i++) {
  2471. ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
  2472. if (ret) {
  2473. dev_err(priv->dev,
  2474. "Alloc ring memory fail! ret=%d\n", ret);
  2475. goto out_when_alloc_ring_memory;
  2476. }
  2477. u64_stats_init(&priv->ring_data[i].ring->syncp);
  2478. }
  2479. return 0;
  2480. out_when_alloc_ring_memory:
  2481. for (j = i - 1; j >= 0; j--)
  2482. hns3_fini_ring(priv->ring_data[j].ring);
  2483. return -ENOMEM;
  2484. }
  2485. int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
  2486. {
  2487. struct hnae3_handle *h = priv->ae_handle;
  2488. int i;
  2489. for (i = 0; i < h->kinfo.num_tqps; i++) {
  2490. if (h->ae_algo->ops->reset_queue)
  2491. h->ae_algo->ops->reset_queue(h, i);
  2492. hns3_fini_ring(priv->ring_data[i].ring);
  2493. hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
  2494. }
  2495. return 0;
  2496. }
2497. /* Set the MAC address if it is configured, otherwise leave it to the AE driver */
  2498. static void hns3_init_mac_addr(struct net_device *netdev, bool init)
  2499. {
  2500. struct hns3_nic_priv *priv = netdev_priv(netdev);
  2501. struct hnae3_handle *h = priv->ae_handle;
  2502. u8 mac_addr_temp[ETH_ALEN];
  2503. if (h->ae_algo->ops->get_mac_addr && init) {
  2504. h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
  2505. ether_addr_copy(netdev->dev_addr, mac_addr_temp);
  2506. }
  2507. /* Check if the MAC address is valid, if not get a random one */
  2508. if (!is_valid_ether_addr(netdev->dev_addr)) {
  2509. eth_hw_addr_random(netdev);
  2510. dev_warn(priv->dev, "using random MAC address %pM\n",
  2511. netdev->dev_addr);
  2512. }
  2513. if (h->ae_algo->ops->set_mac_addr)
  2514. h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
  2515. }
  2516. static void hns3_uninit_mac_addr(struct net_device *netdev)
  2517. {
  2518. struct hns3_nic_priv *priv = netdev_priv(netdev);
  2519. struct hnae3_handle *h = priv->ae_handle;
  2520. if (h->ae_algo->ops->rm_uc_addr)
  2521. h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
  2522. }
  2523. static void hns3_nic_set_priv_ops(struct net_device *netdev)
  2524. {
  2525. struct hns3_nic_priv *priv = netdev_priv(netdev);
  2526. if ((netdev->features & NETIF_F_TSO) ||
  2527. (netdev->features & NETIF_F_TSO6)) {
  2528. priv->ops.fill_desc = hns3_fill_desc_tso;
  2529. priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
  2530. } else {
  2531. priv->ops.fill_desc = hns3_fill_desc;
  2532. priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
  2533. }
  2534. }
  2535. static int hns3_client_init(struct hnae3_handle *handle)
  2536. {
  2537. struct pci_dev *pdev = handle->pdev;
  2538. struct hns3_nic_priv *priv;
  2539. struct net_device *netdev;
  2540. int ret;
  2541. netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
  2542. hns3_get_max_available_channels(handle));
  2543. if (!netdev)
  2544. return -ENOMEM;
  2545. priv = netdev_priv(netdev);
  2546. priv->dev = &pdev->dev;
  2547. priv->netdev = netdev;
  2548. priv->ae_handle = handle;
  2549. priv->ae_handle->last_reset_time = jiffies;
  2550. priv->tx_timeout_count = 0;
  2551. handle->kinfo.netdev = netdev;
  2552. handle->priv = (void *)priv;
  2553. hns3_init_mac_addr(netdev, true);
  2554. hns3_set_default_feature(netdev);
  2555. netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
  2556. netdev->priv_flags |= IFF_UNICAST_FLT;
  2557. netdev->netdev_ops = &hns3_nic_netdev_ops;
  2558. SET_NETDEV_DEV(netdev, &pdev->dev);
  2559. hns3_ethtool_set_ops(netdev);
  2560. hns3_nic_set_priv_ops(netdev);
  2561. /* Carrier off reporting is important to ethtool even BEFORE open */
  2562. netif_carrier_off(netdev);
  2563. if (handle->flags & HNAE3_SUPPORT_VF)
  2564. handle->reset_level = HNAE3_VF_RESET;
  2565. else
  2566. handle->reset_level = HNAE3_FUNC_RESET;
  2567. ret = hns3_get_ring_config(priv);
  2568. if (ret) {
  2569. ret = -ENOMEM;
  2570. goto out_get_ring_cfg;
  2571. }
  2572. ret = hns3_nic_alloc_vector_data(priv);
  2573. if (ret) {
  2574. ret = -ENOMEM;
  2575. goto out_alloc_vector_data;
  2576. }
  2577. ret = hns3_nic_init_vector_data(priv);
  2578. if (ret) {
  2579. ret = -ENOMEM;
  2580. goto out_init_vector_data;
  2581. }
  2582. ret = hns3_init_all_ring(priv);
  2583. if (ret) {
  2584. ret = -ENOMEM;
  2585. goto out_init_ring_data;
  2586. }
  2587. ret = register_netdev(netdev);
  2588. if (ret) {
  2589. dev_err(priv->dev, "probe register netdev fail!\n");
  2590. goto out_reg_netdev_fail;
  2591. }
  2592. hns3_dcbnl_setup(handle);
  2593. /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
  2594. netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
  2595. return ret;
  2596. out_reg_netdev_fail:
  2597. out_init_ring_data:
  2598. (void)hns3_nic_uninit_vector_data(priv);
  2599. out_init_vector_data:
  2600. hns3_nic_dealloc_vector_data(priv);
  2601. out_alloc_vector_data:
  2602. priv->ring_data = NULL;
  2603. out_get_ring_cfg:
  2604. priv->ae_handle = NULL;
  2605. free_netdev(netdev);
  2606. return ret;
  2607. }
  2608. static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
  2609. {
  2610. struct net_device *netdev = handle->kinfo.netdev;
  2611. struct hns3_nic_priv *priv = netdev_priv(netdev);
  2612. int ret;
  2613. if (netdev->reg_state != NETREG_UNINITIALIZED)
  2614. unregister_netdev(netdev);
  2615. hns3_force_clear_all_rx_ring(handle);
  2616. ret = hns3_nic_uninit_vector_data(priv);
  2617. if (ret)
  2618. netdev_err(netdev, "uninit vector error\n");
  2619. ret = hns3_nic_dealloc_vector_data(priv);
  2620. if (ret)
  2621. netdev_err(netdev, "dealloc vector error\n");
  2622. ret = hns3_uninit_all_ring(priv);
  2623. if (ret)
  2624. netdev_err(netdev, "uninit ring error\n");
  2625. hns3_put_ring_config(priv);
  2626. priv->ring_data = NULL;
  2627. hns3_uninit_mac_addr(netdev);
  2628. free_netdev(netdev);
  2629. }
  2630. static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
  2631. {
  2632. struct net_device *netdev = handle->kinfo.netdev;
  2633. if (!netdev)
  2634. return;
  2635. if (linkup) {
  2636. netif_carrier_on(netdev);
  2637. netif_tx_wake_all_queues(netdev);
  2638. netdev_info(netdev, "link up\n");
  2639. } else {
  2640. netif_carrier_off(netdev);
  2641. netif_tx_stop_all_queues(netdev);
  2642. netdev_info(netdev, "link down\n");
  2643. }
  2644. }
  2645. static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
  2646. {
  2647. struct hnae3_knic_private_info *kinfo = &handle->kinfo;
  2648. struct net_device *ndev = kinfo->netdev;
  2649. bool if_running;
  2650. int ret;
  2651. if (tc > HNAE3_MAX_TC)
  2652. return -EINVAL;
  2653. if (!ndev)
  2654. return -ENODEV;
  2655. if_running = netif_running(ndev);
  2656. if (if_running) {
  2657. (void)hns3_nic_net_stop(ndev);
  2658. msleep(100);
  2659. }
  2660. ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
  2661. kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
  2662. if (ret)
  2663. goto err_out;
  2664. ret = hns3_nic_set_real_num_queue(ndev);
  2665. err_out:
  2666. if (if_running)
  2667. (void)hns3_nic_net_open(ndev);
  2668. return ret;
  2669. }
  2670. static void hns3_recover_hw_addr(struct net_device *ndev)
  2671. {
  2672. struct netdev_hw_addr_list *list;
  2673. struct netdev_hw_addr *ha, *tmp;
  2674. /* go through and sync uc_addr entries to the device */
  2675. list = &ndev->uc;
  2676. list_for_each_entry_safe(ha, tmp, &list->list, list)
  2677. hns3_nic_uc_sync(ndev, ha->addr);
  2678. /* go through and sync mc_addr entries to the device */
  2679. list = &ndev->mc;
  2680. list_for_each_entry_safe(ha, tmp, &list->list, list)
  2681. hns3_nic_mc_sync(ndev, ha->addr);
  2682. }
  2683. static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
  2684. {
  2685. while (ring->next_to_clean != ring->next_to_use) {
  2686. ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
  2687. hns3_free_buffer_detach(ring, ring->next_to_clean);
  2688. ring_ptr_move_fw(ring, next_to_clean);
  2689. }
  2690. }
  2691. static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
  2692. {
  2693. struct hns3_desc_cb res_cbs;
  2694. int ret;
  2695. while (ring->next_to_use != ring->next_to_clean) {
2696. /* When a buffer is not reused, its memory has been
2697. * freed in hns3_handle_rx_bd or will be freed by the
2698. * stack, so we need to replace the buffer here.
2699. */
  2700. if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
  2701. ret = hns3_reserve_buffer_map(ring, &res_cbs);
  2702. if (ret) {
  2703. u64_stats_update_begin(&ring->syncp);
  2704. ring->stats.sw_err_cnt++;
  2705. u64_stats_update_end(&ring->syncp);
2706. /* if allocating a new buffer fails, exit directly
2707. * and re-clear in the up flow.
2708. */
  2709. netdev_warn(ring->tqp->handle->kinfo.netdev,
  2710. "reserve buffer map failed, ret = %d\n",
  2711. ret);
  2712. return ret;
  2713. }
  2714. hns3_replace_buffer(ring, ring->next_to_use,
  2715. &res_cbs);
  2716. }
  2717. ring_ptr_move_fw(ring, next_to_use);
  2718. }
  2719. return 0;
  2720. }
  2721. static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
  2722. {
  2723. while (ring->next_to_use != ring->next_to_clean) {
2724. /* When a buffer is not reused, its memory has been
2725. * freed in hns3_handle_rx_bd or will be freed by the
2726. * stack, so we only need to unmap the buffer here.
2727. */
  2727. */
  2728. if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
  2729. hns3_unmap_buffer(ring,
  2730. &ring->desc_cb[ring->next_to_use]);
  2731. ring->desc_cb[ring->next_to_use].dma = 0;
  2732. }
  2733. ring_ptr_move_fw(ring, next_to_use);
  2734. }
  2735. }
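/* The RX rings are stored in ring_data[] after the TX rings, so each RX
 * ring is indexed at i + num_tqps and force-cleared.
 */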
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}
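/* Drain every TX ring (resetting its netdev TX queue state as well) and
 * clear every RX ring of the handle.
 */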
static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		hns3_clear_rx_ring(ring);
	}
}
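/* Reset each TQP, re-initialize its TX and RX rings in hardware and
 * reclaim any buffers left over from before the reset.
 */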
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		h->ae_algo->ops->reset_queue(h, i);
		hns3_init_ring_hw(priv->ring_data[i].ring);

		/* We need to clear the tx ring here because the self test
		 * will use the ring and does not run the down path before up.
		 */
		hns3_clear_tx_ring(priv->ring_data[i].ring);
		priv->ring_data[i].ring->next_to_clean = 0;
		priv->ring_data[i].ring->next_to_use = 0;

		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We cannot know the hardware head and tail when this
		 * function is called in the reset flow, so reuse all
		 * descriptors.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}
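/* The hns3_reset_notify_*() helpers below implement the four stages the
 * hnae3 framework signals during a reset: down, up, init and uninit.
 */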
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
		handle->last_reset_time = jiffies;
	}

	return ret;
}
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_init_mac_addr(netdev, false);
	hns3_nic_set_rx_mode(netdev);
	hns3_recover_hw_addr(netdev);

	/* The hardware table is only cleared when the PF resets */
	if (!(handle->flags & HNAE3_SUPPORT_VF))
		hns3_restore_vlan(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		return ret;

	ret = hns3_init_all_ring(priv);
	if (ret) {
		hns3_nic_uninit_vector_data(priv);
		priv->ring_data = NULL;
	}

	return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_force_clear_all_rx_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_uninit_mac_addr(netdev);

	return ret;
}
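/* Dispatch a reset notification from the hnae3 framework to the matching
 * stage handler above.
 */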
static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}
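/* Re-apply the saved TX/RX interrupt coalesce settings to every tqp
 * vector.
 */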
static void hns3_restore_coal(struct hns3_nic_priv *priv,
			      struct hns3_enet_coalesce *tx,
			      struct hns3_enet_coalesce *rx)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
		       sizeof(struct hns3_enet_coalesce));
	}
}
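/* Rebuild the queue setup for new_tqp_num channels: ask the ae_algo layer
 * to change the channel count, then re-create the ring config, vectors and
 * rings, restoring the saved coalesce settings along the way. The caller
 * is expected to have torn down the old vectors and rings already.
 */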
static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
			       struct hns3_enet_coalesce *tx,
			       struct hns3_enet_coalesce *rx)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
	if (ret)
		return ret;

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_alloc_vector;

	hns3_restore_coal(priv, tx, rx);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_put_ring;

	return 0;

err_put_ring:
	hns3_put_ring_config(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
	hns3_nic_dealloc_vector_data(priv);
	return ret;
}
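/* Round the requested queue count down to a multiple of num_tc so the
 * queues divide evenly among the traffic classes.
 */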
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}
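/* ethtool set_channels handler: validate the requested combined count,
 * tear down the current vectors and rings, switch to the new tqp number
 * and rebuild, reverting to the old tqp number if the change fails.
 */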
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hns3_enet_coalesce tx_coal, rx_coal;
	bool if_running = netif_running(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < kinfo->num_tc) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from %d to %d",
			kinfo->num_tc,
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
	if (kinfo->num_tqps == new_tqp_num)
		return 0;

	if (if_running)
		hns3_nic_net_stop(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		dev_err(&netdev->dev,
			"Unbind vector with tqp fail, nothing is changed");
		goto open_netdev;
	}

	/* Changing the tqp num may also change the vector num; ethtool
	 * only supports setting and querying one coalesce configuration
	 * for now, so save vector 0's coalesce configuration here in
	 * order to restore it.
	 */
	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);
	hns3_put_ring_config(priv);

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
	if (ret) {
		ret = hns3_modify_tqp_num(netdev, org_tqp_num,
					  &tx_coal, &rx_coal);
		if (ret) {
			/* If reverting to the old tqp num failed, a fatal
			 * error occurred.
			 */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, revert to old tqp num");
	}

open_netdev:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}
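/* Client callbacks registered with the hnae3 framework. */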
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register the hnae3 client and register
 * the driver with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);