ice_main.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2018, Intel Corporation. */
  3. /* Intel(R) Ethernet Connection E800 Series Linux Driver */
  4. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  5. #include "ice.h"
  6. #define DRV_VERSION "ice-0.0.1-k"
  7. #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
  8. static const char ice_drv_ver[] = DRV_VERSION;
  9. static const char ice_driver_string[] = DRV_SUMMARY;
  10. static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
  11. MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  12. MODULE_DESCRIPTION(DRV_SUMMARY);
  13. MODULE_LICENSE("GPL");
  14. MODULE_VERSION(DRV_VERSION);
  15. static int debug = -1;
  16. module_param(debug, int, 0644);
  17. #ifndef CONFIG_DYNAMIC_DEBUG
  18. MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
  19. #else
  20. MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
  21. #endif /* !CONFIG_DYNAMIC_DEBUG */
  22. static struct workqueue_struct *ice_wq;
  23. static const struct net_device_ops ice_netdev_ops;
  24. static int ice_vsi_release(struct ice_vsi *vsi);
  25. /**
26. * ice_get_free_slot - get the next free (NULL) location index in array
  27. * @array: array to search
  28. * @size: size of the array
  29. * @curr: last known occupied index to be used as a search hint
  30. *
  31. * void * is being used to keep the functionality generic. This lets us use this
  32. * function on any array of pointers.
  33. */
  34. static int ice_get_free_slot(void *array, int size, int curr)
  35. {
  36. int **tmp_array = (int **)array;
  37. int next;
  38. if (curr < (size - 1) && !tmp_array[curr + 1]) {
  39. next = curr + 1;
  40. } else {
  41. int i = 0;
  42. while ((i < size) && (tmp_array[i]))
  43. i++;
  44. if (i == size)
  45. next = ICE_NO_VSI;
  46. else
  47. next = i;
  48. }
  49. return next;
  50. }
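/* Illustrative example (values assumed): with an array of four VSI pointers
 * { vsiA, NULL, vsiB, NULL } and curr = 0, the fast path sees that slot 1 is
 * free and returns 1. With every slot occupied, the linear scan runs off the
 * end and ICE_NO_VSI is returned.
 */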
  51. /**
  52. * ice_search_res - Search the tracker for a block of resources
  53. * @res: pointer to the resource
  54. * @needed: size of the block needed
  55. * @id: identifier to track owner
  56. * Returns the base item index of the block, or -ENOMEM for error
  57. */
  58. static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
  59. {
  60. int start = res->search_hint;
  61. int end = start;
  62. id |= ICE_RES_VALID_BIT;
  63. do {
  64. /* skip already allocated entries */
  65. if (res->list[end++] & ICE_RES_VALID_BIT) {
  66. start = end;
  67. if ((start + needed) > res->num_entries)
  68. break;
  69. }
  70. if (end == (start + needed)) {
  71. int i = start;
  72. /* there was enough, so assign it to the requestor */
  73. while (i != end)
  74. res->list[i++] = id;
  75. if (end == res->num_entries)
  76. end = 0;
  77. res->search_hint = end;
  78. return start;
  79. }
  80. } while (1);
  81. return -ENOMEM;
  82. }
  83. /**
  84. * ice_get_res - get a block of resources
  85. * @pf: board private structure
  86. * @res: pointer to the resource
  87. * @needed: size of the block needed
  88. * @id: identifier to track owner
  89. *
  90. * Returns the base item index of the block, or -ENOMEM for error
92. * The search_hint trick and the lack of advanced fit-finding only work
93. * because we're highly likely to have requests that are all the same size.
94. * Linear search time and any fragmentation should be minimal.
  94. */
  95. static int
  96. ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
  97. {
  98. int ret;
  99. if (!res || !pf)
  100. return -EINVAL;
  101. if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
  102. dev_err(&pf->pdev->dev,
  103. "param err: needed=%d, num_entries = %d id=0x%04x\n",
  104. needed, res->num_entries, id);
  105. return -EINVAL;
  106. }
  107. /* search based on search_hint */
  108. ret = ice_search_res(res, needed, id);
  109. if (ret < 0) {
  110. /* previous search failed. Reset search hint and try again */
  111. res->search_hint = 0;
  112. ret = ice_search_res(res, needed, id);
  113. }
  114. return ret;
  115. }
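/* Illustrative example (values assumed): with a tracker of num_entries = 8,
 * search_hint = 6 and entries 6..7 already carrying ICE_RES_VALID_BIT, a
 * request for needed = 2 first fails because the window would run past the
 * end; ice_get_res() then resets the hint to 0 and the retry returns base
 * index 0, stamping entries 0..1 with the caller's id and moving
 * search_hint to 2.
 */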
  116. /**
  117. * ice_free_res - free a block of resources
  118. * @res: pointer to the resource
  119. * @index: starting index previously returned by ice_get_res
  120. * @id: identifier to track owner
  121. * Returns number of resources freed
  122. */
  123. static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
  124. {
  125. int count = 0;
  126. int i;
  127. if (!res || index >= res->num_entries)
  128. return -EINVAL;
  129. id |= ICE_RES_VALID_BIT;
  130. for (i = index; i < res->num_entries && res->list[i] == id; i++) {
  131. res->list[i] = 0;
  132. count++;
  133. }
  134. return count;
  135. }
  136. /**
137. * ice_add_mac_to_list - Add a MAC address filter entry to the list
138. * @vsi: the VSI to be forwarded to
139. * @add_list: pointer to the list which contains MAC filter entries
140. * @macaddr: the MAC address to be added.
141. *
142. * Adds a MAC address filter entry to the temp list
143. *
144. * Returns 0 on success or -ENOMEM on failure.
  145. */
  146. static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
  147. const u8 *macaddr)
  148. {
  149. struct ice_fltr_list_entry *tmp;
  150. struct ice_pf *pf = vsi->back;
  151. tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
  152. if (!tmp)
  153. return -ENOMEM;
  154. tmp->fltr_info.flag = ICE_FLTR_TX;
  155. tmp->fltr_info.src = vsi->vsi_num;
  156. tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
  157. tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  158. tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
  159. ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
  160. INIT_LIST_HEAD(&tmp->list_entry);
  161. list_add(&tmp->list_entry, add_list);
  162. return 0;
  163. }
  164. /**
  165. * ice_free_fltr_list - free filter lists helper
  166. * @dev: pointer to the device struct
  167. * @h: pointer to the list head to be freed
  168. *
  169. * Helper function to free filter lists previously created using
  170. * ice_add_mac_to_list
  171. */
  172. static void ice_free_fltr_list(struct device *dev, struct list_head *h)
  173. {
  174. struct ice_fltr_list_entry *e, *tmp;
  175. list_for_each_entry_safe(e, tmp, h, list_entry) {
  176. list_del(&e->list_entry);
  177. devm_kfree(dev, e);
  178. }
  179. }
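/* Typical usage (a sketch; ice_add_mac() from the switch code is assumed to
 * be the consumer, as done by callers elsewhere in the driver): build a
 * temporary list, hand it to the switch filter code, then always free it:
 *
 *	LIST_HEAD(tmp_add_list);
 *
 *	if (!ice_add_mac_to_list(vsi, &tmp_add_list, netdev->dev_addr))
 *		status = ice_add_mac(&pf->hw, &tmp_add_list);
 *	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
 */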
  180. /**
  181. * ice_print_link_msg - print link up or down message
  182. * @vsi: the VSI whose link status is being queried
183. * @isup: true if the link is up, false if it is down
  184. */
  185. static void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
  186. {
  187. const char *speed;
  188. const char *fc;
  189. if (vsi->current_isup == isup)
  190. return;
  191. vsi->current_isup = isup;
  192. if (!isup) {
  193. netdev_info(vsi->netdev, "NIC Link is Down\n");
  194. return;
  195. }
  196. switch (vsi->port_info->phy.link_info.link_speed) {
  197. case ICE_AQ_LINK_SPEED_40GB:
  198. speed = "40 G";
  199. break;
  200. case ICE_AQ_LINK_SPEED_25GB:
  201. speed = "25 G";
  202. break;
  203. case ICE_AQ_LINK_SPEED_20GB:
  204. speed = "20 G";
  205. break;
  206. case ICE_AQ_LINK_SPEED_10GB:
  207. speed = "10 G";
  208. break;
  209. case ICE_AQ_LINK_SPEED_5GB:
  210. speed = "5 G";
  211. break;
  212. case ICE_AQ_LINK_SPEED_2500MB:
  213. speed = "2.5 G";
  214. break;
  215. case ICE_AQ_LINK_SPEED_1000MB:
  216. speed = "1 G";
  217. break;
  218. case ICE_AQ_LINK_SPEED_100MB:
  219. speed = "100 M";
  220. break;
  221. default:
  222. speed = "Unknown";
  223. break;
  224. }
  225. switch (vsi->port_info->fc.current_mode) {
  226. case ICE_FC_FULL:
  227. fc = "RX/TX";
  228. break;
  229. case ICE_FC_TX_PAUSE:
  230. fc = "TX";
  231. break;
  232. case ICE_FC_RX_PAUSE:
  233. fc = "RX";
  234. break;
  235. default:
  236. fc = "Unknown";
  237. break;
  238. }
  239. netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
  240. speed, fc);
  241. }
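/* Example output (illustrative): a 25G link with full flow control logs
 * "NIC Link is up 25 Gbps, Flow Control: RX/TX"; a link-down transition
 * logs "NIC Link is Down". Calls that do not change the cached state print
 * nothing.
 */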
  242. /**
  243. * __ice_clean_ctrlq - helper function to clean controlq rings
  244. * @pf: ptr to struct ice_pf
  245. * @q_type: specific Control queue type
  246. */
  247. static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
  248. {
  249. struct ice_rq_event_info event;
  250. struct ice_hw *hw = &pf->hw;
  251. struct ice_ctl_q_info *cq;
  252. u16 pending, i = 0;
  253. const char *qtype;
  254. u32 oldval, val;
  255. switch (q_type) {
  256. case ICE_CTL_Q_ADMIN:
  257. cq = &hw->adminq;
  258. qtype = "Admin";
  259. break;
  260. default:
  261. dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
  262. q_type);
  263. return 0;
  264. }
  265. /* check for error indications - PF_xx_AxQLEN register layout for
  266. * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
  267. */
  268. val = rd32(hw, cq->rq.len);
  269. if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
  270. PF_FW_ARQLEN_ARQCRIT_M)) {
  271. oldval = val;
  272. if (val & PF_FW_ARQLEN_ARQVFE_M)
  273. dev_dbg(&pf->pdev->dev,
  274. "%s Receive Queue VF Error detected\n", qtype);
  275. if (val & PF_FW_ARQLEN_ARQOVFL_M) {
  276. dev_dbg(&pf->pdev->dev,
  277. "%s Receive Queue Overflow Error detected\n",
  278. qtype);
  279. }
  280. if (val & PF_FW_ARQLEN_ARQCRIT_M)
  281. dev_dbg(&pf->pdev->dev,
  282. "%s Receive Queue Critical Error detected\n",
  283. qtype);
  284. val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
  285. PF_FW_ARQLEN_ARQCRIT_M);
  286. if (oldval != val)
  287. wr32(hw, cq->rq.len, val);
  288. }
  289. val = rd32(hw, cq->sq.len);
  290. if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
  291. PF_FW_ATQLEN_ATQCRIT_M)) {
  292. oldval = val;
  293. if (val & PF_FW_ATQLEN_ATQVFE_M)
  294. dev_dbg(&pf->pdev->dev,
  295. "%s Send Queue VF Error detected\n", qtype);
  296. if (val & PF_FW_ATQLEN_ATQOVFL_M) {
  297. dev_dbg(&pf->pdev->dev,
  298. "%s Send Queue Overflow Error detected\n",
  299. qtype);
  300. }
  301. if (val & PF_FW_ATQLEN_ATQCRIT_M)
  302. dev_dbg(&pf->pdev->dev,
  303. "%s Send Queue Critical Error detected\n",
  304. qtype);
  305. val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
  306. PF_FW_ATQLEN_ATQCRIT_M);
  307. if (oldval != val)
  308. wr32(hw, cq->sq.len, val);
  309. }
  310. event.buf_len = cq->rq_buf_size;
  311. event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
  312. GFP_KERNEL);
  313. if (!event.msg_buf)
  314. return 0;
  315. do {
  316. enum ice_status ret;
  317. ret = ice_clean_rq_elem(hw, cq, &event, &pending);
  318. if (ret == ICE_ERR_AQ_NO_WORK)
  319. break;
  320. if (ret) {
  321. dev_err(&pf->pdev->dev,
  322. "%s Receive Queue event error %d\n", qtype,
  323. ret);
  324. break;
  325. }
  326. } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
  327. devm_kfree(&pf->pdev->dev, event.msg_buf);
  328. return pending && (i == ICE_DFLT_IRQ_WORK);
  329. }
  330. /**
  331. * ice_clean_adminq_subtask - clean the AdminQ rings
  332. * @pf: board private structure
  333. */
  334. static void ice_clean_adminq_subtask(struct ice_pf *pf)
  335. {
  336. struct ice_hw *hw = &pf->hw;
  337. u32 val;
  338. if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
  339. return;
  340. if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
  341. return;
  342. clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
  343. /* re-enable Admin queue interrupt causes */
  344. val = rd32(hw, PFINT_FW_CTL);
  345. wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
  346. ice_flush(hw);
  347. }
  348. /**
  349. * ice_service_task_schedule - schedule the service task to wake up
  350. * @pf: board private structure
  351. *
  352. * If not already scheduled, this puts the task into the work queue.
  353. */
  354. static void ice_service_task_schedule(struct ice_pf *pf)
  355. {
  356. if (!test_bit(__ICE_DOWN, pf->state) &&
  357. !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
  358. queue_work(ice_wq, &pf->serv_task);
  359. }
  360. /**
  361. * ice_service_task_complete - finish up the service task
  362. * @pf: board private structure
  363. */
  364. static void ice_service_task_complete(struct ice_pf *pf)
  365. {
  366. WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
  367. /* force memory (pf->state) to sync before next service task */
  368. smp_mb__before_atomic();
  369. clear_bit(__ICE_SERVICE_SCHED, pf->state);
  370. }
  371. /**
  372. * ice_service_timer - timer callback to schedule service task
  373. * @t: pointer to timer_list
  374. */
  375. static void ice_service_timer(struct timer_list *t)
  376. {
  377. struct ice_pf *pf = from_timer(pf, t, serv_tmr);
  378. mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
  379. ice_service_task_schedule(pf);
  380. }
  381. /**
  382. * ice_service_task - manage and run subtasks
  383. * @work: pointer to work_struct contained by the PF struct
  384. */
  385. static void ice_service_task(struct work_struct *work)
  386. {
  387. struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
  388. unsigned long start_time = jiffies;
  389. /* subtasks */
  390. ice_clean_adminq_subtask(pf);
  391. /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
  392. ice_service_task_complete(pf);
  393. /* If the tasks have taken longer than one service timer period
  394. * or there is more work to be done, reset the service timer to
  395. * schedule the service task now.
  396. */
  397. if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
  398. test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
  399. mod_timer(&pf->serv_tmr, jiffies);
  400. }
  401. /**
  402. * ice_set_ctrlq_len - helper function to set controlq length
  403. * @hw: pointer to the hw instance
  404. */
  405. static void ice_set_ctrlq_len(struct ice_hw *hw)
  406. {
  407. hw->adminq.num_rq_entries = ICE_AQ_LEN;
  408. hw->adminq.num_sq_entries = ICE_AQ_LEN;
  409. hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
  410. hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
  411. }
  412. /**
  413. * ice_irq_affinity_notify - Callback for affinity changes
  414. * @notify: context as to what irq was changed
  415. * @mask: the new affinity mask
  416. *
  417. * This is a callback function used by the irq_set_affinity_notifier function
  418. * so that we may register to receive changes to the irq affinity masks.
  419. */
  420. static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
  421. const cpumask_t *mask)
  422. {
  423. struct ice_q_vector *q_vector =
  424. container_of(notify, struct ice_q_vector, affinity_notify);
  425. cpumask_copy(&q_vector->affinity_mask, mask);
  426. }
  427. /**
  428. * ice_irq_affinity_release - Callback for affinity notifier release
  429. * @ref: internal core kernel usage
  430. *
  431. * This is a callback function used by the irq_set_affinity_notifier function
  432. * to inform the current notification subscriber that they will no longer
  433. * receive notifications.
  434. */
  435. static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
  436. /**
  437. * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
  438. * @vsi: the VSI being un-configured
  439. */
  440. static void ice_vsi_dis_irq(struct ice_vsi *vsi)
  441. {
  442. struct ice_pf *pf = vsi->back;
  443. struct ice_hw *hw = &pf->hw;
  444. int base = vsi->base_vector;
  445. u32 val;
  446. int i;
  447. /* disable interrupt causation from each queue */
  448. if (vsi->tx_rings) {
  449. ice_for_each_txq(vsi, i) {
  450. if (vsi->tx_rings[i]) {
  451. u16 reg;
  452. reg = vsi->tx_rings[i]->reg_idx;
  453. val = rd32(hw, QINT_TQCTL(reg));
  454. val &= ~QINT_TQCTL_CAUSE_ENA_M;
  455. wr32(hw, QINT_TQCTL(reg), val);
  456. }
  457. }
  458. }
  459. if (vsi->rx_rings) {
  460. ice_for_each_rxq(vsi, i) {
  461. if (vsi->rx_rings[i]) {
  462. u16 reg;
  463. reg = vsi->rx_rings[i]->reg_idx;
  464. val = rd32(hw, QINT_RQCTL(reg));
  465. val &= ~QINT_RQCTL_CAUSE_ENA_M;
  466. wr32(hw, QINT_RQCTL(reg), val);
  467. }
  468. }
  469. }
  470. /* disable each interrupt */
  471. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
  472. for (i = vsi->base_vector;
  473. i < (vsi->num_q_vectors + vsi->base_vector); i++)
  474. wr32(hw, GLINT_DYN_CTL(i), 0);
  475. ice_flush(hw);
  476. for (i = 0; i < vsi->num_q_vectors; i++)
  477. synchronize_irq(pf->msix_entries[i + base].vector);
  478. }
  479. }
  480. /**
  481. * ice_vsi_ena_irq - Enable IRQ for the given VSI
  482. * @vsi: the VSI being configured
  483. */
  484. static int ice_vsi_ena_irq(struct ice_vsi *vsi)
  485. {
  486. struct ice_pf *pf = vsi->back;
  487. struct ice_hw *hw = &pf->hw;
  488. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
  489. int i;
  490. for (i = 0; i < vsi->num_q_vectors; i++)
  491. ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
  492. }
  493. ice_flush(hw);
  494. return 0;
  495. }
  496. /**
  497. * ice_vsi_delete - delete a VSI from the switch
  498. * @vsi: pointer to VSI being removed
  499. */
  500. static void ice_vsi_delete(struct ice_vsi *vsi)
  501. {
  502. struct ice_pf *pf = vsi->back;
  503. struct ice_vsi_ctx ctxt;
  504. enum ice_status status;
  505. ctxt.vsi_num = vsi->vsi_num;
  506. memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
  507. status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
  508. if (status)
  509. dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
  510. vsi->vsi_num);
  511. }
  512. /**
  513. * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
  514. * @vsi: the VSI being configured
  515. * @basename: name for the vector
  516. */
  517. static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
  518. {
  519. int q_vectors = vsi->num_q_vectors;
  520. struct ice_pf *pf = vsi->back;
  521. int base = vsi->base_vector;
  522. int rx_int_idx = 0;
  523. int tx_int_idx = 0;
  524. int vector, err;
  525. int irq_num;
  526. for (vector = 0; vector < q_vectors; vector++) {
  527. struct ice_q_vector *q_vector = vsi->q_vectors[vector];
  528. irq_num = pf->msix_entries[base + vector].vector;
  529. if (q_vector->tx.ring && q_vector->rx.ring) {
  530. snprintf(q_vector->name, sizeof(q_vector->name) - 1,
  531. "%s-%s-%d", basename, "TxRx", rx_int_idx++);
  532. tx_int_idx++;
  533. } else if (q_vector->rx.ring) {
  534. snprintf(q_vector->name, sizeof(q_vector->name) - 1,
  535. "%s-%s-%d", basename, "rx", rx_int_idx++);
  536. } else if (q_vector->tx.ring) {
  537. snprintf(q_vector->name, sizeof(q_vector->name) - 1,
  538. "%s-%s-%d", basename, "tx", tx_int_idx++);
  539. } else {
  540. /* skip this unused q_vector */
  541. continue;
  542. }
  543. err = devm_request_irq(&pf->pdev->dev,
  544. pf->msix_entries[base + vector].vector,
  545. vsi->irq_handler, 0, q_vector->name,
  546. q_vector);
  547. if (err) {
  548. netdev_err(vsi->netdev,
  549. "MSIX request_irq failed, error: %d\n", err);
  550. goto free_q_irqs;
  551. }
  552. /* register for affinity change notifications */
  553. q_vector->affinity_notify.notify = ice_irq_affinity_notify;
  554. q_vector->affinity_notify.release = ice_irq_affinity_release;
  555. irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
  556. /* assign the mask for this irq */
  557. irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
  558. }
  559. vsi->irqs_ready = true;
  560. return 0;
  561. free_q_irqs:
  562. while (vector) {
  563. vector--;
564. irq_num = pf->msix_entries[base + vector].vector;
565. irq_set_affinity_notifier(irq_num, NULL);
566. irq_set_affinity_hint(irq_num, NULL);
567. devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
  568. }
  569. return err;
  570. }
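/* Naming example (illustrative): with basename "ice-eth0", a vector that
 * serves both Tx and Rx rings is requested as "ice-eth0-TxRx-0", an Rx-only
 * vector as "ice-eth0-rx-<n>", and a Tx-only vector as "ice-eth0-tx-<n>";
 * these are the names that show up in /proc/interrupts.
 */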
  571. /**
  572. * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
  573. * @vsi: the VSI being configured
  574. */
  575. static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
  576. {
  577. struct ice_hw_common_caps *cap;
  578. struct ice_pf *pf = vsi->back;
  579. if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
  580. vsi->rss_size = 1;
  581. return;
  582. }
  583. cap = &pf->hw.func_caps.common_cap;
  584. switch (vsi->type) {
  585. case ICE_VSI_PF:
  586. /* PF VSI will inherit RSS instance of PF */
  587. vsi->rss_table_size = cap->rss_table_size;
  588. vsi->rss_size = min_t(int, num_online_cpus(),
  589. BIT(cap->rss_table_entry_width));
  590. vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
  591. break;
  592. default:
  593. dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
  594. break;
  595. }
  596. }
  597. /**
  598. * ice_vsi_setup_q_map - Setup a VSI queue map
  599. * @vsi: the VSI being configured
  600. * @ctxt: VSI context structure
  601. */
  602. static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
  603. {
  604. u16 offset = 0, qmap = 0, numq_tc;
  605. u16 pow = 0, max_rss = 0, qcount;
  606. u16 qcount_tx = vsi->alloc_txq;
  607. u16 qcount_rx = vsi->alloc_rxq;
  608. bool ena_tc0 = false;
  609. int i;
  610. /* at least TC0 should be enabled by default */
  611. if (vsi->tc_cfg.numtc) {
  612. if (!(vsi->tc_cfg.ena_tc & BIT(0)))
  613. ena_tc0 = true;
  614. } else {
  615. ena_tc0 = true;
  616. }
  617. if (ena_tc0) {
  618. vsi->tc_cfg.numtc++;
  619. vsi->tc_cfg.ena_tc |= 1;
  620. }
  621. numq_tc = qcount_rx / vsi->tc_cfg.numtc;
  622. /* TC mapping is a function of the number of Rx queues assigned to the
  623. * VSI for each traffic class and the offset of these queues.
624. * The first 10 bits are for the queue offset for TC0, the next 4 bits for
625. * the number of queues allocated to TC0. The number of queues is a power of 2.
626. *
627. * If a TC is not enabled, its queue offset is set to 0 and one queue is
628. * allocated; this way, traffic for the given TC will be sent to the
629. * default queue.
  630. *
  631. * Setup number and offset of Rx queues for all TCs for the VSI
  632. */
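/* Worked example (illustrative, values assumed): with offset = 0 and
 * qcount = 4, pow = ilog2(4) = 2, so an enabled TC gets
 * qmap = (0 << ICE_AQ_VSI_TC_Q_OFFSET_S) | (2 << ICE_AQ_VSI_TC_Q_NUM_S),
 * i.e. four queues starting at queue 0. A qcount of 6 is not a power of
 * two, so pow is bumped to 3 and the map advertises an eight-queue region.
 */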
  633. /* qcount will change if RSS is enabled */
  634. if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
  635. if (vsi->type == ICE_VSI_PF)
  636. max_rss = ICE_MAX_LG_RSS_QS;
  637. else
  638. max_rss = ICE_MAX_SMALL_RSS_QS;
  639. qcount = min_t(int, numq_tc, max_rss);
  640. qcount = min_t(int, qcount, vsi->rss_size);
  641. } else {
  642. qcount = numq_tc;
  643. }
644. /* find the exponent of the next power-of-2 >= qcount */
  645. pow = ilog2(qcount);
  646. if (!is_power_of_2(qcount))
  647. pow++;
  648. for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
  649. if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
  650. /* TC is not enabled */
  651. vsi->tc_cfg.tc_info[i].qoffset = 0;
  652. vsi->tc_cfg.tc_info[i].qcount = 1;
  653. ctxt->info.tc_mapping[i] = 0;
  654. continue;
  655. }
  656. /* TC is enabled */
  657. vsi->tc_cfg.tc_info[i].qoffset = offset;
  658. vsi->tc_cfg.tc_info[i].qcount = qcount;
  659. qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
  660. ICE_AQ_VSI_TC_Q_OFFSET_M) |
  661. ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
  662. ICE_AQ_VSI_TC_Q_NUM_M);
  663. offset += qcount;
  664. ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
  665. }
  666. vsi->num_txq = qcount_tx;
  667. vsi->num_rxq = offset;
  668. /* Rx queue mapping */
  669. ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
  670. /* q_mapping buffer holds the info for the first queue allocated for
  671. * this VSI in the PF space and also the number of queues associated
  672. * with this VSI.
  673. */
  674. ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
  675. ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
  676. }
  677. /**
  678. * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
  679. * @ctxt: the VSI context being set
  680. *
  681. * This initializes a default VSI context for all sections except the Queues.
  682. */
  683. static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
  684. {
  685. u32 table = 0;
  686. memset(&ctxt->info, 0, sizeof(ctxt->info));
687. /* VSIs should be allocated from the shared pool */
  688. ctxt->alloc_from_pool = true;
  689. /* Src pruning enabled by default */
  690. ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
  691. /* Traffic from VSI can be sent to LAN */
  692. ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
  693. /* Allow all packets untagged/tagged */
  694. ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
  695. ICE_AQ_VSI_PVLAN_MODE_M) >>
  696. ICE_AQ_VSI_PVLAN_MODE_S);
  697. /* Show VLAN/UP from packets in Rx descriptors */
  698. ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
  699. ICE_AQ_VSI_PVLAN_EMOD_M) >>
  700. ICE_AQ_VSI_PVLAN_EMOD_S);
  701. /* Have 1:1 UP mapping for both ingress/egress tables */
  702. table |= ICE_UP_TABLE_TRANSLATE(0, 0);
  703. table |= ICE_UP_TABLE_TRANSLATE(1, 1);
  704. table |= ICE_UP_TABLE_TRANSLATE(2, 2);
  705. table |= ICE_UP_TABLE_TRANSLATE(3, 3);
  706. table |= ICE_UP_TABLE_TRANSLATE(4, 4);
  707. table |= ICE_UP_TABLE_TRANSLATE(5, 5);
  708. table |= ICE_UP_TABLE_TRANSLATE(6, 6);
  709. table |= ICE_UP_TABLE_TRANSLATE(7, 7);
  710. ctxt->info.ingress_table = cpu_to_le32(table);
  711. ctxt->info.egress_table = cpu_to_le32(table);
  712. /* Have 1:1 UP mapping for outer to inner UP table */
  713. ctxt->info.outer_up_table = cpu_to_le32(table);
714. /* No outer tag support; outer_tag_flags remains zero */
  715. }
  716. /**
  717. * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
  718. * @ctxt: the VSI context being set
  719. * @vsi: the VSI being configured
  720. */
  721. static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
  722. {
  723. u8 lut_type, hash_type;
  724. switch (vsi->type) {
  725. case ICE_VSI_PF:
  726. /* PF VSI will inherit RSS instance of PF */
  727. lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
  728. hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
  729. break;
  730. default:
  731. dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
  732. vsi->type);
  733. return;
  734. }
  735. ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
  736. ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
  737. ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
  738. ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
  739. }
  740. /**
  741. * ice_vsi_add - Create a new VSI or fetch preallocated VSI
  742. * @vsi: the VSI being configured
  743. *
  744. * This initializes a VSI context depending on the VSI type to be added and
  745. * passes it down to the add_vsi aq command to create a new VSI.
  746. */
  747. static int ice_vsi_add(struct ice_vsi *vsi)
  748. {
  749. struct ice_vsi_ctx ctxt = { 0 };
  750. struct ice_pf *pf = vsi->back;
  751. struct ice_hw *hw = &pf->hw;
  752. int ret = 0;
  753. switch (vsi->type) {
  754. case ICE_VSI_PF:
  755. ctxt.flags = ICE_AQ_VSI_TYPE_PF;
  756. break;
  757. default:
  758. return -ENODEV;
  759. }
  760. ice_set_dflt_vsi_ctx(&ctxt);
  761. /* if the switch is in VEB mode, allow VSI loopback */
  762. if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
  763. ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
  764. /* Set LUT type and HASH type if RSS is enabled */
  765. if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
  766. ice_set_rss_vsi_ctx(&ctxt, vsi);
  767. ctxt.info.sw_id = vsi->port_info->sw_id;
  768. ice_vsi_setup_q_map(vsi, &ctxt);
  769. ret = ice_aq_add_vsi(hw, &ctxt, NULL);
  770. if (ret) {
  771. dev_err(&vsi->back->pdev->dev,
  772. "Add VSI AQ call failed, err %d\n", ret);
  773. return -EIO;
  774. }
  775. vsi->info = ctxt.info;
  776. vsi->vsi_num = ctxt.vsi_num;
  777. return ret;
  778. }
  779. /**
  780. * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
  781. * @vsi: the VSI being cleaned up
  782. */
  783. static void ice_vsi_release_msix(struct ice_vsi *vsi)
  784. {
  785. struct ice_pf *pf = vsi->back;
  786. u16 vector = vsi->base_vector;
  787. struct ice_hw *hw = &pf->hw;
  788. u32 txq = 0;
  789. u32 rxq = 0;
  790. int i, q;
  791. for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
  792. struct ice_q_vector *q_vector = vsi->q_vectors[i];
  793. wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
  794. wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
  795. for (q = 0; q < q_vector->num_ring_tx; q++) {
  796. wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
  797. txq++;
  798. }
  799. for (q = 0; q < q_vector->num_ring_rx; q++) {
  800. wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
  801. rxq++;
  802. }
  803. }
  804. ice_flush(hw);
  805. }
  806. /**
  807. * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
  808. * @vsi: the VSI having rings deallocated
  809. */
  810. static void ice_vsi_clear_rings(struct ice_vsi *vsi)
  811. {
  812. int i;
  813. if (vsi->tx_rings) {
  814. for (i = 0; i < vsi->alloc_txq; i++) {
  815. if (vsi->tx_rings[i]) {
  816. kfree_rcu(vsi->tx_rings[i], rcu);
  817. vsi->tx_rings[i] = NULL;
  818. }
  819. }
  820. }
  821. if (vsi->rx_rings) {
  822. for (i = 0; i < vsi->alloc_rxq; i++) {
  823. if (vsi->rx_rings[i]) {
  824. kfree_rcu(vsi->rx_rings[i], rcu);
  825. vsi->rx_rings[i] = NULL;
  826. }
  827. }
  828. }
  829. }
  830. /**
  831. * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
  832. * @vsi: VSI which is having rings allocated
  833. */
  834. static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
  835. {
  836. struct ice_pf *pf = vsi->back;
  837. int i;
  838. /* Allocate tx_rings */
  839. for (i = 0; i < vsi->alloc_txq; i++) {
  840. struct ice_ring *ring;
  841. /* allocate with kzalloc(), free with kfree_rcu() */
  842. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  843. if (!ring)
  844. goto err_out;
  845. ring->q_index = i;
  846. ring->reg_idx = vsi->txq_map[i];
  847. ring->ring_active = false;
  848. ring->vsi = vsi;
  849. ring->netdev = vsi->netdev;
  850. ring->dev = &pf->pdev->dev;
  851. ring->count = vsi->num_desc;
  852. vsi->tx_rings[i] = ring;
  853. }
  854. /* Allocate rx_rings */
  855. for (i = 0; i < vsi->alloc_rxq; i++) {
  856. struct ice_ring *ring;
  857. /* allocate with kzalloc(), free with kfree_rcu() */
  858. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  859. if (!ring)
  860. goto err_out;
  861. ring->q_index = i;
  862. ring->reg_idx = vsi->rxq_map[i];
  863. ring->ring_active = false;
  864. ring->vsi = vsi;
  865. ring->netdev = vsi->netdev;
  866. ring->dev = &pf->pdev->dev;
  867. ring->count = vsi->num_desc;
  868. vsi->rx_rings[i] = ring;
  869. }
  870. return 0;
  871. err_out:
  872. ice_vsi_clear_rings(vsi);
  873. return -ENOMEM;
  874. }
  875. /**
  876. * ice_vsi_free_irq - Free the irq association with the OS
  877. * @vsi: the VSI being configured
  878. */
  879. static void ice_vsi_free_irq(struct ice_vsi *vsi)
  880. {
  881. struct ice_pf *pf = vsi->back;
  882. int base = vsi->base_vector;
  883. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
  884. int i;
  885. if (!vsi->q_vectors || !vsi->irqs_ready)
  886. return;
  887. vsi->irqs_ready = false;
  888. for (i = 0; i < vsi->num_q_vectors; i++) {
  889. u16 vector = i + base;
  890. int irq_num;
  891. irq_num = pf->msix_entries[vector].vector;
  892. /* free only the irqs that were actually requested */
  893. if (!vsi->q_vectors[i] ||
  894. !(vsi->q_vectors[i]->num_ring_tx ||
  895. vsi->q_vectors[i]->num_ring_rx))
  896. continue;
  897. /* clear the affinity notifier in the IRQ descriptor */
  898. irq_set_affinity_notifier(irq_num, NULL);
  899. /* clear the affinity_mask in the IRQ descriptor */
  900. irq_set_affinity_hint(irq_num, NULL);
  901. synchronize_irq(irq_num);
  902. devm_free_irq(&pf->pdev->dev, irq_num,
  903. vsi->q_vectors[i]);
  904. }
  905. ice_vsi_release_msix(vsi);
  906. }
  907. }
  908. /**
  909. * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
  910. * @vsi: the VSI being configured
  911. */
  912. static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
  913. {
  914. struct ice_pf *pf = vsi->back;
  915. u16 vector = vsi->base_vector;
  916. struct ice_hw *hw = &pf->hw;
  917. u32 txq = 0, rxq = 0;
  918. int i, q, itr;
  919. u8 itr_gran;
  920. for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
  921. struct ice_q_vector *q_vector = vsi->q_vectors[i];
  922. itr_gran = hw->itr_gran_200;
  923. if (q_vector->num_ring_rx) {
  924. q_vector->rx.itr =
  925. ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
  926. itr_gran);
  927. q_vector->rx.latency_range = ICE_LOW_LATENCY;
  928. }
  929. if (q_vector->num_ring_tx) {
  930. q_vector->tx.itr =
  931. ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
  932. itr_gran);
  933. q_vector->tx.latency_range = ICE_LOW_LATENCY;
  934. }
  935. wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
  936. wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
937. /* Both the Transmit Queue Interrupt Cause Control register
938. * and the Receive Queue Interrupt Cause Control register
939. * expect the MSIX_INDX field to be the vector index
940. * within the function space and not the absolute
941. * vector index across the PF or across the device.
942. * For SR-IOV VF VSIs the queue vector index always starts
943. * at 1, since the first vector index (0) is used for the OICR
944. * in VF space. Since VMDq and other PF VSIs are within
945. * the PF function space, use the vector index that is
946. * tracked for this PF.
947. */
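/* Illustrative composition (value assumed): for function-relative vector 3,
 * the Tx cause register below is written with
 * QINT_TQCTL_CAUSE_ENA_M | (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) |
 * (3 << QINT_TQCTL_MSIX_INDX_S), i.e. "fire vector 3 using the Tx ITR index".
 */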
  948. for (q = 0; q < q_vector->num_ring_tx; q++) {
  949. u32 val;
  950. itr = ICE_TX_ITR;
  951. val = QINT_TQCTL_CAUSE_ENA_M |
  952. (itr << QINT_TQCTL_ITR_INDX_S) |
  953. (vector << QINT_TQCTL_MSIX_INDX_S);
  954. wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
  955. txq++;
  956. }
  957. for (q = 0; q < q_vector->num_ring_rx; q++) {
  958. u32 val;
  959. itr = ICE_RX_ITR;
  960. val = QINT_RQCTL_CAUSE_ENA_M |
  961. (itr << QINT_RQCTL_ITR_INDX_S) |
  962. (vector << QINT_RQCTL_MSIX_INDX_S);
  963. wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
  964. rxq++;
  965. }
  966. }
  967. ice_flush(hw);
  968. }
  969. /**
  970. * ice_ena_misc_vector - enable the non-queue interrupts
  971. * @pf: board private structure
  972. */
  973. static void ice_ena_misc_vector(struct ice_pf *pf)
  974. {
  975. struct ice_hw *hw = &pf->hw;
  976. u32 val;
  977. /* clear things first */
  978. wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
  979. rd32(hw, PFINT_OICR); /* read to clear */
  980. val = (PFINT_OICR_HLP_RDY_M |
  981. PFINT_OICR_CPM_RDY_M |
  982. PFINT_OICR_ECC_ERR_M |
  983. PFINT_OICR_MAL_DETECT_M |
  984. PFINT_OICR_GRST_M |
  985. PFINT_OICR_PCI_EXCEPTION_M |
  986. PFINT_OICR_GPIO_M |
  987. PFINT_OICR_STORM_DETECT_M |
  988. PFINT_OICR_HMC_ERR_M);
  989. wr32(hw, PFINT_OICR_ENA, val);
  990. /* SW_ITR_IDX = 0, but don't change INTENA */
  991. wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
  992. GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
  993. }
  994. /**
  995. * ice_misc_intr - misc interrupt handler
  996. * @irq: interrupt number
  997. * @data: pointer to a q_vector
  998. */
  999. static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
  1000. {
  1001. struct ice_pf *pf = (struct ice_pf *)data;
  1002. struct ice_hw *hw = &pf->hw;
  1003. irqreturn_t ret = IRQ_NONE;
  1004. u32 oicr, ena_mask;
  1005. set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
  1006. oicr = rd32(hw, PFINT_OICR);
  1007. ena_mask = rd32(hw, PFINT_OICR_ENA);
  1008. if (!(oicr & PFINT_OICR_INTEVENT_M))
  1009. goto ena_intr;
  1010. if (oicr & PFINT_OICR_HMC_ERR_M) {
  1011. ena_mask &= ~PFINT_OICR_HMC_ERR_M;
  1012. dev_dbg(&pf->pdev->dev,
  1013. "HMC Error interrupt - info 0x%x, data 0x%x\n",
  1014. rd32(hw, PFHMC_ERRORINFO),
  1015. rd32(hw, PFHMC_ERRORDATA));
  1016. }
  1017. /* Report and mask off any remaining unexpected interrupts */
  1018. oicr &= ena_mask;
  1019. if (oicr) {
  1020. dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
  1021. oicr);
  1022. /* If a critical error is pending there is no choice but to
  1023. * reset the device.
  1024. */
  1025. if (oicr & (PFINT_OICR_PE_CRITERR_M |
  1026. PFINT_OICR_PCI_EXCEPTION_M |
  1027. PFINT_OICR_ECC_ERR_M))
  1028. set_bit(__ICE_PFR_REQ, pf->state);
  1029. ena_mask &= ~oicr;
  1030. }
  1031. ret = IRQ_HANDLED;
  1032. ena_intr:
  1033. /* re-enable interrupt causes that are not handled during this pass */
  1034. wr32(hw, PFINT_OICR_ENA, ena_mask);
  1035. if (!test_bit(__ICE_DOWN, pf->state)) {
  1036. ice_service_task_schedule(pf);
  1037. ice_irq_dynamic_ena(hw, NULL, NULL);
  1038. }
  1039. return ret;
  1040. }
  1041. /**
  1042. * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
  1043. * @vsi: the VSI being configured
  1044. *
  1045. * This function maps descriptor rings to the queue-specific vectors allotted
  1046. * through the MSI-X enabling code. On a constrained vector budget, we map Tx
  1047. * and Rx rings to the vector as "efficiently" as possible.
  1048. */
  1049. static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
  1050. {
  1051. int q_vectors = vsi->num_q_vectors;
  1052. int tx_rings_rem, rx_rings_rem;
  1053. int v_id;
  1054. /* initially assigning remaining rings count to VSIs num queue value */
  1055. tx_rings_rem = vsi->num_txq;
  1056. rx_rings_rem = vsi->num_rxq;
  1057. for (v_id = 0; v_id < q_vectors; v_id++) {
  1058. struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
  1059. int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
  1060. /* Tx rings mapping to vector */
  1061. tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
  1062. q_vector->num_ring_tx = tx_rings_per_v;
  1063. q_vector->tx.ring = NULL;
  1064. q_base = vsi->num_txq - tx_rings_rem;
  1065. for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
  1066. struct ice_ring *tx_ring = vsi->tx_rings[q_id];
  1067. tx_ring->q_vector = q_vector;
  1068. tx_ring->next = q_vector->tx.ring;
  1069. q_vector->tx.ring = tx_ring;
  1070. }
  1071. tx_rings_rem -= tx_rings_per_v;
  1072. /* Rx rings mapping to vector */
  1073. rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
  1074. q_vector->num_ring_rx = rx_rings_per_v;
  1075. q_vector->rx.ring = NULL;
  1076. q_base = vsi->num_rxq - rx_rings_rem;
  1077. for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
  1078. struct ice_ring *rx_ring = vsi->rx_rings[q_id];
  1079. rx_ring->q_vector = q_vector;
  1080. rx_ring->next = q_vector->rx.ring;
  1081. q_vector->rx.ring = rx_ring;
  1082. }
  1083. rx_rings_rem -= rx_rings_per_v;
  1084. }
  1085. }
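/* Distribution example (illustrative): with num_txq = 8 and 3 q_vectors,
 * DIV_ROUND_UP hands out 3, 3 and 2 Tx rings to vectors 0, 1 and 2
 * (DIV_ROUND_UP(8, 3) = 3, then DIV_ROUND_UP(5, 2) = 3, then
 * DIV_ROUND_UP(2, 1) = 2); Rx rings are spread the same way, so no vector
 * ends up more than one ring short of its peers.
 */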
  1086. /**
  1087. * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
  1088. * @vsi: the VSI being configured
  1089. *
1090. * Sets the queue, descriptor and vector counts on the VSI based on its type.
  1091. */
  1092. static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
  1093. {
  1094. struct ice_pf *pf = vsi->back;
  1095. switch (vsi->type) {
  1096. case ICE_VSI_PF:
  1097. vsi->alloc_txq = pf->num_lan_tx;
  1098. vsi->alloc_rxq = pf->num_lan_rx;
  1099. vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
  1100. vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
  1101. break;
  1102. default:
  1103. dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
  1104. vsi->type);
  1105. break;
  1106. }
  1107. }
  1108. /**
1109. * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
  1110. * @vsi: VSI pointer
  1111. * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
  1112. *
  1113. * On error: returns error code (negative)
  1114. * On success: returns 0
  1115. */
  1116. static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
  1117. {
  1118. struct ice_pf *pf = vsi->back;
  1119. /* allocate memory for both Tx and Rx ring pointers */
  1120. vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
  1121. sizeof(struct ice_ring *), GFP_KERNEL);
  1122. if (!vsi->tx_rings)
  1123. goto err_txrings;
  1124. vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
  1125. sizeof(struct ice_ring *), GFP_KERNEL);
  1126. if (!vsi->rx_rings)
  1127. goto err_rxrings;
  1128. if (alloc_qvectors) {
  1129. /* allocate memory for q_vector pointers */
  1130. vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
  1131. vsi->num_q_vectors,
  1132. sizeof(struct ice_q_vector *),
  1133. GFP_KERNEL);
  1134. if (!vsi->q_vectors)
  1135. goto err_vectors;
  1136. }
  1137. return 0;
  1138. err_vectors:
  1139. devm_kfree(&pf->pdev->dev, vsi->rx_rings);
  1140. err_rxrings:
  1141. devm_kfree(&pf->pdev->dev, vsi->tx_rings);
  1142. err_txrings:
  1143. return -ENOMEM;
  1144. }
  1145. /**
  1146. * ice_msix_clean_rings - MSIX mode Interrupt Handler
  1147. * @irq: interrupt number
  1148. * @data: pointer to a q_vector
  1149. */
  1150. static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
  1151. {
  1152. struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
  1153. if (!q_vector->tx.ring && !q_vector->rx.ring)
  1154. return IRQ_HANDLED;
  1155. napi_schedule(&q_vector->napi);
  1156. return IRQ_HANDLED;
  1157. }
  1158. /**
  1159. * ice_vsi_alloc - Allocates the next available struct vsi in the PF
  1160. * @pf: board private structure
  1161. * @type: type of VSI
  1162. *
  1163. * returns a pointer to a VSI on success, NULL on failure.
  1164. */
  1165. static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
  1166. {
  1167. struct ice_vsi *vsi = NULL;
  1168. /* Need to protect the allocation of the VSIs at the PF level */
  1169. mutex_lock(&pf->sw_mutex);
  1170. /* If we have already allocated our maximum number of VSIs,
  1171. * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
  1172. * is available to be populated
  1173. */
  1174. if (pf->next_vsi == ICE_NO_VSI) {
  1175. dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
  1176. goto unlock_pf;
  1177. }
  1178. vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
  1179. if (!vsi)
  1180. goto unlock_pf;
  1181. vsi->type = type;
  1182. vsi->back = pf;
  1183. set_bit(__ICE_DOWN, vsi->state);
  1184. vsi->idx = pf->next_vsi;
  1185. vsi->work_lmt = ICE_DFLT_IRQ_WORK;
  1186. ice_vsi_set_num_qs(vsi);
  1187. switch (vsi->type) {
  1188. case ICE_VSI_PF:
  1189. if (ice_vsi_alloc_arrays(vsi, true))
  1190. goto err_rings;
  1191. /* Setup default MSIX irq handler for VSI */
  1192. vsi->irq_handler = ice_msix_clean_rings;
  1193. break;
  1194. default:
  1195. dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
  1196. goto unlock_pf;
  1197. }
  1198. /* fill VSI slot in the PF struct */
  1199. pf->vsi[pf->next_vsi] = vsi;
  1200. /* prepare pf->next_vsi for next use */
  1201. pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
  1202. pf->next_vsi);
  1203. goto unlock_pf;
  1204. err_rings:
  1205. devm_kfree(&pf->pdev->dev, vsi);
  1206. vsi = NULL;
  1207. unlock_pf:
  1208. mutex_unlock(&pf->sw_mutex);
  1209. return vsi;
  1210. }
  1211. /**
  1212. * ice_free_irq_msix_misc - Unroll misc vector setup
  1213. * @pf: board private structure
  1214. */
  1215. static void ice_free_irq_msix_misc(struct ice_pf *pf)
  1216. {
  1217. /* disable OICR interrupt */
  1218. wr32(&pf->hw, PFINT_OICR_ENA, 0);
  1219. ice_flush(&pf->hw);
  1220. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
  1221. synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
  1222. devm_free_irq(&pf->pdev->dev,
  1223. pf->msix_entries[pf->oicr_idx].vector, pf);
  1224. }
  1225. ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
  1226. }
  1227. /**
  1228. * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
  1229. * @pf: board private structure
  1230. *
  1231. * This sets up the handler for MSIX 0, which is used to manage the
  1232. * non-queue interrupts, e.g. AdminQ and errors. This is not used
  1233. * when in MSI or Legacy interrupt mode.
  1234. */
  1235. static int ice_req_irq_msix_misc(struct ice_pf *pf)
  1236. {
  1237. struct ice_hw *hw = &pf->hw;
  1238. int oicr_idx, err = 0;
  1239. u8 itr_gran;
  1240. u32 val;
  1241. if (!pf->int_name[0])
  1242. snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
  1243. dev_driver_string(&pf->pdev->dev),
  1244. dev_name(&pf->pdev->dev));
  1245. /* reserve one vector in irq_tracker for misc interrupts */
  1246. oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
  1247. if (oicr_idx < 0)
  1248. return oicr_idx;
  1249. pf->oicr_idx = oicr_idx;
  1250. err = devm_request_irq(&pf->pdev->dev,
  1251. pf->msix_entries[pf->oicr_idx].vector,
  1252. ice_misc_intr, 0, pf->int_name, pf);
  1253. if (err) {
  1254. dev_err(&pf->pdev->dev,
  1255. "devm_request_irq for %s failed: %d\n",
  1256. pf->int_name, err);
  1257. ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
  1258. return err;
  1259. }
  1260. ice_ena_misc_vector(pf);
  1261. val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
  1262. (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
  1263. PFINT_OICR_CTL_CAUSE_ENA_M;
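/* Illustration (hypothetical index): with pf->oicr_idx == 3 the value
 * above encodes MSI-X index 3, ITR index ICE_RX_ITR and the CAUSE_ENA
 * bit, so OICR causes are steered to misc vector 3.
 */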
  1264. wr32(hw, PFINT_OICR_CTL, val);
  1265. /* This enables Admin queue Interrupt causes */
  1266. val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
  1267. (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
  1268. PFINT_FW_CTL_CAUSE_ENA_M;
  1269. wr32(hw, PFINT_FW_CTL, val);
  1270. itr_gran = hw->itr_gran_200;
  1271. wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
  1272. ITR_TO_REG(ICE_ITR_8K, itr_gran));
  1273. ice_flush(hw);
  1274. ice_irq_dynamic_ena(hw, NULL, NULL);
  1275. return 0;
  1276. }
  1277. /**
  1278. * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  1279. * @vsi: the VSI getting queues
  1280. *
  1281. * Return 0 on success and a negative value on error
  1282. */
  1283. static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
  1284. {
  1285. struct ice_pf *pf = vsi->back;
  1286. int offset, ret = 0;
  1287. mutex_lock(&pf->avail_q_mutex);
  1288. /* look for contiguous block of queues for tx */
  1289. offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
  1290. 0, vsi->alloc_txq, 0);
  1291. if (offset < ICE_MAX_TXQS) {
  1292. int i;
  1293. bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
  1294. for (i = 0; i < vsi->alloc_txq; i++)
  1295. vsi->txq_map[i] = i + offset;
  1296. } else {
  1297. ret = -ENOMEM;
  1298. vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
  1299. }
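/* Illustration (hypothetical state): with alloc_txq == 4 and Tx queues
 * 0-2 already taken, the search above returns offset 3 and txq_map
 * becomes {3, 4, 5, 6}; if no 4-queue-wide gap exists, -ENOMEM is
 * recorded and the Tx mapping mode is flagged as scatter so the caller
 * can retry via ice_vsi_get_qs_scatter().
 */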
  1300. /* look for contiguous block of queues for rx */
  1301. offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
  1302. 0, vsi->alloc_rxq, 0);
  1303. if (offset < ICE_MAX_RXQS) {
  1304. int i;
  1305. bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
  1306. for (i = 0; i < vsi->alloc_rxq; i++)
  1307. vsi->rxq_map[i] = i + offset;
  1308. } else {
  1309. ret = -ENOMEM;
  1310. vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
  1311. }
  1312. mutex_unlock(&pf->avail_q_mutex);
  1313. return ret;
  1314. }
  1315. /**
1316. * ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
  1317. * @vsi: the VSI getting queues
  1318. *
  1319. * Return 0 on success and a negative value on error
  1320. */
  1321. static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
  1322. {
  1323. struct ice_pf *pf = vsi->back;
  1324. int i, index = 0;
  1325. mutex_lock(&pf->avail_q_mutex);
  1326. if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
  1327. for (i = 0; i < vsi->alloc_txq; i++) {
  1328. index = find_next_zero_bit(pf->avail_txqs,
  1329. ICE_MAX_TXQS, index);
  1330. if (index < ICE_MAX_TXQS) {
  1331. set_bit(index, pf->avail_txqs);
  1332. vsi->txq_map[i] = index;
  1333. } else {
  1334. goto err_scatter_tx;
  1335. }
  1336. }
  1337. }
  1338. if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
  1339. for (i = 0; i < vsi->alloc_rxq; i++) {
  1340. index = find_next_zero_bit(pf->avail_rxqs,
  1341. ICE_MAX_RXQS, index);
  1342. if (index < ICE_MAX_RXQS) {
  1343. set_bit(index, pf->avail_rxqs);
  1344. vsi->rxq_map[i] = index;
  1345. } else {
  1346. goto err_scatter_rx;
  1347. }
  1348. }
  1349. }
  1350. mutex_unlock(&pf->avail_q_mutex);
  1351. return 0;
  1352. err_scatter_rx:
  1353. /* unflag any queues we have grabbed (i is failed position) */
  1354. for (index = 0; index < i; index++) {
  1355. clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
  1356. vsi->rxq_map[index] = 0;
  1357. }
  1358. i = vsi->alloc_txq;
  1359. err_scatter_tx:
  1360. /* i is either position of failed attempt or vsi->alloc_txq */
  1361. for (index = 0; index < i; index++) {
  1362. clear_bit(vsi->txq_map[index], pf->avail_txqs);
  1363. vsi->txq_map[index] = 0;
  1364. }
  1365. mutex_unlock(&pf->avail_q_mutex);
  1366. return -ENOMEM;
  1367. }
  1368. /**
  1369. * ice_vsi_get_qs - Assign queues from PF to VSI
  1370. * @vsi: the VSI to assign queues to
  1371. *
  1372. * Returns 0 on success and a negative value on error
  1373. */
  1374. static int ice_vsi_get_qs(struct ice_vsi *vsi)
  1375. {
  1376. int ret = 0;
  1377. vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
  1378. vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
  1379. /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
  1380. * modes individually to scatter if assigning contiguous queues
  1381. * to rx or tx fails
  1382. */
  1383. ret = ice_vsi_get_qs_contig(vsi);
  1384. if (ret < 0) {
  1385. if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
  1386. vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
  1387. ICE_MAX_SCATTER_TXQS);
  1388. if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
  1389. vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
  1390. ICE_MAX_SCATTER_RXQS);
  1391. ret = ice_vsi_get_qs_scatter(vsi);
  1392. }
  1393. return ret;
  1394. }
  1395. /**
  1396. * ice_vsi_put_qs - Release queues from VSI to PF
1397. * @vsi: the VSI that is going to release queues
  1398. */
  1399. static void ice_vsi_put_qs(struct ice_vsi *vsi)
  1400. {
  1401. struct ice_pf *pf = vsi->back;
  1402. int i;
  1403. mutex_lock(&pf->avail_q_mutex);
  1404. for (i = 0; i < vsi->alloc_txq; i++) {
  1405. clear_bit(vsi->txq_map[i], pf->avail_txqs);
  1406. vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
  1407. }
  1408. for (i = 0; i < vsi->alloc_rxq; i++) {
  1409. clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
  1410. vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
  1411. }
  1412. mutex_unlock(&pf->avail_q_mutex);
  1413. }
  1414. /**
  1415. * ice_free_q_vector - Free memory allocated for a specific interrupt vector
  1416. * @vsi: VSI having the memory freed
  1417. * @v_idx: index of the vector to be freed
  1418. */
  1419. static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
  1420. {
  1421. struct ice_q_vector *q_vector;
  1422. struct ice_ring *ring;
  1423. if (!vsi->q_vectors[v_idx]) {
  1424. dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
  1425. v_idx);
  1426. return;
  1427. }
  1428. q_vector = vsi->q_vectors[v_idx];
  1429. ice_for_each_ring(ring, q_vector->tx)
  1430. ring->q_vector = NULL;
  1431. ice_for_each_ring(ring, q_vector->rx)
  1432. ring->q_vector = NULL;
  1433. /* only VSI with an associated netdev is set up with NAPI */
  1434. if (vsi->netdev)
  1435. netif_napi_del(&q_vector->napi);
  1436. devm_kfree(&vsi->back->pdev->dev, q_vector);
  1437. vsi->q_vectors[v_idx] = NULL;
  1438. }
  1439. /**
  1440. * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
  1441. * @vsi: the VSI having memory freed
  1442. */
  1443. static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
  1444. {
  1445. int v_idx;
  1446. for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
  1447. ice_free_q_vector(vsi, v_idx);
  1448. }
  1449. /**
  1450. * ice_cfg_netdev - Setup the netdev flags
  1451. * @vsi: the VSI being configured
  1452. *
  1453. * Returns 0 on success, negative value on failure
  1454. */
  1455. static int ice_cfg_netdev(struct ice_vsi *vsi)
  1456. {
  1457. netdev_features_t csumo_features;
  1458. netdev_features_t vlano_features;
  1459. netdev_features_t dflt_features;
  1460. netdev_features_t tso_features;
  1461. struct ice_netdev_priv *np;
  1462. struct net_device *netdev;
  1463. u8 mac_addr[ETH_ALEN];
  1464. netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
  1465. vsi->alloc_txq, vsi->alloc_rxq);
  1466. if (!netdev)
  1467. return -ENOMEM;
  1468. vsi->netdev = netdev;
  1469. np = netdev_priv(netdev);
  1470. np->vsi = vsi;
  1471. dflt_features = NETIF_F_SG |
  1472. NETIF_F_HIGHDMA |
  1473. NETIF_F_RXHASH;
  1474. csumo_features = NETIF_F_RXCSUM |
  1475. NETIF_F_IP_CSUM |
  1476. NETIF_F_IPV6_CSUM;
  1477. vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
  1478. NETIF_F_HW_VLAN_CTAG_TX |
  1479. NETIF_F_HW_VLAN_CTAG_RX;
  1480. tso_features = NETIF_F_TSO;
  1481. /* set features that user can change */
  1482. netdev->hw_features = dflt_features | csumo_features |
  1483. vlano_features | tso_features;
  1484. /* enable features */
  1485. netdev->features |= netdev->hw_features;
  1486. /* encap and VLAN devices inherit default, csumo and tso features */
  1487. netdev->hw_enc_features |= dflt_features | csumo_features |
  1488. tso_features;
  1489. netdev->vlan_features |= dflt_features | csumo_features |
  1490. tso_features;
  1491. if (vsi->type == ICE_VSI_PF) {
  1492. SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
  1493. ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
  1494. ether_addr_copy(netdev->dev_addr, mac_addr);
  1495. ether_addr_copy(netdev->perm_addr, mac_addr);
  1496. }
  1497. netdev->priv_flags |= IFF_UNICAST_FLT;
  1498. /* assign netdev_ops */
  1499. netdev->netdev_ops = &ice_netdev_ops;
1500. /* set the watchdog timeout value to 5 seconds */
  1501. netdev->watchdog_timeo = 5 * HZ;
  1502. netdev->min_mtu = ETH_MIN_MTU;
  1503. netdev->max_mtu = ICE_MAX_MTU;
  1504. return 0;
  1505. }
  1506. /**
  1507. * ice_vsi_free_arrays - clean up vsi resources
  1508. * @vsi: pointer to VSI being cleared
  1509. * @free_qvectors: bool to specify if q_vectors should be deallocated
  1510. */
  1511. static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
  1512. {
  1513. struct ice_pf *pf = vsi->back;
  1514. /* free the ring and vector containers */
  1515. if (free_qvectors && vsi->q_vectors) {
  1516. devm_kfree(&pf->pdev->dev, vsi->q_vectors);
  1517. vsi->q_vectors = NULL;
  1518. }
  1519. if (vsi->tx_rings) {
  1520. devm_kfree(&pf->pdev->dev, vsi->tx_rings);
  1521. vsi->tx_rings = NULL;
  1522. }
  1523. if (vsi->rx_rings) {
  1524. devm_kfree(&pf->pdev->dev, vsi->rx_rings);
  1525. vsi->rx_rings = NULL;
  1526. }
  1527. }
  1528. /**
  1529. * ice_vsi_clear - clean up and deallocate the provided vsi
  1530. * @vsi: pointer to VSI being cleared
  1531. *
  1532. * This deallocates the vsi's queue resources, removes it from the PF's
  1533. * VSI array if necessary, and deallocates the VSI
  1534. *
  1535. * Returns 0 on success, negative on failure
  1536. */
  1537. static int ice_vsi_clear(struct ice_vsi *vsi)
  1538. {
  1539. struct ice_pf *pf = NULL;
  1540. if (!vsi)
  1541. return 0;
  1542. if (!vsi->back)
  1543. return -EINVAL;
  1544. pf = vsi->back;
  1545. if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
  1546. dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
  1547. vsi->idx);
  1548. return -EINVAL;
  1549. }
  1550. mutex_lock(&pf->sw_mutex);
  1551. /* updates the PF for this cleared vsi */
  1552. pf->vsi[vsi->idx] = NULL;
  1553. if (vsi->idx < pf->next_vsi)
  1554. pf->next_vsi = vsi->idx;
  1555. ice_vsi_free_arrays(vsi, true);
  1556. mutex_unlock(&pf->sw_mutex);
  1557. devm_kfree(&pf->pdev->dev, vsi);
  1558. return 0;
  1559. }
  1560. /**
  1561. * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  1562. * @vsi: the VSI being configured
  1563. * @v_idx: index of the vector in the vsi struct
  1564. *
  1565. * We allocate one q_vector. If allocation fails we return -ENOMEM.
  1566. */
  1567. static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
  1568. {
  1569. struct ice_pf *pf = vsi->back;
  1570. struct ice_q_vector *q_vector;
  1571. /* allocate q_vector */
  1572. q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
  1573. if (!q_vector)
  1574. return -ENOMEM;
  1575. q_vector->vsi = vsi;
  1576. q_vector->v_idx = v_idx;
  1577. /* only set affinity_mask if the CPU is online */
  1578. if (cpu_online(v_idx))
  1579. cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
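/* Note: the vector index doubles as a CPU id here, i.e. q_vector 0 is
 * affinitized to CPU 0, q_vector 1 to CPU 1 and so on; a simple 1:1
 * mapping rather than anything topology aware.
 */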
  1580. if (vsi->netdev)
  1581. netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
  1582. NAPI_POLL_WEIGHT);
  1583. /* tie q_vector and vsi together */
  1584. vsi->q_vectors[v_idx] = q_vector;
  1585. return 0;
  1586. }
  1587. /**
  1588. * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
  1589. * @vsi: the VSI being configured
  1590. *
  1591. * We allocate one q_vector per queue interrupt. If allocation fails we
  1592. * return -ENOMEM.
  1593. */
  1594. static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
  1595. {
  1596. struct ice_pf *pf = vsi->back;
  1597. int v_idx = 0, num_q_vectors;
  1598. int err;
  1599. if (vsi->q_vectors[0]) {
  1600. dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
  1601. vsi->vsi_num);
  1602. return -EEXIST;
  1603. }
  1604. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
  1605. num_q_vectors = vsi->num_q_vectors;
  1606. } else {
  1607. err = -EINVAL;
  1608. goto err_out;
  1609. }
  1610. for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
  1611. err = ice_vsi_alloc_q_vector(vsi, v_idx);
  1612. if (err)
  1613. goto err_out;
  1614. }
  1615. return 0;
  1616. err_out:
  1617. while (v_idx--)
  1618. ice_free_q_vector(vsi, v_idx);
  1619. dev_err(&pf->pdev->dev,
  1620. "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
  1621. vsi->num_q_vectors, vsi->vsi_num, err);
  1622. vsi->num_q_vectors = 0;
  1623. return err;
  1624. }
  1625. /**
  1626. * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
  1627. * @vsi: ptr to the VSI
  1628. *
  1629. * This should only be called after ice_vsi_alloc() which allocates the
1630. * corresponding SW VSI structure and initializes the queue counts for the
  1631. * newly allocated VSI.
  1632. *
  1633. * Returns 0 on success or negative on failure
  1634. */
  1635. static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
  1636. {
  1637. struct ice_pf *pf = vsi->back;
  1638. int num_q_vectors = 0;
  1639. if (vsi->base_vector) {
  1640. dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
  1641. vsi->vsi_num, vsi->base_vector);
  1642. return -EEXIST;
  1643. }
  1644. if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
  1645. return -ENOENT;
  1646. switch (vsi->type) {
  1647. case ICE_VSI_PF:
  1648. num_q_vectors = vsi->num_q_vectors;
  1649. break;
  1650. default:
  1651. dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
  1652. vsi->type);
  1653. break;
  1654. }
  1655. if (num_q_vectors)
  1656. vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
  1657. num_q_vectors, vsi->idx);
  1658. if (vsi->base_vector < 0) {
  1659. dev_err(&pf->pdev->dev,
  1660. "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
  1661. num_q_vectors, vsi->vsi_num, vsi->base_vector);
  1662. return -ENOENT;
  1663. }
  1664. return 0;
  1665. }
  1666. /**
  1667. * ice_fill_rss_lut - Fill the RSS lookup table with default values
  1668. * @lut: Lookup table
  1669. * @rss_table_size: Lookup table size
  1670. * @rss_size: Range of queue number for hashing
  1671. */
  1672. void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
  1673. {
  1674. u16 i;
  1675. for (i = 0; i < rss_table_size; i++)
  1676. lut[i] = i % rss_size;
  1677. }
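/* Example (hypothetical sizes): rss_table_size == 8 and rss_size == 3
 * fill the LUT with 0 1 2 0 1 2 0 1, spreading hash buckets round-robin
 * over the first three Rx queues.
 */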
  1678. /**
  1679. * ice_vsi_cfg_rss - Configure RSS params for a VSI
  1680. * @vsi: VSI to be configured
  1681. */
  1682. static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
  1683. {
  1684. u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
  1685. struct ice_aqc_get_set_rss_keys *key;
  1686. struct ice_pf *pf = vsi->back;
  1687. enum ice_status status;
  1688. int err = 0;
  1689. u8 *lut;
  1690. vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
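/* clamp rss_size to the number of Rx queues in use so the LUT built
 * below never points at a queue this VSI does not own
 */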
  1691. lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
  1692. if (!lut)
  1693. return -ENOMEM;
  1694. if (vsi->rss_lut_user)
  1695. memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
  1696. else
  1697. ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
  1698. status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
  1699. lut, vsi->rss_table_size);
  1700. if (status) {
  1701. dev_err(&vsi->back->pdev->dev,
  1702. "set_rss_lut failed, error %d\n", status);
  1703. err = -EIO;
  1704. goto ice_vsi_cfg_rss_exit;
  1705. }
  1706. key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
  1707. if (!key) {
  1708. err = -ENOMEM;
  1709. goto ice_vsi_cfg_rss_exit;
  1710. }
  1711. if (vsi->rss_hkey_user)
  1712. memcpy(seed, vsi->rss_hkey_user,
  1713. ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
  1714. else
  1715. netdev_rss_key_fill((void *)seed,
  1716. ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
  1717. memcpy(&key->standard_rss_key, seed,
  1718. ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
  1719. status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
  1720. if (status) {
  1721. dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
  1722. status);
  1723. err = -EIO;
  1724. }
  1725. devm_kfree(&pf->pdev->dev, key);
  1726. ice_vsi_cfg_rss_exit:
  1727. devm_kfree(&pf->pdev->dev, lut);
  1728. return err;
  1729. }
  1730. /**
  1731. * ice_vsi_setup - Set up a VSI by a given type
  1732. * @pf: board private structure
  1733. * @type: VSI type
  1734. * @pi: pointer to the port_info instance
  1735. *
  1736. * This allocates the sw VSI structure and its queue resources.
  1737. *
1738. * Returns a pointer to the successfully allocated and configured VSI SW struct
1739. * on success, otherwise returns NULL on failure.
  1740. */
  1741. static struct ice_vsi *
  1742. ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
  1743. struct ice_port_info *pi)
  1744. {
  1745. struct device *dev = &pf->pdev->dev;
  1746. struct ice_vsi_ctx ctxt = { 0 };
  1747. struct ice_vsi *vsi;
  1748. int ret;
  1749. vsi = ice_vsi_alloc(pf, type);
  1750. if (!vsi) {
  1751. dev_err(dev, "could not allocate VSI\n");
  1752. return NULL;
  1753. }
  1754. vsi->port_info = pi;
  1755. vsi->vsw = pf->first_sw;
  1756. if (ice_vsi_get_qs(vsi)) {
  1757. dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
  1758. vsi->idx);
  1759. goto err_get_qs;
  1760. }
  1761. /* set RSS capabilities */
  1762. ice_vsi_set_rss_params(vsi);
  1763. /* create the VSI */
  1764. ret = ice_vsi_add(vsi);
  1765. if (ret)
  1766. goto err_vsi;
  1767. ctxt.vsi_num = vsi->vsi_num;
  1768. switch (vsi->type) {
  1769. case ICE_VSI_PF:
  1770. ret = ice_cfg_netdev(vsi);
  1771. if (ret)
  1772. goto err_cfg_netdev;
  1773. ret = register_netdev(vsi->netdev);
  1774. if (ret)
  1775. goto err_register_netdev;
  1776. netif_carrier_off(vsi->netdev);
  1777. /* make sure transmit queues start off as stopped */
  1778. netif_tx_stop_all_queues(vsi->netdev);
  1779. ret = ice_vsi_alloc_q_vectors(vsi);
  1780. if (ret)
  1781. goto err_msix;
  1782. ret = ice_vsi_setup_vector_base(vsi);
  1783. if (ret)
  1784. goto err_rings;
  1785. ret = ice_vsi_alloc_rings(vsi);
  1786. if (ret)
  1787. goto err_rings;
  1788. ice_vsi_map_rings_to_vectors(vsi);
1789. /* Do not exit if configuring RSS had an issue; we can still
1790. * receive traffic on the first queue, so there is no need to
1791. * capture the return value
1792. */
  1793. if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
  1794. ice_vsi_cfg_rss(vsi);
  1795. break;
  1796. default:
  1797. /* if vsi type is not recognized, clean up the resources and
  1798. * exit
  1799. */
  1800. goto err_rings;
  1801. }
  1802. return vsi;
  1803. err_rings:
  1804. ice_vsi_free_q_vectors(vsi);
  1805. err_msix:
  1806. if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
  1807. unregister_netdev(vsi->netdev);
  1808. err_register_netdev:
  1809. if (vsi->netdev) {
  1810. free_netdev(vsi->netdev);
  1811. vsi->netdev = NULL;
  1812. }
  1813. err_cfg_netdev:
  1814. ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
  1815. if (ret)
  1816. dev_err(&vsi->back->pdev->dev,
  1817. "Free VSI AQ call failed, err %d\n", ret);
  1818. err_vsi:
  1819. ice_vsi_put_qs(vsi);
  1820. err_get_qs:
  1821. pf->q_left_tx += vsi->alloc_txq;
  1822. pf->q_left_rx += vsi->alloc_rxq;
  1823. ice_vsi_clear(vsi);
  1824. return NULL;
  1825. }
  1826. /**
1827. * ice_vsi_add_vlan - Add VSI membership for a given VLAN
  1828. * @vsi: the vsi being configured
  1829. * @vid: vlan id to be added
  1830. */
  1831. static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
  1832. {
  1833. struct ice_fltr_list_entry *tmp;
  1834. struct ice_pf *pf = vsi->back;
  1835. LIST_HEAD(tmp_add_list);
  1836. enum ice_status status;
  1837. int err = 0;
  1838. tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
  1839. if (!tmp)
  1840. return -ENOMEM;
  1841. tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
  1842. tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1843. tmp->fltr_info.flag = ICE_FLTR_TX;
  1844. tmp->fltr_info.src = vsi->vsi_num;
  1845. tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
  1846. tmp->fltr_info.l_data.vlan.vlan_id = vid;
  1847. INIT_LIST_HEAD(&tmp->list_entry);
  1848. list_add(&tmp->list_entry, &tmp_add_list);
  1849. status = ice_add_vlan(&pf->hw, &tmp_add_list);
  1850. if (status) {
  1851. err = -ENODEV;
  1852. dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
  1853. vid, vsi->vsi_num);
  1854. }
  1855. ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
  1856. return err;
  1857. }
  1858. /**
  1859. * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
  1860. * @netdev: network interface to be adjusted
  1861. * @proto: unused protocol
  1862. * @vid: vlan id to be added
  1863. *
  1864. * net_device_ops implementation for adding vlan ids
  1865. */
  1866. static int ice_vlan_rx_add_vid(struct net_device *netdev,
  1867. __always_unused __be16 proto, u16 vid)
  1868. {
  1869. struct ice_netdev_priv *np = netdev_priv(netdev);
  1870. struct ice_vsi *vsi = np->vsi;
  1871. int ret = 0;
  1872. if (vid >= VLAN_N_VID) {
  1873. netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
  1874. vid, VLAN_N_VID);
  1875. return -EINVAL;
  1876. }
  1877. if (vsi->info.pvid)
  1878. return -EINVAL;
  1879. /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
1880. * needed to continue allowing all untagged packets since the VLAN
1881. * prune list is applied to all packets by the switch
  1882. */
  1883. ret = ice_vsi_add_vlan(vsi, vid);
  1884. if (!ret)
  1885. set_bit(vid, vsi->active_vlans);
  1886. return ret;
  1887. }
  1888. /**
  1889. * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
  1890. * @vsi: the VSI being configured
  1891. * @vid: VLAN id to be removed
  1892. */
  1893. static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
  1894. {
  1895. struct ice_fltr_list_entry *list;
  1896. struct ice_pf *pf = vsi->back;
  1897. LIST_HEAD(tmp_add_list);
  1898. list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
  1899. if (!list)
  1900. return;
  1901. list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
  1902. list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
  1903. list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
  1904. list->fltr_info.l_data.vlan.vlan_id = vid;
  1905. list->fltr_info.flag = ICE_FLTR_TX;
  1906. list->fltr_info.src = vsi->vsi_num;
  1907. INIT_LIST_HEAD(&list->list_entry);
  1908. list_add(&list->list_entry, &tmp_add_list);
  1909. if (ice_remove_vlan(&pf->hw, &tmp_add_list))
  1910. dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
  1911. vid, vsi->vsi_num);
  1912. ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
  1913. }
  1914. /**
  1915. * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
  1916. * @netdev: network interface to be adjusted
  1917. * @proto: unused protocol
  1918. * @vid: vlan id to be removed
  1919. *
  1920. * net_device_ops implementation for removing vlan ids
  1921. */
  1922. static int ice_vlan_rx_kill_vid(struct net_device *netdev,
  1923. __always_unused __be16 proto, u16 vid)
  1924. {
  1925. struct ice_netdev_priv *np = netdev_priv(netdev);
  1926. struct ice_vsi *vsi = np->vsi;
  1927. if (vsi->info.pvid)
  1928. return -EINVAL;
1929. /* the return code is ignored as there is nothing a user can do
1930. * about a failure to remove, and a log message has already been
1931. * printed by ice_vsi_kill_vlan()
  1932. */
  1933. ice_vsi_kill_vlan(vsi, vid);
  1934. clear_bit(vid, vsi->active_vlans);
  1935. return 0;
  1936. }
  1937. /**
  1938. * ice_setup_pf_sw - Setup the HW switch on startup or after reset
  1939. * @pf: board private structure
  1940. *
  1941. * Returns 0 on success, negative value on failure
  1942. */
  1943. static int ice_setup_pf_sw(struct ice_pf *pf)
  1944. {
  1945. LIST_HEAD(tmp_add_list);
  1946. u8 broadcast[ETH_ALEN];
  1947. struct ice_vsi *vsi;
  1948. int status = 0;
  1949. vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);
  1950. if (!vsi) {
  1951. status = -ENOMEM;
  1952. goto error_exit;
  1953. }
  1954. /* tmp_add_list contains a list of MAC addresses for which MAC
  1955. * filters need to be programmed. Add the VSI's unicast MAC to
  1956. * this list
  1957. */
  1958. status = ice_add_mac_to_list(vsi, &tmp_add_list,
  1959. vsi->port_info->mac.perm_addr);
  1960. if (status)
  1961. goto error_exit;
  1962. /* VSI needs to receive broadcast traffic, so add the broadcast
  1963. * MAC address to the list.
  1964. */
  1965. eth_broadcast_addr(broadcast);
  1966. status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
  1967. if (status)
  1968. goto error_exit;
  1969. /* program MAC filters for entries in tmp_add_list */
  1970. status = ice_add_mac(&pf->hw, &tmp_add_list);
  1971. if (status) {
  1972. dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
  1973. status = -ENOMEM;
  1974. goto error_exit;
  1975. }
  1976. ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
  1977. return status;
  1978. error_exit:
  1979. ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
  1980. if (vsi) {
  1981. ice_vsi_free_q_vectors(vsi);
  1982. if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
  1983. unregister_netdev(vsi->netdev);
  1984. if (vsi->netdev) {
  1985. free_netdev(vsi->netdev);
  1986. vsi->netdev = NULL;
  1987. }
  1988. ice_vsi_delete(vsi);
  1989. ice_vsi_put_qs(vsi);
  1990. pf->q_left_tx += vsi->alloc_txq;
  1991. pf->q_left_rx += vsi->alloc_rxq;
  1992. ice_vsi_clear(vsi);
  1993. }
  1994. return status;
  1995. }
  1996. /**
  1997. * ice_determine_q_usage - Calculate queue distribution
  1998. * @pf: board private structure
  1999. *
2000. * Determines pf->num_lan_tx/rx and records the leftover queues in pf->q_left_tx/rx
  2001. */
  2002. static void ice_determine_q_usage(struct ice_pf *pf)
  2003. {
  2004. u16 q_left_tx, q_left_rx;
  2005. q_left_tx = pf->hw.func_caps.common_cap.num_txq;
  2006. q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
  2007. /* initial support for only 1 tx queue */
  2008. pf->num_lan_tx = 1;
  2009. /* only 1 rx queue unless RSS is enabled */
  2010. if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
  2011. pf->num_lan_rx = 1;
  2012. else
  2013. pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
  2014. pf->q_left_tx = q_left_tx - pf->num_lan_tx;
  2015. pf->q_left_rx = q_left_rx - pf->num_lan_rx;
  2016. }
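/* Worked example (hypothetical capabilities): with RSS enabled, 16 HW Tx
 * queues, 16 HW Rx queues and 8 online CPUs this yields num_lan_tx = 1,
 * num_lan_rx = 8, q_left_tx = 15 and q_left_rx = 8.
 */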
  2017. /**
2018. * ice_deinit_pf - Unrolls initializations done by ice_init_pf
  2019. * @pf: board private structure to initialize
  2020. */
  2021. static void ice_deinit_pf(struct ice_pf *pf)
  2022. {
  2023. if (pf->serv_tmr.function)
  2024. del_timer_sync(&pf->serv_tmr);
  2025. if (pf->serv_task.func)
  2026. cancel_work_sync(&pf->serv_task);
  2027. mutex_destroy(&pf->sw_mutex);
  2028. mutex_destroy(&pf->avail_q_mutex);
  2029. }
  2030. /**
  2031. * ice_init_pf - Initialize general software structures (struct ice_pf)
  2032. * @pf: board private structure to initialize
  2033. */
  2034. static void ice_init_pf(struct ice_pf *pf)
  2035. {
  2036. bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
  2037. set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
  2038. mutex_init(&pf->sw_mutex);
  2039. mutex_init(&pf->avail_q_mutex);
  2040. /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
  2041. mutex_lock(&pf->avail_q_mutex);
  2042. bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
  2043. bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
  2044. mutex_unlock(&pf->avail_q_mutex);
  2045. if (pf->hw.func_caps.common_cap.rss_table_size)
  2046. set_bit(ICE_FLAG_RSS_ENA, pf->flags);
  2047. /* setup service timer and periodic service task */
  2048. timer_setup(&pf->serv_tmr, ice_service_timer, 0);
  2049. pf->serv_tmr_period = HZ;
  2050. INIT_WORK(&pf->serv_task, ice_service_task);
  2051. clear_bit(__ICE_SERVICE_SCHED, pf->state);
  2052. }
  2053. /**
  2054. * ice_ena_msix_range - Request a range of MSIX vectors from the OS
  2055. * @pf: board private structure
  2056. *
2057. * Compute the number of MSIX vectors required (v_budget) and request that
2058. * many from the OS. Return the number of vectors reserved or negative on failure
  2059. */
  2060. static int ice_ena_msix_range(struct ice_pf *pf)
  2061. {
  2062. int v_left, v_actual, v_budget = 0;
  2063. int needed, err, i;
  2064. v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
  2065. /* reserve one vector for miscellaneous handler */
  2066. needed = 1;
  2067. v_budget += needed;
  2068. v_left -= needed;
  2069. /* reserve vectors for LAN traffic */
  2070. pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
  2071. v_budget += pf->num_lan_msix;
  2072. pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
  2073. sizeof(struct msix_entry), GFP_KERNEL);
  2074. if (!pf->msix_entries) {
  2075. err = -ENOMEM;
  2076. goto exit_err;
  2077. }
  2078. for (i = 0; i < v_budget; i++)
  2079. pf->msix_entries[i].entry = i;
  2080. /* actually reserve the vectors */
  2081. v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
  2082. ICE_MIN_MSIX, v_budget);
  2083. if (v_actual < 0) {
  2084. dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
  2085. err = v_actual;
  2086. goto msix_err;
  2087. }
  2088. if (v_actual < v_budget) {
  2089. dev_warn(&pf->pdev->dev,
  2090. "not enough vectors. requested = %d, obtained = %d\n",
  2091. v_budget, v_actual);
  2092. if (v_actual >= (pf->num_lan_msix + 1)) {
  2093. pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
  2094. } else if (v_actual >= 2) {
  2095. pf->num_lan_msix = 1;
  2096. pf->num_avail_msix = v_actual - 2;
  2097. } else {
  2098. pci_disable_msix(pf->pdev);
  2099. err = -ERANGE;
  2100. goto msix_err;
  2101. }
  2102. }
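/* Worked example (hypothetical counts): with 8 online CPUs, v_budget is
 * 1 + 8 = 9. If the OS only grants 5 vectors, 5 < num_lan_msix + 1, so
 * the driver falls back to a single LAN vector and keeps
 * num_avail_msix = 5 - 2 = 3 vectors spare.
 */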
  2103. return v_actual;
  2104. msix_err:
  2105. devm_kfree(&pf->pdev->dev, pf->msix_entries);
  2106. goto exit_err;
  2107. exit_err:
  2108. pf->num_lan_msix = 0;
  2109. clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
  2110. return err;
  2111. }
  2112. /**
  2113. * ice_dis_msix - Disable MSI-X interrupt setup in OS
  2114. * @pf: board private structure
  2115. */
  2116. static void ice_dis_msix(struct ice_pf *pf)
  2117. {
  2118. pci_disable_msix(pf->pdev);
  2119. devm_kfree(&pf->pdev->dev, pf->msix_entries);
  2120. pf->msix_entries = NULL;
  2121. clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
  2122. }
  2123. /**
  2124. * ice_init_interrupt_scheme - Determine proper interrupt scheme
  2125. * @pf: board private structure to initialize
  2126. */
  2127. static int ice_init_interrupt_scheme(struct ice_pf *pf)
  2128. {
  2129. int vectors = 0;
  2130. ssize_t size;
  2131. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
  2132. vectors = ice_ena_msix_range(pf);
  2133. else
  2134. return -ENODEV;
  2135. if (vectors < 0)
  2136. return vectors;
  2137. /* set up vector assignment tracking */
  2138. size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
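/* single allocation: an ice_res_tracker header followed by 'vectors'
 * u16 slots (a flexible-array style layout, as assumed here) that
 * ice_get_res()/ice_free_res() hand out and reclaim
 */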
  2139. pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
  2140. if (!pf->irq_tracker) {
  2141. ice_dis_msix(pf);
  2142. return -ENOMEM;
  2143. }
  2144. pf->irq_tracker->num_entries = vectors;
  2145. return 0;
  2146. }
  2147. /**
  2148. * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
  2149. * @pf: board private structure
  2150. */
  2151. static void ice_clear_interrupt_scheme(struct ice_pf *pf)
  2152. {
  2153. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
  2154. ice_dis_msix(pf);
  2155. devm_kfree(&pf->pdev->dev, pf->irq_tracker);
  2156. pf->irq_tracker = NULL;
  2157. }
  2158. /**
  2159. * ice_probe - Device initialization routine
  2160. * @pdev: PCI device information struct
  2161. * @ent: entry in ice_pci_tbl
  2162. *
  2163. * Returns 0 on success, negative on failure
  2164. */
  2165. static int ice_probe(struct pci_dev *pdev,
  2166. const struct pci_device_id __always_unused *ent)
  2167. {
  2168. struct ice_pf *pf;
  2169. struct ice_hw *hw;
  2170. int err;
  2171. /* this driver uses devres, see Documentation/driver-model/devres.txt */
  2172. err = pcim_enable_device(pdev);
  2173. if (err)
  2174. return err;
  2175. err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
  2176. if (err) {
  2177. dev_err(&pdev->dev, "I/O map error %d\n", err);
  2178. return err;
  2179. }
  2180. pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
  2181. if (!pf)
  2182. return -ENOMEM;
2183. /* set up for high or low DMA */
  2184. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  2185. if (err)
  2186. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  2187. if (err) {
  2188. dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
  2189. return err;
  2190. }
  2191. pci_enable_pcie_error_reporting(pdev);
  2192. pci_set_master(pdev);
  2193. pf->pdev = pdev;
  2194. pci_set_drvdata(pdev, pf);
  2195. set_bit(__ICE_DOWN, pf->state);
  2196. hw = &pf->hw;
  2197. hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
  2198. hw->back = pf;
  2199. hw->vendor_id = pdev->vendor;
  2200. hw->device_id = pdev->device;
  2201. pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
  2202. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  2203. hw->subsystem_device_id = pdev->subsystem_device;
  2204. hw->bus.device = PCI_SLOT(pdev->devfn);
  2205. hw->bus.func = PCI_FUNC(pdev->devfn);
  2206. ice_set_ctrlq_len(hw);
  2207. pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
  2208. #ifndef CONFIG_DYNAMIC_DEBUG
  2209. if (debug < -1)
  2210. hw->debug_mask = debug;
  2211. #endif
  2212. err = ice_init_hw(hw);
  2213. if (err) {
  2214. dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
  2215. err = -EIO;
  2216. goto err_exit_unroll;
  2217. }
  2218. dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
  2219. hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
  2220. hw->api_maj_ver, hw->api_min_ver);
  2221. ice_init_pf(pf);
  2222. ice_determine_q_usage(pf);
  2223. pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
  2224. hw->func_caps.guaranteed_num_vsi);
  2225. if (!pf->num_alloc_vsi) {
  2226. err = -EIO;
  2227. goto err_init_pf_unroll;
  2228. }
  2229. pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
  2230. sizeof(struct ice_vsi *), GFP_KERNEL);
  2231. if (!pf->vsi) {
  2232. err = -ENOMEM;
  2233. goto err_init_pf_unroll;
  2234. }
  2235. err = ice_init_interrupt_scheme(pf);
  2236. if (err) {
  2237. dev_err(&pdev->dev,
  2238. "ice_init_interrupt_scheme failed: %d\n", err);
  2239. err = -EIO;
  2240. goto err_init_interrupt_unroll;
  2241. }
2242. /* In case of MSIX we are going to set up the misc vector right here
2243. * to handle admin queue events etc. In case of legacy and MSI,
2244. * the misc functionality and queue processing are combined in
2245. * the same vector and that gets set up at open.
  2246. */
  2247. if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
  2248. err = ice_req_irq_msix_misc(pf);
  2249. if (err) {
  2250. dev_err(&pdev->dev,
  2251. "setup of misc vector failed: %d\n", err);
  2252. goto err_init_interrupt_unroll;
  2253. }
  2254. }
  2255. /* create switch struct for the switch element created by FW on boot */
  2256. pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
  2257. GFP_KERNEL);
  2258. if (!pf->first_sw) {
  2259. err = -ENOMEM;
  2260. goto err_msix_misc_unroll;
  2261. }
  2262. pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
  2263. pf->first_sw->pf = pf;
  2264. /* record the sw_id available for later use */
  2265. pf->first_sw->sw_id = hw->port_info->sw_id;
  2266. err = ice_setup_pf_sw(pf);
  2267. if (err) {
  2268. dev_err(&pdev->dev,
  2269. "probe failed due to setup pf switch:%d\n", err);
  2270. goto err_alloc_sw_unroll;
  2271. }
  2272. /* Driver is mostly up */
  2273. clear_bit(__ICE_DOWN, pf->state);
  2274. /* since everything is good, start the service timer */
  2275. mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
  2276. return 0;
  2277. err_alloc_sw_unroll:
  2278. set_bit(__ICE_DOWN, pf->state);
  2279. devm_kfree(&pf->pdev->dev, pf->first_sw);
  2280. err_msix_misc_unroll:
  2281. ice_free_irq_msix_misc(pf);
  2282. err_init_interrupt_unroll:
  2283. ice_clear_interrupt_scheme(pf);
  2284. devm_kfree(&pdev->dev, pf->vsi);
  2285. err_init_pf_unroll:
  2286. ice_deinit_pf(pf);
  2287. ice_deinit_hw(hw);
  2288. err_exit_unroll:
  2289. pci_disable_pcie_error_reporting(pdev);
  2290. return err;
  2291. }
  2292. /**
  2293. * ice_remove - Device removal routine
  2294. * @pdev: PCI device information struct
  2295. */
  2296. static void ice_remove(struct pci_dev *pdev)
  2297. {
  2298. struct ice_pf *pf = pci_get_drvdata(pdev);
  2299. int i = 0;
  2300. int err;
  2301. if (!pf)
  2302. return;
  2303. set_bit(__ICE_DOWN, pf->state);
  2304. for (i = 0; i < pf->num_alloc_vsi; i++) {
  2305. if (!pf->vsi[i])
  2306. continue;
  2307. err = ice_vsi_release(pf->vsi[i]);
  2308. if (err)
  2309. dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n",
  2310. i, err);
  2311. }
  2312. ice_free_irq_msix_misc(pf);
  2313. ice_clear_interrupt_scheme(pf);
  2314. ice_deinit_pf(pf);
  2315. ice_deinit_hw(&pf->hw);
  2316. pci_disable_pcie_error_reporting(pdev);
  2317. }
  2318. /* ice_pci_tbl - PCI Device ID Table
  2319. *
  2320. * Wildcard entries (PCI_ANY_ID) should come last
  2321. * Last entry must be all 0s
  2322. *
  2323. * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  2324. * Class, Class Mask, private data (not used) }
  2325. */
  2326. static const struct pci_device_id ice_pci_tbl[] = {
  2327. { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
  2328. { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
  2329. { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
  2330. { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
  2331. { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
  2332. /* required last entry */
  2333. { 0, }
  2334. };
  2335. MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
  2336. static struct pci_driver ice_driver = {
  2337. .name = KBUILD_MODNAME,
  2338. .id_table = ice_pci_tbl,
  2339. .probe = ice_probe,
  2340. .remove = ice_remove,
  2341. };
  2342. /**
  2343. * ice_module_init - Driver registration routine
  2344. *
  2345. * ice_module_init is the first routine called when the driver is
  2346. * loaded. All it does is register with the PCI subsystem.
  2347. */
  2348. static int __init ice_module_init(void)
  2349. {
  2350. int status;
  2351. pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
  2352. pr_info("%s\n", ice_copyright);
  2353. ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
  2354. if (!ice_wq) {
  2355. pr_err("Failed to create workqueue\n");
  2356. return -ENOMEM;
  2357. }
  2358. status = pci_register_driver(&ice_driver);
  2359. if (status) {
  2360. pr_err("failed to register pci driver, err %d\n", status);
  2361. destroy_workqueue(ice_wq);
  2362. }
  2363. return status;
  2364. }
  2365. module_init(ice_module_init);
  2366. /**
  2367. * ice_module_exit - Driver exit cleanup routine
  2368. *
  2369. * ice_module_exit is called just before the driver is removed
  2370. * from memory.
  2371. */
  2372. static void __exit ice_module_exit(void)
  2373. {
  2374. pci_unregister_driver(&ice_driver);
  2375. destroy_workqueue(ice_wq);
  2376. pr_info("module unloaded\n");
  2377. }
  2378. module_exit(ice_module_exit);
  2379. /**
  2380. * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
  2381. * @vsi: the vsi being changed
  2382. */
  2383. static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
  2384. {
  2385. struct device *dev = &vsi->back->pdev->dev;
  2386. struct ice_hw *hw = &vsi->back->hw;
  2387. struct ice_vsi_ctx ctxt = { 0 };
  2388. enum ice_status status;
  2389. /* Here we are configuring the VSI to let the driver add VLAN tags by
  2390. * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
  2391. * tag insertion happens in the Tx hot path, in ice_tx_map.
  2392. */
  2393. ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
  2394. ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
  2395. ctxt.vsi_num = vsi->vsi_num;
  2396. status = ice_aq_update_vsi(hw, &ctxt, NULL);
  2397. if (status) {
  2398. dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
  2399. status, hw->adminq.sq_last_status);
  2400. return -EIO;
  2401. }
  2402. vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
  2403. return 0;
  2404. }
  2405. /**
  2406. * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
  2407. * @vsi: the vsi being changed
2408. * @ena: boolean value indicating if this is an enable or disable request
  2409. */
  2410. static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
  2411. {
  2412. struct device *dev = &vsi->back->pdev->dev;
  2413. struct ice_hw *hw = &vsi->back->hw;
  2414. struct ice_vsi_ctx ctxt = { 0 };
  2415. enum ice_status status;
  2416. /* Here we are configuring what the VSI should do with the VLAN tag in
  2417. * the Rx packet. We can either leave the tag in the packet or put it in
  2418. * the Rx descriptor.
  2419. */
  2420. if (ena) {
  2421. /* Strip VLAN tag from Rx packet and put it in the desc */
  2422. ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
  2423. } else {
  2424. /* Disable stripping. Leave tag in packet */
  2425. ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
  2426. }
  2427. ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
  2428. ctxt.vsi_num = vsi->vsi_num;
  2429. status = ice_aq_update_vsi(hw, &ctxt, NULL);
  2430. if (status) {
  2431. dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n",
  2432. ena, status, hw->adminq.sq_last_status);
  2433. return -EIO;
  2434. }
  2435. vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
  2436. return 0;
  2437. }
  2438. /**
  2439. * ice_set_features - set the netdev feature flags
  2440. * @netdev: ptr to the netdev being adjusted
  2441. * @features: the feature set that the stack is suggesting
  2442. */
  2443. static int ice_set_features(struct net_device *netdev,
  2444. netdev_features_t features)
  2445. {
  2446. struct ice_netdev_priv *np = netdev_priv(netdev);
  2447. struct ice_vsi *vsi = np->vsi;
  2448. int ret = 0;
  2449. if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
  2450. !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
  2451. ret = ice_vsi_manage_vlan_stripping(vsi, true);
  2452. else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
  2453. (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
  2454. ret = ice_vsi_manage_vlan_stripping(vsi, false);
  2455. else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
  2456. !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
  2457. ret = ice_vsi_manage_vlan_insertion(vsi);
  2458. else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
  2459. (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
  2460. ret = ice_vsi_manage_vlan_insertion(vsi);
  2461. return ret;
  2462. }
  2463. /**
  2464. * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
  2465. * @vsi: VSI to setup vlan properties for
  2466. */
  2467. static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
  2468. {
  2469. int ret = 0;
  2470. if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
  2471. ret = ice_vsi_manage_vlan_stripping(vsi, true);
  2472. if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
  2473. ret = ice_vsi_manage_vlan_insertion(vsi);
  2474. return ret;
  2475. }
  2476. /**
  2477. * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
  2478. * @vsi: the VSI being brought back up
  2479. */
  2480. static int ice_restore_vlan(struct ice_vsi *vsi)
  2481. {
  2482. int err;
  2483. u16 vid;
  2484. if (!vsi->netdev)
  2485. return -EINVAL;
  2486. err = ice_vsi_vlan_setup(vsi);
  2487. if (err)
  2488. return err;
  2489. for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
  2490. err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
  2491. if (err)
  2492. break;
  2493. }
  2494. return err;
  2495. }
  2496. /**
  2497. * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
  2498. * @ring: The Tx ring to configure
  2499. * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
  2500. * @pf_q: queue index in the PF space
  2501. *
  2502. * Configure the Tx descriptor ring in TLAN context.
  2503. */
  2504. static void
  2505. ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
  2506. {
  2507. struct ice_vsi *vsi = ring->vsi;
  2508. struct ice_hw *hw = &vsi->back->hw;
  2509. tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
  2510. tlan_ctx->port_num = vsi->port_info->lport;
  2511. /* Transmit Queue Length */
  2512. tlan_ctx->qlen = ring->count;
  2513. /* PF number */
  2514. tlan_ctx->pf_num = hw->pf_id;
  2515. /* queue belongs to a specific VSI type
  2516. * VF / VM index should be programmed per vmvf_type setting:
  2517. * for vmvf_type = VF, it is VF number between 0-256
  2518. * for vmvf_type = VM, it is VM number between 0-767
  2519. * for PF or EMP this field should be set to zero
  2520. */
  2521. switch (vsi->type) {
  2522. case ICE_VSI_PF:
  2523. tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
  2524. break;
  2525. default:
  2526. return;
  2527. }
  2528. /* make sure the context is associated with the right VSI */
  2529. tlan_ctx->src_vsi = vsi->vsi_num;
  2530. tlan_ctx->tso_ena = ICE_TX_LEGACY;
  2531. tlan_ctx->tso_qnum = pf_q;
  2532. /* Legacy or Advanced Host Interface:
  2533. * 0: Advanced Host Interface
  2534. * 1: Legacy Host Interface
  2535. */
  2536. tlan_ctx->legacy_int = ICE_TX_LEGACY;
  2537. }
  2538. /**
  2539. * ice_vsi_cfg_txqs - Configure the VSI for Tx
  2540. * @vsi: the VSI being configured
  2541. *
  2542. * Return 0 on success and a negative value on error
  2543. * Configure the Tx VSI for operation.
  2544. */
  2545. static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
  2546. {
  2547. struct ice_aqc_add_tx_qgrp *qg_buf;
  2548. struct ice_aqc_add_txqs_perq *txq;
  2549. struct ice_pf *pf = vsi->back;
  2550. enum ice_status status;
  2551. u16 buf_len, i, pf_q;
  2552. int err = 0, tc = 0;
  2553. u8 num_q_grps;
  2554. buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
  2555. qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
  2556. if (!qg_buf)
  2557. return -ENOMEM;
  2558. if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
  2559. err = -EINVAL;
  2560. goto err_cfg_txqs;
  2561. }
  2562. qg_buf->num_txqs = 1;
  2563. num_q_grps = 1;
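/* each AQ call below programs one queue group containing one queue, so
 * the loop issues one add-Tx-queue command per ring, all on TC 0
 */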
  2564. /* set up and configure the tx queues */
  2565. ice_for_each_txq(vsi, i) {
  2566. struct ice_tlan_ctx tlan_ctx = { 0 };
  2567. pf_q = vsi->txq_map[i];
  2568. ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
  2569. /* copy context contents into the qg_buf */
  2570. qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
  2571. ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
  2572. ice_tlan_ctx_info);
2573. /* init queue specific tail reg. It is referred to as the transmit
2574. * comm scheduler queue doorbell.
  2575. */
  2576. vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
  2577. status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
  2578. num_q_grps, qg_buf, buf_len, NULL);
  2579. if (status) {
  2580. dev_err(&vsi->back->pdev->dev,
  2581. "Failed to set LAN Tx queue context, error: %d\n",
  2582. status);
  2583. err = -ENODEV;
  2584. goto err_cfg_txqs;
  2585. }
2586. /* Add the Tx queue TEID from the response into the VSI Tx ring.
2587. * This completes configuring and enabling the queue.
  2588. */
  2589. txq = &qg_buf->txqs[0];
  2590. if (pf_q == le16_to_cpu(txq->txq_id))
  2591. vsi->tx_rings[i]->txq_teid =
  2592. le32_to_cpu(txq->q_teid);
  2593. }
  2594. err_cfg_txqs:
  2595. devm_kfree(&pf->pdev->dev, qg_buf);
  2596. return err;
  2597. }
  2598. /**
  2599. * ice_setup_rx_ctx - Configure a receive ring context
  2600. * @ring: The Rx ring to configure
  2601. *
  2602. * Configure the Rx descriptor ring in RLAN context.
  2603. */
  2604. static int ice_setup_rx_ctx(struct ice_ring *ring)
  2605. {
  2606. struct ice_vsi *vsi = ring->vsi;
  2607. struct ice_hw *hw = &vsi->back->hw;
  2608. u32 rxdid = ICE_RXDID_FLEX_NIC;
  2609. struct ice_rlan_ctx rlan_ctx;
  2610. u32 regval;
  2611. u16 pf_q;
  2612. int err;
2613. /* Rx queue number in the global space of 2K Rx queues */
  2614. pf_q = vsi->rxq_map[ring->q_index];
  2615. /* clear the context structure first */
  2616. memset(&rlan_ctx, 0, sizeof(rlan_ctx));
  2617. rlan_ctx.base = ring->dma >> 7;
  2618. rlan_ctx.qlen = ring->count;
  2619. /* Receive Packet Data Buffer Size.
  2620. * The Packet Data Buffer Size is defined in 128 byte units.
  2621. */
  2622. rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
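/* e.g. a 2048 byte buffer encodes as 2048 >> ICE_RLAN_CTX_DBUF_S = 16,
 * assuming the shift is 7 to match the 128 byte units noted above
 */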
  2623. /* use 32 byte descriptors */
  2624. rlan_ctx.dsize = 1;
  2625. /* Strip the Ethernet CRC bytes before the packet is posted to host
  2626. * memory.
  2627. */
  2628. rlan_ctx.crcstrip = 1;
  2629. /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
  2630. rlan_ctx.l2tsel = 1;
  2631. rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
  2632. rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
  2633. rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
2634. /* This controls whether VLAN is stripped from inner headers.
  2635. * The VLAN in the inner L2 header is stripped to the receive
  2636. * descriptor if enabled by this flag.
  2637. */
  2638. rlan_ctx.showiv = 0;
  2639. /* Max packet size for this queue - must not be set to a larger value
  2640. * than 5 x DBUF
  2641. */
  2642. rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
  2643. ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
  2644. /* Rx queue threshold in units of 64 */
  2645. rlan_ctx.lrxqthresh = 1;
  2646. /* Enable Flexible Descriptors in the queue context which
  2647. * allows this driver to select a specific receive descriptor format
  2648. */
  2649. regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
  2650. regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
  2651. QRXFLXP_CNTXT_RXDID_IDX_M;
  2652. /* increasing context priority to pick up profile id;
2653. * default is 0x01; setting to 0x03 ensures the profile
2654. * is programmed even if the previous context has the same priority
  2655. */
  2656. regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
  2657. QRXFLXP_CNTXT_RXDID_PRIO_M;
  2658. wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
  2659. /* Absolute queue number out of 2K needs to be passed */
  2660. err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
  2661. if (err) {
  2662. dev_err(&vsi->back->pdev->dev,
  2663. "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
  2664. pf_q, err);
  2665. return -EIO;
  2666. }
  2667. /* init queue specific tail register */
  2668. ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
  2669. writel(0, ring->tail);
  2670. ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
  2671. return 0;
  2672. }
  2673. /**
  2674. * ice_vsi_cfg_rxqs - Configure the VSI for Rx
  2675. * @vsi: the VSI being configured
  2676. *
  2677. * Return 0 on success and a negative value on error
  2678. * Configure the Rx VSI for operation.
  2679. */
  2680. static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
  2681. {
  2682. int err = 0;
  2683. u16 i;
  2684. if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
  2685. vsi->max_frame = vsi->netdev->mtu +
  2686. ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
  2687. else
  2688. vsi->max_frame = ICE_RXBUF_2048;
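/* e.g. a (hypothetical) 9000 byte MTU gives max_frame =
 * 9000 + 14 + 4 + 4 = 9022 bytes, while any MTU at or below
 * ETH_DATA_LEN simply uses the 2048 byte default
 */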
  2689. vsi->rx_buf_len = ICE_RXBUF_2048;
  2690. /* set up individual rings */
  2691. for (i = 0; i < vsi->num_rxq && !err; i++)
  2692. err = ice_setup_rx_ctx(vsi->rx_rings[i]);
  2693. if (err) {
  2694. dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
  2695. return -EIO;
  2696. }
  2697. return err;
  2698. }
  2699. /**
  2700. * ice_vsi_cfg - Setup the VSI
  2701. * @vsi: the VSI being configured
  2702. *
  2703. * Return 0 on success and negative value on error
  2704. */
  2705. static int ice_vsi_cfg(struct ice_vsi *vsi)
  2706. {
  2707. int err;
  2708. err = ice_restore_vlan(vsi);
  2709. if (err)
  2710. return err;
  2711. err = ice_vsi_cfg_txqs(vsi);
  2712. if (!err)
  2713. err = ice_vsi_cfg_rxqs(vsi);
  2714. return err;
  2715. }
  2716. /**
  2717. * ice_vsi_stop_tx_rings - Disable Tx rings
  2718. * @vsi: the VSI being configured
  2719. */
  2720. static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
  2721. {
  2722. struct ice_pf *pf = vsi->back;
  2723. struct ice_hw *hw = &pf->hw;
  2724. enum ice_status status;
  2725. u32 *q_teids, val;
  2726. u16 *q_ids, i;
  2727. int err = 0;
  2728. if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
  2729. return -EINVAL;
  2730. q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
  2731. GFP_KERNEL);
  2732. if (!q_teids)
  2733. return -ENOMEM;
  2734. q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
  2735. GFP_KERNEL);
  2736. if (!q_ids) {
  2737. err = -ENOMEM;
  2738. goto err_alloc_q_ids;
  2739. }
  2740. /* set up the tx queue list to be disabled */
  2741. ice_for_each_txq(vsi, i) {
  2742. u16 v_idx;
  2743. if (!vsi->tx_rings || !vsi->tx_rings[i]) {
  2744. err = -EINVAL;
  2745. goto err_out;
  2746. }
  2747. q_ids[i] = vsi->txq_map[i];
  2748. q_teids[i] = vsi->tx_rings[i]->txq_teid;
  2749. /* clear cause_ena bit for disabled queues */
  2750. val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
  2751. val &= ~QINT_TQCTL_CAUSE_ENA_M;
  2752. wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
  2753. /* software is expected to wait for 100 ns */
  2754. ndelay(100);
2755. /* trigger a software interrupt for the vector associated with
2756. * the queue in order to schedule the napi handler
  2757. */
  2758. v_idx = vsi->tx_rings[i]->q_vector->v_idx;
  2759. wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
  2760. GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
  2761. }
  2762. status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
  2763. NULL);
  2764. if (status) {
  2765. dev_err(&pf->pdev->dev,
  2766. "Failed to disable LAN Tx queues, error: %d\n",
  2767. status);
  2768. err = -ENODEV;
  2769. }
  2770. err_out:
  2771. devm_kfree(&pf->pdev->dev, q_ids);
  2772. err_alloc_q_ids:
  2773. devm_kfree(&pf->pdev->dev, q_teids);
  2774. return err;
  2775. }

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
		u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));

		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
			break;

		usleep_range(10, 20);
	}

	if (i >= ICE_Q_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the rx rings
 */
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int i, j, ret = 0;

	for (i = 0; i < vsi->num_rxq; i++) {
		int pf_q = vsi->rxq_map[i];
		u32 rx_reg;

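		/* wait until any previous enable/disable request on this
		 * queue has settled, i.e. the request and status bits agree
		 */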
		for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
			rx_reg = rd32(hw, QRX_CTRL(pf_q));
			if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
			    ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
			continue;

		/* turn on/off the queue */
		if (ena)
			rx_reg |= QRX_CTRL_QENA_REQ_M;
		else
			rx_reg &= ~QRX_CTRL_QENA_REQ_M;
		wr32(hw, QRX_CTRL(pf_q), rx_reg);

		/* wait for the change to finish */
		ret = ice_pf_rxq_wait(pf, pf_q, ena);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"VSI idx %d Rx ring %d %sable timeout\n",
				vsi->idx, pf_q, (ena ? "en" : "dis"));
			break;
		}
	}

	return ret;
}

/**
 * ice_vsi_start_rx_rings - start VSI's rx rings
 * @vsi: the VSI whose rings are to be started
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, true);
}

/**
 * ice_vsi_stop_rx_rings - stop VSI's rx rings
 * @vsi: the VSI
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, false);
}

/**
 * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
 * @vsi: the VSI
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
{
	int err_tx, err_rx;

	err_tx = ice_vsi_stop_tx_rings(vsi);
	if (err_tx)
		dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");

	err_rx = ice_vsi_stop_rx_rings(vsi);
	if (err_rx)
		dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");

	if (err_tx || err_rx)
		return -EIO;

	return 0;
}

/**
 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 */
static void ice_napi_enable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
		napi_enable(&vsi->q_vectors[q_idx]->napi);
}

/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
 *
 * Return 0 on success and negative value on error
 */
static int ice_up_complete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int err;

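	/* only MSI-X interrupt mode is supported; bail out otherwise */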
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_vsi_cfg_msix(vsi);
	else
		return -ENOTSUPP;

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	err = ice_vsi_start_rx_rings(vsi);
	if (err)
		return err;

	clear_bit(__ICE_DOWN, vsi->state);
	ice_napi_enable_all(vsi);
	ice_vsi_ena_irq(vsi);

	if (vsi->port_info &&
	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
	    vsi->netdev) {
		ice_print_link_msg(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	ice_service_task_schedule(pf);

	return err;
}

/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */
static void ice_napi_disable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
		napi_disable(&vsi->q_vectors[q_idx]->napi);
}

/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
 */
static int ice_down(struct ice_vsi *vsi)
{
	int i, err;

	/* Caller of this function is expected to set the
	 * vsi->state __ICE_DOWN bit
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}

	ice_vsi_dis_irq(vsi);
	err = ice_vsi_stop_tx_rx_rings(vsi);
	ice_napi_disable_all(vsi);

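	/* release any buffers still held by the Tx and Rx rings */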
	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	ice_for_each_rxq(vsi, i)
		ice_clean_rx_ring(vsi->rx_rings[i]);

	if (err)
		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	return err;
}

/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
	int i, err;

	if (!vsi->num_txq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_txq(vsi, i) {
		err = ice_setup_tx_ring(vsi->tx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
	int i, err;

	if (!vsi->num_rxq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_rxq(vsi, i) {
		err = ice_setup_rx_ring(vsi->rx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_req_irq - Request IRQ from the OS
 * @vsi: The VSI IRQ is being requested for
 * @basename: name for the vector
 *
 * Return 0 on success and a negative value on error
 */
static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
{
	struct ice_pf *pf = vsi->back;
	int err = -EINVAL;

	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		err = ice_vsi_req_irq_msix(vsi, basename);

	return err;
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
static int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

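	/* build the base name used for this VSI's interrupt vectors from
	 * the driver and netdev names
	 */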
	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
	err = ice_vsi_req_irq(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
	if (err)
		goto err_set_qs;

	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
	if (err)
		goto err_set_qs;

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
static void ice_vsi_close(struct ice_vsi *vsi)
{
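	/* only bring the VSI down if it is not already marked down */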
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	pf = vsi->back;

	if (vsi->rss_hkey_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
static int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	if (vsi->netdev) {
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

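	/* free any user-supplied RSS key and LUT copies before the VSI
	 * itself is torn down
	 */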
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* reclaim interrupt vectors back to PF */
	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_msix += vsi->num_q_vectors;

	ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_put_qs(vsi);
	pf->q_left_tx += vsi->alloc_txq;
	pf->q_left_rx += vsi->alloc_rxq;

	ice_vsi_clear(vsi);

	return 0;
}

/**
 * ice_set_rss - Set RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

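	/* the caller-supplied seed buffer is reinterpreted as the admin
	 * queue set-RSS-key structure and handed to the AQ call directly
	 */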
	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
					    vsi->rss_lut_type, lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}

/**
 * ice_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
					    vsi->rss_lut_type, lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
static int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int err;

	netif_carrier_off(netdev);

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
static int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ice_vsi_close(vsi);

	return 0;
}
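
/* netdev callbacks registered with the network stack */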
static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_set_features = ice_set_features,
};