vmxnet3_drv.c

  1. /*
  2. * Linux driver for VMware's vmxnet3 ethernet NIC.
  3. *
  4. * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; version 2 of the License and no later version.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13. * NON INFRINGEMENT. See the GNU General Public License for more
  14. * details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19. *
  20. * The full GNU General Public License is included in this distribution in
  21. * the file called "COPYING".
  22. *
  23. * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
  24. *
  25. */
  26. #include <linux/module.h>
  27. #include <net/ip6_checksum.h>
  28. #include "vmxnet3_int.h"
  29. char vmxnet3_driver_name[] = "vmxnet3";
  30. #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
  31. /*
  32. * PCI Device ID Table
  33. * Last entry must be all 0s
  34. */
  35. static const struct pci_device_id vmxnet3_pciid_table[] = {
  36. {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
  37. {0}
  38. };
  39. MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
  40. static int enable_mq = 1;
  41. static void
  42. vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
  43. /*
  44. * Enable/Disable the given intr
  45. */
  46. static void
  47. vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  48. {
  49. VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
  50. }
  51. static void
  52. vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
  53. {
  54. VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
  55. }
  56. /*
  57. * Enable/Disable all intrs used by the device
  58. */
  59. static void
  60. vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
  61. {
  62. int i;
  63. for (i = 0; i < adapter->intr.num_intrs; i++)
  64. vmxnet3_enable_intr(adapter, i);
  65. adapter->shared->devRead.intrConf.intrCtrl &=
  66. cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
  67. }
  68. static void
  69. vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
  70. {
  71. int i;
  72. adapter->shared->devRead.intrConf.intrCtrl |=
  73. cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
  74. for (i = 0; i < adapter->intr.num_intrs; i++)
  75. vmxnet3_disable_intr(adapter, i);
  76. }
  77. static void
  78. vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
  79. {
  80. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
  81. }
  82. static bool
  83. vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  84. {
  85. return tq->stopped;
  86. }
  87. static void
  88. vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  89. {
  90. tq->stopped = false;
  91. netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
  92. }
  93. static void
  94. vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  95. {
  96. tq->stopped = false;
  97. netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
  98. }
  99. static void
  100. vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
  101. {
  102. tq->stopped = true;
  103. tq->num_stop++;
  104. netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
  105. }
  106. /*
  107. * Check the link state. This may start or stop the tx queue.
  108. */
  109. static void
  110. vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
  111. {
  112. u32 ret;
  113. int i;
  114. unsigned long flags;
  115. spin_lock_irqsave(&adapter->cmd_lock, flags);
  116. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
  117. ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
  118. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  119. adapter->link_speed = ret >> 16;
  120. if (ret & 1) { /* Link is up. */
  121. netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
  122. adapter->link_speed);
  123. netif_carrier_on(adapter->netdev);
  124. if (affectTxQueue) {
  125. for (i = 0; i < adapter->num_tx_queues; i++)
  126. vmxnet3_tq_start(&adapter->tx_queue[i],
  127. adapter);
  128. }
  129. } else {
  130. netdev_info(adapter->netdev, "NIC Link is Down\n");
  131. netif_carrier_off(adapter->netdev);
  132. if (affectTxQueue) {
  133. for (i = 0; i < adapter->num_tx_queues; i++)
  134. vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
  135. }
  136. }
  137. }
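/*
 * Handle events the device posted in the shared ECR register: acknowledge
 * them, re-check the link state on VMXNET3_ECR_LINK, and on a tx/rx queue
 * error fetch the queue status, log the stopped queues and schedule the
 * adapter's work handler.
 */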
  138. static void
  139. vmxnet3_process_events(struct vmxnet3_adapter *adapter)
  140. {
  141. int i;
  142. unsigned long flags;
  143. u32 events = le32_to_cpu(adapter->shared->ecr);
  144. if (!events)
  145. return;
  146. vmxnet3_ack_events(adapter, events);
  147. /* Check if link state has changed */
  148. if (events & VMXNET3_ECR_LINK)
  149. vmxnet3_check_link(adapter, true);
  150. /* Check if there is an error on xmit/recv queues */
  151. if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
  152. spin_lock_irqsave(&adapter->cmd_lock, flags);
  153. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  154. VMXNET3_CMD_GET_QUEUE_STATUS);
  155. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  156. for (i = 0; i < adapter->num_tx_queues; i++)
  157. if (adapter->tqd_start[i].status.stopped)
  158. dev_err(&adapter->netdev->dev,
  159. "%s: tq[%d] error 0x%x\n",
  160. adapter->netdev->name, i, le32_to_cpu(
  161. adapter->tqd_start[i].status.error));
  162. for (i = 0; i < adapter->num_rx_queues; i++)
  163. if (adapter->rqd_start[i].status.stopped)
  164. dev_err(&adapter->netdev->dev,
  165. "%s: rq[%d] error 0x%x\n",
  166. adapter->netdev->name, i,
  167. adapter->rqd_start[i].status.error);
  168. schedule_work(&adapter->work);
  169. }
  170. }
  171. #ifdef __BIG_ENDIAN_BITFIELD
  172. /*
  173. * The device expects the bitfields in shared structures to be written in
  174. * little endian. When the CPU is big endian, the following routines are used
  175. * to read from and write to the ABI correctly.
  176. * The general technique used here is: double word bitfields are defined in the
  177. * opposite order for big endian architectures. Before the driver reads them,
  178. * the complete double word is translated with le32_to_cpu. Similarly, after
  179. * the driver writes into the bitfields, cpu_to_le32 translates the double
  180. * words back into the required format.
  181. * To avoid touching bits in the shared structures more than once, temporary
  182. * descriptors are used. These are passed as srcDesc to the following functions.
  183. */
  184. static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
  185. struct Vmxnet3_RxDesc *dstDesc)
  186. {
  187. u32 *src = (u32 *)srcDesc + 2;
  188. u32 *dst = (u32 *)dstDesc + 2;
  189. dstDesc->addr = le64_to_cpu(srcDesc->addr);
  190. *dst = le32_to_cpu(*src);
  191. dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
  192. }
  193. static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
  194. struct Vmxnet3_TxDesc *dstDesc)
  195. {
  196. int i;
  197. u32 *src = (u32 *)(srcDesc + 1);
  198. u32 *dst = (u32 *)(dstDesc + 1);
  199. /* Working backwards so that the gen bit is set at the end. */
  200. for (i = 2; i > 0; i--) {
  201. src--;
  202. dst--;
  203. *dst = cpu_to_le32(*src);
  204. }
  205. }
  206. static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
  207. struct Vmxnet3_RxCompDesc *dstDesc)
  208. {
  209. int i = 0;
  210. u32 *src = (u32 *)srcDesc;
  211. u32 *dst = (u32 *)dstDesc;
  212. for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
  213. *dst = le32_to_cpu(*src);
  214. src++;
  215. dst++;
  216. }
  217. }
  218. /* Used to read bitfield values from double words. */
  219. static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
  220. {
  221. u32 temp = le32_to_cpu(*bitfield);
  222. u32 mask = ((1 << size) - 1) << pos;
  223. temp &= mask;
  224. temp >>= pos;
  225. return temp;
  226. }
  227. #endif /* __BIG_ENDIAN_BITFIELD */
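/*
 * Illustrative example (not part of the driver sources): with pos == 7 and
 * size == 1, get_bitfield32() isolates a single flag bit from a little
 * endian dword regardless of host byte order, e.g.
 *
 *   u32 flag = get_bitfield32(dword_ptr, 7, 1);   returns 0 or 1
 *
 * where dword_ptr is assumed to point at one __le32 word of a descriptor.
 */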
  228. #ifdef __BIG_ENDIAN_BITFIELD
  229. # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
  230. txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
  231. VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
  232. # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
  233. txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
  234. VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
  235. # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
  236. VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
  237. VMXNET3_TCD_GEN_SIZE)
  238. # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
  239. VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
  240. # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
  241. (dstrcd) = (tmp); \
  242. vmxnet3_RxCompToCPU((rcd), (tmp)); \
  243. } while (0)
  244. # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
  245. (dstrxd) = (tmp); \
  246. vmxnet3_RxDescToCPU((rxd), (tmp)); \
  247. } while (0)
  248. #else
  249. # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
  250. # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
  251. # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
  252. # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
  253. # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
  254. # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
  255. #endif /* __BIG_ENDIAN_BITFIELD */
  256. static void
  257. vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
  258. struct pci_dev *pdev)
  259. {
  260. if (tbi->map_type == VMXNET3_MAP_SINGLE)
  261. dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
  262. PCI_DMA_TODEVICE);
  263. else if (tbi->map_type == VMXNET3_MAP_PAGE)
  264. dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
  265. PCI_DMA_TODEVICE);
  266. else
  267. BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
  268. tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
  269. }
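/*
 * Reclaim one completed packet: unmap and free the skb whose EOP descriptor
 * sits at eop_idx and release every tx ring entry that belonged to it.
 * Returns the number of ring entries freed.
 */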
  270. static int
  271. vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
  272. struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
  273. {
  274. struct sk_buff *skb;
  275. int entries = 0;
  276. /* no out of order completion */
  277. BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
  278. BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
  279. skb = tq->buf_info[eop_idx].skb;
  280. BUG_ON(skb == NULL);
  281. tq->buf_info[eop_idx].skb = NULL;
  282. VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
  283. while (tq->tx_ring.next2comp != eop_idx) {
  284. vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
  285. pdev);
  286. /* update next2comp w/o tx_lock. Since we are marking more,
  287. * not fewer, tx ring entries as available, the worst case is
  288. * that the tx routine incorrectly re-queues a pkt due to
  289. * insufficient tx ring entries.
  290. */
  291. vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
  292. entries++;
  293. }
  294. dev_kfree_skb_any(skb);
  295. return entries;
  296. }
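/*
 * Walk the tx completion ring, reclaiming every packet the device has
 * finished transmitting, and wake the queue if it was stopped and enough
 * ring entries have become available again.
 */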
  297. static int
  298. vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
  299. struct vmxnet3_adapter *adapter)
  300. {
  301. int completed = 0;
  302. union Vmxnet3_GenericDesc *gdesc;
  303. gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
  304. while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
  305. completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
  306. &gdesc->tcd), tq, adapter->pdev,
  307. adapter);
  308. vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
  309. gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
  310. }
  311. if (completed) {
  312. spin_lock(&tq->tx_lock);
  313. if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
  314. vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
  315. VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
  316. netif_carrier_ok(adapter->netdev))) {
  317. vmxnet3_tq_wake(tq, adapter);
  318. }
  319. spin_unlock(&tq->tx_lock);
  320. }
  321. return completed;
  322. }
  323. static void
  324. vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
  325. struct vmxnet3_adapter *adapter)
  326. {
  327. int i;
  328. while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
  329. struct vmxnet3_tx_buf_info *tbi;
  330. tbi = tq->buf_info + tq->tx_ring.next2comp;
  331. vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
  332. if (tbi->skb) {
  333. dev_kfree_skb_any(tbi->skb);
  334. tbi->skb = NULL;
  335. }
  336. vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
  337. }
  338. /* sanity check, verify all buffers are indeed unmapped and freed */
  339. for (i = 0; i < tq->tx_ring.size; i++) {
  340. BUG_ON(tq->buf_info[i].skb != NULL ||
  341. tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
  342. }
  343. tq->tx_ring.gen = VMXNET3_INIT_GEN;
  344. tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
  345. tq->comp_ring.gen = VMXNET3_INIT_GEN;
  346. tq->comp_ring.next2proc = 0;
  347. }
  348. static void
  349. vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
  350. struct vmxnet3_adapter *adapter)
  351. {
  352. if (tq->tx_ring.base) {
  353. dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
  354. sizeof(struct Vmxnet3_TxDesc),
  355. tq->tx_ring.base, tq->tx_ring.basePA);
  356. tq->tx_ring.base = NULL;
  357. }
  358. if (tq->data_ring.base) {
  359. dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
  360. sizeof(struct Vmxnet3_TxDataDesc),
  361. tq->data_ring.base, tq->data_ring.basePA);
  362. tq->data_ring.base = NULL;
  363. }
  364. if (tq->comp_ring.base) {
  365. dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
  366. sizeof(struct Vmxnet3_TxCompDesc),
  367. tq->comp_ring.base, tq->comp_ring.basePA);
  368. tq->comp_ring.base = NULL;
  369. }
  370. if (tq->buf_info) {
  371. dma_free_coherent(&adapter->pdev->dev,
  372. tq->tx_ring.size * sizeof(tq->buf_info[0]),
  373. tq->buf_info, tq->buf_info_pa);
  374. tq->buf_info = NULL;
  375. }
  376. }
  377. /* Destroy all tx queues */
  378. void
  379. vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
  380. {
  381. int i;
  382. for (i = 0; i < adapter->num_tx_queues; i++)
  383. vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
  384. }
  385. static void
  386. vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
  387. struct vmxnet3_adapter *adapter)
  388. {
  389. int i;
  390. /* reset the tx ring contents to 0 and reset the tx ring states */
  391. memset(tq->tx_ring.base, 0, tq->tx_ring.size *
  392. sizeof(struct Vmxnet3_TxDesc));
  393. tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
  394. tq->tx_ring.gen = VMXNET3_INIT_GEN;
  395. memset(tq->data_ring.base, 0, tq->data_ring.size *
  396. sizeof(struct Vmxnet3_TxDataDesc));
  397. /* reset the tx comp ring contents to 0 and reset comp ring states */
  398. memset(tq->comp_ring.base, 0, tq->comp_ring.size *
  399. sizeof(struct Vmxnet3_TxCompDesc));
  400. tq->comp_ring.next2proc = 0;
  401. tq->comp_ring.gen = VMXNET3_INIT_GEN;
  402. /* reset the bookkeeping data */
  403. memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
  404. for (i = 0; i < tq->tx_ring.size; i++)
  405. tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
  406. /* stats are not reset */
  407. }
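/*
 * Allocate the DMA-coherent tx ring, data ring, completion ring and the
 * buf_info array for one tx queue. On any allocation failure, everything
 * allocated so far is torn down again and -ENOMEM is returned.
 */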
  408. static int
  409. vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
  410. struct vmxnet3_adapter *adapter)
  411. {
  412. size_t sz;
  413. BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
  414. tq->comp_ring.base || tq->buf_info);
  415. tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
  416. tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
  417. &tq->tx_ring.basePA, GFP_KERNEL);
  418. if (!tq->tx_ring.base) {
  419. netdev_err(adapter->netdev, "failed to allocate tx ring\n");
  420. goto err;
  421. }
  422. tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
  423. tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
  424. &tq->data_ring.basePA, GFP_KERNEL);
  425. if (!tq->data_ring.base) {
  426. netdev_err(adapter->netdev, "failed to allocate data ring\n");
  427. goto err;
  428. }
  429. tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
  430. tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
  431. &tq->comp_ring.basePA, GFP_KERNEL);
  432. if (!tq->comp_ring.base) {
  433. netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
  434. goto err;
  435. }
  436. sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
  437. tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
  438. &tq->buf_info_pa, GFP_KERNEL);
  439. if (!tq->buf_info)
  440. goto err;
  441. return 0;
  442. err:
  443. vmxnet3_tq_destroy(tq, adapter);
  444. return -ENOMEM;
  445. }
  446. static void
  447. vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
  448. {
  449. int i;
  450. for (i = 0; i < adapter->num_tx_queues; i++)
  451. vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
  452. }
  453. /*
  454. * starting from ring->next2fill, allocate rx buffers for the given ring
  455. * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
  456. * are allocated or allocation fails
  457. */
  458. static int
  459. vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
  460. int num_to_alloc, struct vmxnet3_adapter *adapter)
  461. {
  462. int num_allocated = 0;
  463. struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
  464. struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
  465. u32 val;
  466. while (num_allocated <= num_to_alloc) {
  467. struct vmxnet3_rx_buf_info *rbi;
  468. union Vmxnet3_GenericDesc *gd;
  469. rbi = rbi_base + ring->next2fill;
  470. gd = ring->base + ring->next2fill;
  471. if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
  472. if (rbi->skb == NULL) {
  473. rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
  474. rbi->len,
  475. GFP_KERNEL);
  476. if (unlikely(rbi->skb == NULL)) {
  477. rq->stats.rx_buf_alloc_failure++;
  478. break;
  479. }
  480. rbi->dma_addr = dma_map_single(
  481. &adapter->pdev->dev,
  482. rbi->skb->data, rbi->len,
  483. PCI_DMA_FROMDEVICE);
  484. } else {
  485. /* rx buffer skipped by the device */
  486. }
  487. val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
  488. } else {
  489. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
  490. rbi->len != PAGE_SIZE);
  491. if (rbi->page == NULL) {
  492. rbi->page = alloc_page(GFP_ATOMIC);
  493. if (unlikely(rbi->page == NULL)) {
  494. rq->stats.rx_buf_alloc_failure++;
  495. break;
  496. }
  497. rbi->dma_addr = dma_map_page(
  498. &adapter->pdev->dev,
  499. rbi->page, 0, PAGE_SIZE,
  500. PCI_DMA_FROMDEVICE);
  501. } else {
  502. /* rx buffers skipped by the device */
  503. }
  504. val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
  505. }
  506. BUG_ON(rbi->dma_addr == 0);
  507. gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
  508. gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
  509. | val | rbi->len);
  510. /* Fill the last buffer but don't mark it ready, or else the
  511. * device will think that the queue is full */
  512. if (num_allocated == num_to_alloc)
  513. break;
  514. gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
  515. num_allocated++;
  516. vmxnet3_cmd_ring_adv_next2fill(ring);
  517. }
  518. netdev_dbg(adapter->netdev,
  519. "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
  520. num_allocated, ring->next2fill, ring->next2comp);
  521. /* so that the device can distinguish a full ring from an empty ring */
  522. BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
  523. return num_allocated;
  524. }
  525. static void
  526. vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
  527. struct vmxnet3_rx_buf_info *rbi)
  528. {
  529. struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
  530. skb_shinfo(skb)->nr_frags;
  531. BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
  532. __skb_frag_set_page(frag, rbi->page);
  533. frag->page_offset = 0;
  534. skb_frag_size_set(frag, rcd->len);
  535. skb->data_len += rcd->len;
  536. skb->truesize += PAGE_SIZE;
  537. skb_shinfo(skb)->nr_frags++;
  538. }
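/*
 * Build the tx descriptor chain for one skb: an optional descriptor that
 * points at the headers copied into the data ring, one or more descriptors
 * for the remaining linear part, and one or more per page fragment. The
 * SOP descriptor keeps the inverted gen bit so the device ignores the
 * packet until vmxnet3_tq_xmit() flips it.
 */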
  539. static void
  540. vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
  541. struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
  542. struct vmxnet3_adapter *adapter)
  543. {
  544. u32 dw2, len;
  545. unsigned long buf_offset;
  546. int i;
  547. union Vmxnet3_GenericDesc *gdesc;
  548. struct vmxnet3_tx_buf_info *tbi = NULL;
  549. BUG_ON(ctx->copy_size > skb_headlen(skb));
  550. /* use the previous gen bit for the SOP desc */
  551. dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
  552. ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
  553. gdesc = ctx->sop_txd; /* both loops below can be skipped */
  554. /* no need to map the buffer if headers are copied */
  555. if (ctx->copy_size) {
  556. ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
  557. tq->tx_ring.next2fill *
  558. sizeof(struct Vmxnet3_TxDataDesc));
  559. ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
  560. ctx->sop_txd->dword[3] = 0;
  561. tbi = tq->buf_info + tq->tx_ring.next2fill;
  562. tbi->map_type = VMXNET3_MAP_NONE;
  563. netdev_dbg(adapter->netdev,
  564. "txd[%u]: 0x%Lx 0x%x 0x%x\n",
  565. tq->tx_ring.next2fill,
  566. le64_to_cpu(ctx->sop_txd->txd.addr),
  567. ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
  568. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  569. /* use the right gen for non-SOP desc */
  570. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  571. }
  572. /* linear part can use multiple tx desc if it's big */
  573. len = skb_headlen(skb) - ctx->copy_size;
  574. buf_offset = ctx->copy_size;
  575. while (len) {
  576. u32 buf_size;
  577. if (len < VMXNET3_MAX_TX_BUF_SIZE) {
  578. buf_size = len;
  579. dw2 |= len;
  580. } else {
  581. buf_size = VMXNET3_MAX_TX_BUF_SIZE;
  582. /* spec says that for TxDesc.len, 0 == 2^14 */
  583. }
  584. tbi = tq->buf_info + tq->tx_ring.next2fill;
  585. tbi->map_type = VMXNET3_MAP_SINGLE;
  586. tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
  587. skb->data + buf_offset, buf_size,
  588. PCI_DMA_TODEVICE);
  589. tbi->len = buf_size;
  590. gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
  591. BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
  592. gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
  593. gdesc->dword[2] = cpu_to_le32(dw2);
  594. gdesc->dword[3] = 0;
  595. netdev_dbg(adapter->netdev,
  596. "txd[%u]: 0x%Lx 0x%x 0x%x\n",
  597. tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
  598. le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
  599. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  600. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  601. len -= buf_size;
  602. buf_offset += buf_size;
  603. }
  604. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  605. const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  606. u32 buf_size;
  607. buf_offset = 0;
  608. len = skb_frag_size(frag);
  609. while (len) {
  610. tbi = tq->buf_info + tq->tx_ring.next2fill;
  611. if (len < VMXNET3_MAX_TX_BUF_SIZE) {
  612. buf_size = len;
  613. dw2 |= len;
  614. } else {
  615. buf_size = VMXNET3_MAX_TX_BUF_SIZE;
  616. /* spec says that for TxDesc.len, 0 == 2^14 */
  617. }
  618. tbi->map_type = VMXNET3_MAP_PAGE;
  619. tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
  620. buf_offset, buf_size,
  621. DMA_TO_DEVICE);
  622. tbi->len = buf_size;
  623. gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
  624. BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
  625. gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
  626. gdesc->dword[2] = cpu_to_le32(dw2);
  627. gdesc->dword[3] = 0;
  628. netdev_dbg(adapter->netdev,
  629. "txd[%u]: 0x%llx %u %u\n",
  630. tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
  631. le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
  632. vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
  633. dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
  634. len -= buf_size;
  635. buf_offset += buf_size;
  636. }
  637. }
  638. ctx->eop_txd = gdesc;
  639. /* set the last buf_info for the pkt */
  640. tbi->skb = skb;
  641. tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
  642. }
  643. /* Init all tx queues */
  644. static void
  645. vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
  646. {
  647. int i;
  648. for (i = 0; i < adapter->num_tx_queues; i++)
  649. vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
  650. }
  651. /*
  652. * Parse and copy the relevant protocol headers:
  653. * For a TSO pkt, the relevant headers are L2/3/4, including options.
  654. * For a pkt requesting csum offloading, they are L2/3 and may include L4
  655. * if it is a TCP/UDP pkt.
  656. *
  657. * Returns:
  658. * -1: an error occurred during parsing
  659. * 0: protocol headers parsed, but too big to be copied
  660. * 1: protocol headers parsed and copied
  661. *
  662. * Other effects:
  663. * 1. related *ctx fields are updated.
  664. * 2. ctx->copy_size is # of bytes copied
  665. * 3. the portion copied is guaranteed to be in the linear part
  666. *
  667. */
  668. static int
  669. vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
  670. struct vmxnet3_tx_ctx *ctx,
  671. struct vmxnet3_adapter *adapter)
  672. {
  673. struct Vmxnet3_TxDataDesc *tdd;
  674. u8 protocol = 0;
  675. if (ctx->mss) { /* TSO */
  676. ctx->eth_ip_hdr_size = skb_transport_offset(skb);
  677. ctx->l4_hdr_size = tcp_hdrlen(skb);
  678. ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
  679. } else {
  680. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  681. ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
  682. if (ctx->ipv4) {
  683. const struct iphdr *iph = ip_hdr(skb);
  684. protocol = iph->protocol;
  685. } else if (ctx->ipv6) {
  686. const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
  687. protocol = ipv6h->nexthdr;
  688. }
  689. switch (protocol) {
  690. case IPPROTO_TCP:
  691. ctx->l4_hdr_size = tcp_hdrlen(skb);
  692. break;
  693. case IPPROTO_UDP:
  694. ctx->l4_hdr_size = sizeof(struct udphdr);
  695. break;
  696. default:
  697. ctx->l4_hdr_size = 0;
  698. break;
  699. }
  700. ctx->copy_size = min(ctx->eth_ip_hdr_size +
  701. ctx->l4_hdr_size, skb->len);
  702. } else {
  703. ctx->eth_ip_hdr_size = 0;
  704. ctx->l4_hdr_size = 0;
  705. /* copy as much as allowed */
  706. ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
  707. , skb_headlen(skb));
  708. }
  709. if (skb->len <= VMXNET3_HDR_COPY_SIZE)
  710. ctx->copy_size = skb->len;
  711. /* make sure headers are accessible directly */
  712. if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
  713. goto err;
  714. }
  715. if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
  716. tq->stats.oversized_hdr++;
  717. ctx->copy_size = 0;
  718. return 0;
  719. }
  720. tdd = tq->data_ring.base + tq->tx_ring.next2fill;
  721. memcpy(tdd->data, skb->data, ctx->copy_size);
  722. netdev_dbg(adapter->netdev,
  723. "copy %u bytes to dataRing[%u]\n",
  724. ctx->copy_size, tq->tx_ring.next2fill);
  725. return 1;
  726. err:
  727. return -1;
  728. }
  729. static void
  730. vmxnet3_prepare_tso(struct sk_buff *skb,
  731. struct vmxnet3_tx_ctx *ctx)
  732. {
  733. struct tcphdr *tcph = tcp_hdr(skb);
  734. if (ctx->ipv4) {
  735. struct iphdr *iph = ip_hdr(skb);
  736. iph->check = 0;
  737. tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
  738. IPPROTO_TCP, 0);
  739. } else if (ctx->ipv6) {
  740. struct ipv6hdr *iph = ipv6_hdr(skb);
  741. tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
  742. IPPROTO_TCP, 0);
  743. }
  744. }
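/*
 * Estimate the number of tx descriptors the skb will need: the linear part
 * plus one extra for headers copied to the data ring, and additional
 * descriptors for every page fragment.
 */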
  745. static int txd_estimate(const struct sk_buff *skb)
  746. {
  747. int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
  748. int i;
  749. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  750. const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  751. count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
  752. }
  753. return count;
  754. }
  755. /*
  756. * Transmits a pkt through a given tq
  757. * Returns:
  758. * NETDEV_TX_OK: descriptors are setup successfully
  759. * NETDEV_TX_OK: error occurred, the pkt is dropped
  760. * NETDEV_TX_BUSY: tx ring is full, queue is stopped
  761. *
  762. * Side-effects:
  763. * 1. tx ring may be changed
  764. * 2. tq stats may be updated accordingly
  765. * 3. shared->txNumDeferred may be updated
  766. */
  767. static int
  768. vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
  769. struct vmxnet3_adapter *adapter, struct net_device *netdev)
  770. {
  771. int ret;
  772. u32 count;
  773. unsigned long flags;
  774. struct vmxnet3_tx_ctx ctx;
  775. union Vmxnet3_GenericDesc *gdesc;
  776. #ifdef __BIG_ENDIAN_BITFIELD
  777. /* Use temporary descriptor to avoid touching bits multiple times */
  778. union Vmxnet3_GenericDesc tempTxDesc;
  779. #endif
  780. count = txd_estimate(skb);
  781. ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
  782. ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
  783. ctx.mss = skb_shinfo(skb)->gso_size;
  784. if (ctx.mss) {
  785. if (skb_header_cloned(skb)) {
  786. if (unlikely(pskb_expand_head(skb, 0, 0,
  787. GFP_ATOMIC) != 0)) {
  788. tq->stats.drop_tso++;
  789. goto drop_pkt;
  790. }
  791. tq->stats.copy_skb_header++;
  792. }
  793. vmxnet3_prepare_tso(skb, &ctx);
  794. } else {
  795. if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
  796. /* non-tso pkts must not use more than
  797. * VMXNET3_MAX_TXD_PER_PKT entries
  798. */
  799. if (skb_linearize(skb) != 0) {
  800. tq->stats.drop_too_many_frags++;
  801. goto drop_pkt;
  802. }
  803. tq->stats.linearized++;
  804. /* recalculate the # of descriptors to use */
  805. count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
  806. }
  807. }
  808. spin_lock_irqsave(&tq->tx_lock, flags);
  809. if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
  810. tq->stats.tx_ring_full++;
  811. netdev_dbg(adapter->netdev,
  812. "tx queue stopped on %s, next2comp %u"
  813. " next2fill %u\n", adapter->netdev->name,
  814. tq->tx_ring.next2comp, tq->tx_ring.next2fill);
  815. vmxnet3_tq_stop(tq, adapter);
  816. spin_unlock_irqrestore(&tq->tx_lock, flags);
  817. return NETDEV_TX_BUSY;
  818. }
  819. ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
  820. if (ret >= 0) {
  821. BUG_ON(ret <= 0 && ctx.copy_size != 0);
  822. /* hdrs parsed, check against other limits */
  823. if (ctx.mss) {
  824. if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
  825. VMXNET3_MAX_TX_BUF_SIZE)) {
  826. goto hdr_too_big;
  827. }
  828. } else {
  829. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  830. if (unlikely(ctx.eth_ip_hdr_size +
  831. skb->csum_offset >
  832. VMXNET3_MAX_CSUM_OFFSET)) {
  833. goto hdr_too_big;
  834. }
  835. }
  836. }
  837. } else {
  838. tq->stats.drop_hdr_inspect_err++;
  839. goto unlock_drop_pkt;
  840. }
  841. /* fill tx descs related to addr & len */
  842. vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
  843. /* setup the EOP desc */
  844. ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
  845. /* setup the SOP desc */
  846. #ifdef __BIG_ENDIAN_BITFIELD
  847. gdesc = &tempTxDesc;
  848. gdesc->dword[2] = ctx.sop_txd->dword[2];
  849. gdesc->dword[3] = ctx.sop_txd->dword[3];
  850. #else
  851. gdesc = ctx.sop_txd;
  852. #endif
  853. if (ctx.mss) {
  854. gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
  855. gdesc->txd.om = VMXNET3_OM_TSO;
  856. gdesc->txd.msscof = ctx.mss;
  857. le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
  858. gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
  859. } else {
  860. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  861. gdesc->txd.hlen = ctx.eth_ip_hdr_size;
  862. gdesc->txd.om = VMXNET3_OM_CSUM;
  863. gdesc->txd.msscof = ctx.eth_ip_hdr_size +
  864. skb->csum_offset;
  865. } else {
  866. gdesc->txd.om = 0;
  867. gdesc->txd.msscof = 0;
  868. }
  869. le32_add_cpu(&tq->shared->txNumDeferred, 1);
  870. }
  871. if (skb_vlan_tag_present(skb)) {
  872. gdesc->txd.ti = 1;
  873. gdesc->txd.tci = skb_vlan_tag_get(skb);
  874. }
  875. /* finally flips the GEN bit of the SOP desc. */
  876. gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
  877. VMXNET3_TXD_GEN);
  878. #ifdef __BIG_ENDIAN_BITFIELD
  879. /* Finished updating in bitfields of Tx Desc, so write them in original
  880. * place.
  881. */
  882. vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
  883. (struct Vmxnet3_TxDesc *)ctx.sop_txd);
  884. gdesc = ctx.sop_txd;
  885. #endif
  886. netdev_dbg(adapter->netdev,
  887. "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
  888. (u32)(ctx.sop_txd -
  889. tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
  890. le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
  891. spin_unlock_irqrestore(&tq->tx_lock, flags);
  892. if (le32_to_cpu(tq->shared->txNumDeferred) >=
  893. le32_to_cpu(tq->shared->txThreshold)) {
  894. tq->shared->txNumDeferred = 0;
  895. VMXNET3_WRITE_BAR0_REG(adapter,
  896. VMXNET3_REG_TXPROD + tq->qid * 8,
  897. tq->tx_ring.next2fill);
  898. }
  899. return NETDEV_TX_OK;
  900. hdr_too_big:
  901. tq->stats.drop_oversized_hdr++;
  902. unlock_drop_pkt:
  903. spin_unlock_irqrestore(&tq->tx_lock, flags);
  904. drop_pkt:
  905. tq->stats.drop_total++;
  906. dev_kfree_skb_any(skb);
  907. return NETDEV_TX_OK;
  908. }
  909. static netdev_tx_t
  910. vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  911. {
  912. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  913. BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
  914. return vmxnet3_tq_xmit(skb,
  915. &adapter->tx_queue[skb->queue_mapping],
  916. adapter, netdev);
  917. }
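/*
 * Propagate the device's rx checksum verdict to the skb: mark it
 * CHECKSUM_UNNECESSARY when both the L3 and L4 checksums were verified,
 * hand over the raw checksum value when only that is available, and leave
 * the skb unchecked otherwise (device did not compute a checksum, or RX
 * checksum offload is disabled).
 */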
  918. static void
  919. vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
  920. struct sk_buff *skb,
  921. union Vmxnet3_GenericDesc *gdesc)
  922. {
  923. if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
  924. /* typical case: TCP/UDP over IP and both csums are correct */
  925. if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
  926. VMXNET3_RCD_CSUM_OK) {
  927. skb->ip_summed = CHECKSUM_UNNECESSARY;
  928. BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
  929. BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
  930. BUG_ON(gdesc->rcd.frg);
  931. } else {
  932. if (gdesc->rcd.csum) {
  933. skb->csum = htons(gdesc->rcd.csum);
  934. skb->ip_summed = CHECKSUM_PARTIAL;
  935. } else {
  936. skb_checksum_none_assert(skb);
  937. }
  938. }
  939. } else {
  940. skb_checksum_none_assert(skb);
  941. }
  942. }
  943. static void
  944. vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
  945. struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
  946. {
  947. rq->stats.drop_err++;
  948. if (!rcd->fcs)
  949. rq->stats.drop_fcs++;
  950. rq->stats.drop_total++;
  951. /*
  952. * We do not unmap and chain the rx buffer to the skb.
  953. * We basically pretend this buffer is not used and will be recycled
  954. * by vmxnet3_rq_alloc_rx_buf()
  955. */
  956. /*
  957. * ctx->skb may be NULL if this is the first and the only one
  958. * desc for the pkt
  959. */
  960. if (ctx->skb)
  961. dev_kfree_skb_irq(ctx->skb);
  962. ctx->skb = NULL;
  963. }
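/*
 * Work out the combined L2+L3+L4 header length of a received TCP packet so
 * the LRO path can derive a GSO size from skb->len. Returns 0 when the
 * headers cannot be read from the linear part of the skb.
 */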
  964. static u32
  965. vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
  966. union Vmxnet3_GenericDesc *gdesc)
  967. {
  968. u32 hlen, maplen;
  969. union {
  970. void *ptr;
  971. struct ethhdr *eth;
  972. struct iphdr *ipv4;
  973. struct ipv6hdr *ipv6;
  974. struct tcphdr *tcp;
  975. } hdr;
  976. BUG_ON(gdesc->rcd.tcp == 0);
  977. maplen = skb_headlen(skb);
  978. if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
  979. return 0;
  980. hdr.eth = eth_hdr(skb);
  981. if (gdesc->rcd.v4) {
  982. BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
  983. hdr.ptr += sizeof(struct ethhdr);
  984. BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
  985. hlen = hdr.ipv4->ihl << 2;
  986. hdr.ptr += hdr.ipv4->ihl << 2;
  987. } else if (gdesc->rcd.v6) {
  988. BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
  989. hdr.ptr += sizeof(struct ethhdr);
  990. /* Use an estimated value, since we also need to handle
  991. * TSO case.
  992. */
  993. if (hdr.ipv6->nexthdr != IPPROTO_TCP)
  994. return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
  995. hlen = sizeof(struct ipv6hdr);
  996. hdr.ptr += sizeof(struct ipv6hdr);
  997. } else {
  998. /* Non-IP pkt, don't estimate header length */
  999. return 0;
  1000. }
  1001. if (hlen + sizeof(struct tcphdr) > maplen)
  1002. return 0;
  1003. return (hlen + (hdr.tcp->doff << 2));
  1004. }
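/*
 * Main rx completion loop: process up to @quota completed descriptors,
 * build the skb for each packet (checksum, RSS hash, VLAN tag and LRO/GSO
 * metadata included), pass it up the stack, refill the rx rings with fresh
 * buffers and, when the device requests it, update the rx producer
 * registers.
 */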
  1005. static int
  1006. vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
  1007. struct vmxnet3_adapter *adapter, int quota)
  1008. {
  1009. static const u32 rxprod_reg[2] = {
  1010. VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
  1011. };
  1012. u32 num_pkts = 0;
  1013. bool skip_page_frags = false;
  1014. struct Vmxnet3_RxCompDesc *rcd;
  1015. struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
  1016. u16 segCnt = 0, mss = 0;
  1017. #ifdef __BIG_ENDIAN_BITFIELD
  1018. struct Vmxnet3_RxDesc rxCmdDesc;
  1019. struct Vmxnet3_RxCompDesc rxComp;
  1020. #endif
  1021. vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
  1022. &rxComp);
  1023. while (rcd->gen == rq->comp_ring.gen) {
  1024. struct vmxnet3_rx_buf_info *rbi;
  1025. struct sk_buff *skb, *new_skb = NULL;
  1026. struct page *new_page = NULL;
  1027. int num_to_alloc;
  1028. struct Vmxnet3_RxDesc *rxd;
  1029. u32 idx, ring_idx;
  1030. struct vmxnet3_cmd_ring *ring = NULL;
  1031. if (num_pkts >= quota) {
  1032. /* we may stop even before we see the EOP desc of
  1033. * the current pkt
  1034. */
  1035. break;
  1036. }
  1037. BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
  1038. idx = rcd->rxdIdx;
  1039. ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
  1040. ring = rq->rx_ring + ring_idx;
  1041. vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
  1042. &rxCmdDesc);
  1043. rbi = rq->buf_info[ring_idx] + idx;
  1044. BUG_ON(rxd->addr != rbi->dma_addr ||
  1045. rxd->len != rbi->len);
  1046. if (unlikely(rcd->eop && rcd->err)) {
  1047. vmxnet3_rx_error(rq, rcd, ctx, adapter);
  1048. goto rcd_done;
  1049. }
  1050. if (rcd->sop) { /* first buf of the pkt */
  1051. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
  1052. rcd->rqID != rq->qid);
  1053. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
  1054. BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
  1055. if (unlikely(rcd->len == 0)) {
  1056. /* Pretend the rx buffer is skipped. */
  1057. BUG_ON(!(rcd->sop && rcd->eop));
  1058. netdev_dbg(adapter->netdev,
  1059. "rxRing[%u][%u] 0 length\n",
  1060. ring_idx, idx);
  1061. goto rcd_done;
  1062. }
  1063. skip_page_frags = false;
  1064. ctx->skb = rbi->skb;
  1065. new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
  1066. rbi->len);
  1067. if (new_skb == NULL) {
  1068. /* Skb allocation failed, do not hand this skb
  1069. * over to the stack. Reuse it. Drop the existing pkt.
  1070. */
  1071. rq->stats.rx_buf_alloc_failure++;
  1072. ctx->skb = NULL;
  1073. rq->stats.drop_total++;
  1074. skip_page_frags = true;
  1075. goto rcd_done;
  1076. }
  1077. dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
  1078. rbi->len,
  1079. PCI_DMA_FROMDEVICE);
  1080. #ifdef VMXNET3_RSS
  1081. if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
  1082. (adapter->netdev->features & NETIF_F_RXHASH))
  1083. skb_set_hash(ctx->skb,
  1084. le32_to_cpu(rcd->rssHash),
  1085. PKT_HASH_TYPE_L3);
  1086. #endif
  1087. skb_put(ctx->skb, rcd->len);
  1088. /* Immediate refill */
  1089. rbi->skb = new_skb;
  1090. rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
  1091. rbi->skb->data, rbi->len,
  1092. PCI_DMA_FROMDEVICE);
  1093. rxd->addr = cpu_to_le64(rbi->dma_addr);
  1094. rxd->len = rbi->len;
  1095. if (adapter->version == 2 &&
  1096. rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
  1097. struct Vmxnet3_RxCompDescExt *rcdlro;
  1098. rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
  1099. segCnt = rcdlro->segCnt;
  1100. BUG_ON(segCnt <= 1);
  1101. mss = rcdlro->mss;
  1102. if (unlikely(segCnt <= 1))
  1103. segCnt = 0;
  1104. } else {
  1105. segCnt = 0;
  1106. }
  1107. } else {
  1108. BUG_ON(ctx->skb == NULL && !skip_page_frags);
  1109. /* non SOP buffer must be type 1 in most cases */
  1110. BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
  1111. BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
  1112. /* If an sop buffer was dropped, skip all
  1113. * following non-sop fragments. They will be reused.
  1114. */
  1115. if (skip_page_frags)
  1116. goto rcd_done;
  1117. if (rcd->len) {
  1118. new_page = alloc_page(GFP_ATOMIC);
  1119. /* Replacement page frag could not be allocated.
  1120. * Reuse this page. Drop the pkt and free the
  1121. * skb which contained this page as a frag. Skip
  1122. * processing all the following non-sop frags.
  1123. */
  1124. if (unlikely(!new_page)) {
  1125. rq->stats.rx_buf_alloc_failure++;
  1126. dev_kfree_skb(ctx->skb);
  1127. ctx->skb = NULL;
  1128. skip_page_frags = true;
  1129. goto rcd_done;
  1130. }
  1131. dma_unmap_page(&adapter->pdev->dev,
  1132. rbi->dma_addr, rbi->len,
  1133. PCI_DMA_FROMDEVICE);
  1134. vmxnet3_append_frag(ctx->skb, rcd, rbi);
  1135. /* Immediate refill */
  1136. rbi->page = new_page;
  1137. rbi->dma_addr = dma_map_page(&adapter->pdev->dev
  1138. , rbi->page,
  1139. 0, PAGE_SIZE,
  1140. PCI_DMA_FROMDEVICE);
  1141. rxd->addr = cpu_to_le64(rbi->dma_addr);
  1142. rxd->len = rbi->len;
  1143. }
  1144. }
  1145. skb = ctx->skb;
  1146. if (rcd->eop) {
  1147. u32 mtu = adapter->netdev->mtu;
  1148. skb->len += skb->data_len;
  1149. vmxnet3_rx_csum(adapter, skb,
  1150. (union Vmxnet3_GenericDesc *)rcd);
  1151. skb->protocol = eth_type_trans(skb, adapter->netdev);
  1152. if (!rcd->tcp || !adapter->lro)
  1153. goto not_lro;
  1154. if (segCnt != 0 && mss != 0) {
  1155. skb_shinfo(skb)->gso_type = rcd->v4 ?
  1156. SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
  1157. skb_shinfo(skb)->gso_size = mss;
  1158. skb_shinfo(skb)->gso_segs = segCnt;
  1159. } else if (segCnt != 0 || skb->len > mtu) {
  1160. u32 hlen;
  1161. hlen = vmxnet3_get_hdr_len(adapter, skb,
  1162. (union Vmxnet3_GenericDesc *)rcd);
  1163. if (hlen == 0)
  1164. goto not_lro;
  1165. skb_shinfo(skb)->gso_type =
  1166. rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
  1167. if (segCnt != 0) {
  1168. skb_shinfo(skb)->gso_segs = segCnt;
  1169. skb_shinfo(skb)->gso_size =
  1170. DIV_ROUND_UP(skb->len -
  1171. hlen, segCnt);
  1172. } else {
  1173. skb_shinfo(skb)->gso_size = mtu - hlen;
  1174. }
  1175. }
  1176. not_lro:
  1177. if (unlikely(rcd->ts))
  1178. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
  1179. if (adapter->netdev->features & NETIF_F_LRO)
  1180. netif_receive_skb(skb);
  1181. else
  1182. napi_gro_receive(&rq->napi, skb);
  1183. ctx->skb = NULL;
  1184. num_pkts++;
  1185. }
  1186. rcd_done:
  1187. /* device may have skipped some rx descs */
  1188. ring->next2comp = idx;
  1189. num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
  1190. ring = rq->rx_ring + ring_idx;
  1191. while (num_to_alloc) {
  1192. vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
  1193. &rxCmdDesc);
  1194. BUG_ON(!rxd->addr);
  1195. /* Recv desc is ready to be used by the device */
  1196. rxd->gen = ring->gen;
  1197. vmxnet3_cmd_ring_adv_next2fill(ring);
  1198. num_to_alloc--;
  1199. }
  1200. /* if needed, update the register */
  1201. if (unlikely(rq->shared->updateRxProd)) {
  1202. VMXNET3_WRITE_BAR0_REG(adapter,
  1203. rxprod_reg[ring_idx] + rq->qid * 8,
  1204. ring->next2fill);
  1205. }
  1206. vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
  1207. vmxnet3_getRxComp(rcd,
  1208. &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
  1209. }
  1210. return num_pkts;
  1211. }
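/* vmxnet3_rq_cleanup: unmap and free every buffer still attached to the
 * two rx rings (skbs on HEAD descriptors, pages on BODY descriptors) and
 * reset the rings' generation/fill/completion state. The ring memory
 * itself is not freed; that is vmxnet3_rq_destroy()'s job.
 */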
  1212. static void
  1213. vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
  1214. struct vmxnet3_adapter *adapter)
  1215. {
  1216. u32 i, ring_idx;
  1217. struct Vmxnet3_RxDesc *rxd;
  1218. for (ring_idx = 0; ring_idx < 2; ring_idx++) {
  1219. for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
  1220. #ifdef __BIG_ENDIAN_BITFIELD
  1221. struct Vmxnet3_RxDesc rxDesc;
  1222. #endif
  1223. vmxnet3_getRxDesc(rxd,
  1224. &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
  1225. if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
  1226. rq->buf_info[ring_idx][i].skb) {
  1227. dma_unmap_single(&adapter->pdev->dev, rxd->addr,
  1228. rxd->len, PCI_DMA_FROMDEVICE);
  1229. dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
  1230. rq->buf_info[ring_idx][i].skb = NULL;
  1231. } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
  1232. rq->buf_info[ring_idx][i].page) {
  1233. dma_unmap_page(&adapter->pdev->dev, rxd->addr,
  1234. rxd->len, PCI_DMA_FROMDEVICE);
  1235. put_page(rq->buf_info[ring_idx][i].page);
  1236. rq->buf_info[ring_idx][i].page = NULL;
  1237. }
  1238. }
  1239. rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
  1240. rq->rx_ring[ring_idx].next2fill =
  1241. rq->rx_ring[ring_idx].next2comp = 0;
  1242. }
  1243. rq->comp_ring.gen = VMXNET3_INIT_GEN;
  1244. rq->comp_ring.next2proc = 0;
  1245. }
  1246. static void
  1247. vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
  1248. {
  1249. int i;
  1250. for (i = 0; i < adapter->num_rx_queues; i++)
  1251. vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
  1252. }
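/* vmxnet3_rq_destroy: release the DMA-coherent memory backing the rx
 * descriptor rings, the completion ring and the buf_info array. Callers
 * must have freed all rx buffers first (see the BUG_ON checks below).
 */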
  1253. static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
  1254. struct vmxnet3_adapter *adapter)
  1255. {
  1256. int i;
  1257. int j;
  1258. /* all rx buffers must have already been freed */
  1259. for (i = 0; i < 2; i++) {
  1260. if (rq->buf_info[i]) {
  1261. for (j = 0; j < rq->rx_ring[i].size; j++)
  1262. BUG_ON(rq->buf_info[i][j].page != NULL);
  1263. }
  1264. }
  1265. for (i = 0; i < 2; i++) {
  1266. if (rq->rx_ring[i].base) {
  1267. dma_free_coherent(&adapter->pdev->dev,
  1268. rq->rx_ring[i].size
  1269. * sizeof(struct Vmxnet3_RxDesc),
  1270. rq->rx_ring[i].base,
  1271. rq->rx_ring[i].basePA);
  1272. rq->rx_ring[i].base = NULL;
  1273. }
  1274. rq->buf_info[i] = NULL;
  1275. }
  1276. if (rq->comp_ring.base) {
  1277. dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
  1278. * sizeof(struct Vmxnet3_RxCompDesc),
  1279. rq->comp_ring.base, rq->comp_ring.basePA);
  1280. rq->comp_ring.base = NULL;
  1281. }
  1282. if (rq->buf_info[0]) {
  1283. size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
  1284. (rq->rx_ring[0].size + rq->rx_ring[1].size);
  1285. dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
  1286. rq->buf_info_pa);
  1287. }
  1288. }
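/* vmxnet3_rq_init: label every slot of ring 0 as either an skb buffer
 * (one per packet) or a page-fragment buffer, mark all of ring 1 as page
 * buffers, zero both descriptor rings and pre-populate them with fresh
 * buffers. Fails only if not a single buffer could be posted to ring 0.
 */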
  1289. static int
  1290. vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
  1291. struct vmxnet3_adapter *adapter)
  1292. {
  1293. int i;
  1294. /* initialize buf_info */
  1295. for (i = 0; i < rq->rx_ring[0].size; i++) {
  1296. /* 1st buf for a pkt is skbuff */
  1297. if (i % adapter->rx_buf_per_pkt == 0) {
  1298. rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
  1299. rq->buf_info[0][i].len = adapter->skb_buf_size;
} else { /* subsequent bufs for a pkt are frags */
  1301. rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
  1302. rq->buf_info[0][i].len = PAGE_SIZE;
  1303. }
  1304. }
  1305. for (i = 0; i < rq->rx_ring[1].size; i++) {
  1306. rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
  1307. rq->buf_info[1][i].len = PAGE_SIZE;
  1308. }
  1309. /* reset internal state and allocate buffers for both rings */
  1310. for (i = 0; i < 2; i++) {
  1311. rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
  1312. memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
  1313. sizeof(struct Vmxnet3_RxDesc));
  1314. rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
  1315. }
  1316. if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
  1317. adapter) == 0) {
/* the 1st ring must have at least one rx buffer */
  1319. return -ENOMEM;
  1320. }
  1321. vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
  1322. /* reset the comp ring */
  1323. rq->comp_ring.next2proc = 0;
  1324. memset(rq->comp_ring.base, 0, rq->comp_ring.size *
  1325. sizeof(struct Vmxnet3_RxCompDesc));
  1326. rq->comp_ring.gen = VMXNET3_INIT_GEN;
  1327. /* reset rxctx */
  1328. rq->rx_ctx.skb = NULL;
  1329. /* stats are not reset */
  1330. return 0;
  1331. }
  1332. static int
  1333. vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
  1334. {
  1335. int i, err = 0;
  1336. for (i = 0; i < adapter->num_rx_queues; i++) {
  1337. err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
  1338. if (unlikely(err)) {
dev_err(&adapter->netdev->dev,
	"%s: failed to initialize rx queue %i\n",
	adapter->netdev->name, i);
  1342. break;
  1343. }
  1344. }
  1345. return err;
  1346. }
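/* vmxnet3_rq_create: allocate the DMA-coherent descriptor rings (two rx
 * rings plus one completion ring) and a single buf_info array shared by
 * both rx rings: buf_info[0] covers ring 0 and buf_info[1] points into
 * the same allocation immediately after ring 0's entries.
 */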
  1347. static int
  1348. vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
  1349. {
  1350. int i;
  1351. size_t sz;
  1352. struct vmxnet3_rx_buf_info *bi;
  1353. for (i = 0; i < 2; i++) {
  1354. sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
  1355. rq->rx_ring[i].base = dma_alloc_coherent(
  1356. &adapter->pdev->dev, sz,
  1357. &rq->rx_ring[i].basePA,
  1358. GFP_KERNEL);
  1359. if (!rq->rx_ring[i].base) {
  1360. netdev_err(adapter->netdev,
  1361. "failed to allocate rx ring %d\n", i);
  1362. goto err;
  1363. }
  1364. }
  1365. sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
  1366. rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
  1367. &rq->comp_ring.basePA,
  1368. GFP_KERNEL);
  1369. if (!rq->comp_ring.base) {
  1370. netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
  1371. goto err;
  1372. }
  1373. sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
  1374. rq->rx_ring[1].size);
  1375. bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
  1376. GFP_KERNEL);
  1377. if (!bi)
  1378. goto err;
  1379. rq->buf_info[0] = bi;
  1380. rq->buf_info[1] = bi + rq->rx_ring[0].size;
  1381. return 0;
  1382. err:
  1383. vmxnet3_rq_destroy(rq, adapter);
  1384. return -ENOMEM;
  1385. }
  1386. static int
  1387. vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
  1388. {
  1389. int i, err = 0;
  1390. for (i = 0; i < adapter->num_rx_queues; i++) {
  1391. err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
  1392. if (unlikely(err)) {
  1393. dev_err(&adapter->netdev->dev,
  1394. "%s: failed to create rx queue%i\n",
  1395. adapter->netdev->name, i);
  1396. goto err_out;
  1397. }
  1398. }
  1399. return err;
  1400. err_out:
  1401. vmxnet3_rq_destroy_all(adapter);
  1402. return err;
  1403. }
  1404. /* Multiple queue aware polling function for tx and rx */
  1405. static int
  1406. vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
  1407. {
  1408. int rcd_done = 0, i;
  1409. if (unlikely(adapter->shared->ecr))
  1410. vmxnet3_process_events(adapter);
  1411. for (i = 0; i < adapter->num_tx_queues; i++)
  1412. vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
  1413. for (i = 0; i < adapter->num_rx_queues; i++)
  1414. rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
  1415. adapter, budget);
  1416. return rcd_done;
  1417. }
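/* NAPI poll handler used when all queues share one interrupt (INTx/MSI):
 * it completes tx work on every tx queue and drains every rx queue, then
 * re-enables all interrupts once less than the full budget was consumed.
 */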
  1418. static int
  1419. vmxnet3_poll(struct napi_struct *napi, int budget)
  1420. {
  1421. struct vmxnet3_rx_queue *rx_queue = container_of(napi,
  1422. struct vmxnet3_rx_queue, napi);
  1423. int rxd_done;
  1424. rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
  1425. if (rxd_done < budget) {
  1426. napi_complete(napi);
  1427. vmxnet3_enable_all_intrs(rx_queue->adapter);
  1428. }
  1429. return rxd_done;
  1430. }
  1431. /*
  1432. * NAPI polling function for MSI-X mode with multiple Rx queues
  1433. * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
  1434. */
  1435. static int
  1436. vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
  1437. {
  1438. struct vmxnet3_rx_queue *rq = container_of(napi,
  1439. struct vmxnet3_rx_queue, napi);
  1440. struct vmxnet3_adapter *adapter = rq->adapter;
  1441. int rxd_done;
  1442. /* When sharing interrupt with corresponding tx queue, process
  1443. * tx completions in that queue as well
  1444. */
  1445. if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
  1446. struct vmxnet3_tx_queue *tq =
  1447. &adapter->tx_queue[rq - adapter->rx_queue];
  1448. vmxnet3_tq_tx_complete(tq, adapter);
  1449. }
  1450. rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
  1451. if (rxd_done < budget) {
  1452. napi_complete(napi);
  1453. vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
  1454. }
  1455. return rxd_done;
  1456. }
  1457. #ifdef CONFIG_PCI_MSI
  1458. /*
  1459. * Handle completion interrupts on tx queues
  1460. * Returns whether or not the intr is handled
  1461. */
  1462. static irqreturn_t
  1463. vmxnet3_msix_tx(int irq, void *data)
  1464. {
  1465. struct vmxnet3_tx_queue *tq = data;
  1466. struct vmxnet3_adapter *adapter = tq->adapter;
  1467. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1468. vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
/* Handle the case where only one irq is allocated for all tx queues */
  1470. if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
  1471. int i;
  1472. for (i = 0; i < adapter->num_tx_queues; i++) {
  1473. struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
  1474. vmxnet3_tq_tx_complete(txq, adapter);
  1475. }
  1476. } else {
  1477. vmxnet3_tq_tx_complete(tq, adapter);
  1478. }
  1479. vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
  1480. return IRQ_HANDLED;
  1481. }
  1482. /*
  1483. * Handle completion interrupts on rx queues. Returns whether or not the
  1484. * intr is handled
  1485. */
  1486. static irqreturn_t
  1487. vmxnet3_msix_rx(int irq, void *data)
  1488. {
  1489. struct vmxnet3_rx_queue *rq = data;
  1490. struct vmxnet3_adapter *adapter = rq->adapter;
  1491. /* disable intr if needed */
  1492. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1493. vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
  1494. napi_schedule(&rq->napi);
  1495. return IRQ_HANDLED;
  1496. }
  1497. /*
  1498. *----------------------------------------------------------------------------
  1499. *
  1500. * vmxnet3_msix_event --
  1501. *
  1502. * vmxnet3 msix event intr handler
  1503. *
  1504. * Result:
  1505. * whether or not the intr is handled
  1506. *
  1507. *----------------------------------------------------------------------------
  1508. */
  1509. static irqreturn_t
  1510. vmxnet3_msix_event(int irq, void *data)
  1511. {
  1512. struct net_device *dev = data;
  1513. struct vmxnet3_adapter *adapter = netdev_priv(dev);
  1514. /* disable intr if needed */
  1515. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1516. vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
  1517. if (adapter->shared->ecr)
  1518. vmxnet3_process_events(adapter);
  1519. vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
  1520. return IRQ_HANDLED;
  1521. }
  1522. #endif /* CONFIG_PCI_MSI */
  1523. /* Interrupt handler for vmxnet3 */
  1524. static irqreturn_t
  1525. vmxnet3_intr(int irq, void *dev_id)
  1526. {
  1527. struct net_device *dev = dev_id;
  1528. struct vmxnet3_adapter *adapter = netdev_priv(dev);
  1529. if (adapter->intr.type == VMXNET3_IT_INTX) {
  1530. u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
  1531. if (unlikely(icr == 0))
  1532. /* not ours */
  1533. return IRQ_NONE;
  1534. }
  1535. /* disable intr if needed */
  1536. if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
  1537. vmxnet3_disable_all_intrs(adapter);
  1538. napi_schedule(&adapter->rx_queue[0].napi);
  1539. return IRQ_HANDLED;
  1540. }
  1541. #ifdef CONFIG_NET_POLL_CONTROLLER
  1542. /* netpoll callback. */
  1543. static void
  1544. vmxnet3_netpoll(struct net_device *netdev)
  1545. {
  1546. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1547. switch (adapter->intr.type) {
  1548. #ifdef CONFIG_PCI_MSI
  1549. case VMXNET3_IT_MSIX: {
  1550. int i;
  1551. for (i = 0; i < adapter->num_rx_queues; i++)
  1552. vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
  1553. break;
  1554. }
  1555. #endif
  1556. case VMXNET3_IT_MSI:
  1557. default:
  1558. vmxnet3_intr(0, adapter->netdev);
  1559. break;
  1560. }
  1561. }
  1562. #endif /* CONFIG_NET_POLL_CONTROLLER */
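/* vmxnet3_request_irqs: wire interrupt handlers to the vectors chosen by
 * vmxnet3_alloc_intr_resources(). With MSI-X each tx and rx queue normally
 * gets its own vector plus one extra vector for device events; with
 * VMXNET3_INTR_BUDDYSHARE a tx queue shares the vector of its rx "buddy",
 * and with VMXNET3_INTR_TXSHARE all tx queues share a single vector.
 * As an illustrative example, 2 tx + 2 rx queues in buddy-share mode would
 * use vector 0 for tx0+rx0, vector 1 for tx1+rx1 and vector 2 for events.
 * With MSI or INTx everything funnels through one vector.
 */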
  1563. static int
  1564. vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
  1565. {
  1566. struct vmxnet3_intr *intr = &adapter->intr;
  1567. int err = 0, i;
  1568. int vector = 0;
  1569. #ifdef CONFIG_PCI_MSI
  1570. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  1571. for (i = 0; i < adapter->num_tx_queues; i++) {
  1572. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
  1573. sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
  1574. adapter->netdev->name, vector);
  1575. err = request_irq(
  1576. intr->msix_entries[vector].vector,
  1577. vmxnet3_msix_tx, 0,
  1578. adapter->tx_queue[i].name,
  1579. &adapter->tx_queue[i]);
  1580. } else {
  1581. sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
  1582. adapter->netdev->name, vector);
  1583. }
  1584. if (err) {
  1585. dev_err(&adapter->netdev->dev,
  1586. "Failed to request irq for MSIX, %s, "
  1587. "error %d\n",
  1588. adapter->tx_queue[i].name, err);
  1589. return err;
  1590. }
  1591. /* Handle the case where only 1 MSIx was allocated for
  1592. * all tx queues */
  1593. if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
  1594. for (; i < adapter->num_tx_queues; i++)
  1595. adapter->tx_queue[i].comp_ring.intr_idx
  1596. = vector;
  1597. vector++;
  1598. break;
  1599. } else {
  1600. adapter->tx_queue[i].comp_ring.intr_idx
  1601. = vector++;
  1602. }
  1603. }
  1604. if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
  1605. vector = 0;
  1606. for (i = 0; i < adapter->num_rx_queues; i++) {
  1607. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
  1608. sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
  1609. adapter->netdev->name, vector);
  1610. else
  1611. sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
  1612. adapter->netdev->name, vector);
  1613. err = request_irq(intr->msix_entries[vector].vector,
  1614. vmxnet3_msix_rx, 0,
  1615. adapter->rx_queue[i].name,
  1616. &(adapter->rx_queue[i]));
  1617. if (err) {
  1618. netdev_err(adapter->netdev,
  1619. "Failed to request irq for MSIX, "
  1620. "%s, error %d\n",
  1621. adapter->rx_queue[i].name, err);
  1622. return err;
  1623. }
  1624. adapter->rx_queue[i].comp_ring.intr_idx = vector++;
  1625. }
  1626. sprintf(intr->event_msi_vector_name, "%s-event-%d",
  1627. adapter->netdev->name, vector);
  1628. err = request_irq(intr->msix_entries[vector].vector,
  1629. vmxnet3_msix_event, 0,
  1630. intr->event_msi_vector_name, adapter->netdev);
  1631. intr->event_intr_idx = vector;
  1632. } else if (intr->type == VMXNET3_IT_MSI) {
  1633. adapter->num_rx_queues = 1;
  1634. err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
  1635. adapter->netdev->name, adapter->netdev);
  1636. } else {
  1637. #endif
  1638. adapter->num_rx_queues = 1;
  1639. err = request_irq(adapter->pdev->irq, vmxnet3_intr,
  1640. IRQF_SHARED, adapter->netdev->name,
  1641. adapter->netdev);
  1642. #ifdef CONFIG_PCI_MSI
  1643. }
  1644. #endif
  1645. intr->num_intrs = vector + 1;
  1646. if (err) {
  1647. netdev_err(adapter->netdev,
  1648. "Failed to request irq (intr type:%d), error %d\n",
  1649. intr->type, err);
  1650. } else {
  1651. /* Number of rx queues will not change after this */
  1652. for (i = 0; i < adapter->num_rx_queues; i++) {
  1653. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
  1654. rq->qid = i;
  1655. rq->qid2 = i + adapter->num_rx_queues;
  1656. }
  1657. /* init our intr settings */
  1658. for (i = 0; i < intr->num_intrs; i++)
  1659. intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
  1660. if (adapter->intr.type != VMXNET3_IT_MSIX) {
  1661. adapter->intr.event_intr_idx = 0;
  1662. for (i = 0; i < adapter->num_tx_queues; i++)
  1663. adapter->tx_queue[i].comp_ring.intr_idx = 0;
  1664. adapter->rx_queue[0].comp_ring.intr_idx = 0;
  1665. }
  1666. netdev_info(adapter->netdev,
  1667. "intr type %u, mode %u, %u vectors allocated\n",
  1668. intr->type, intr->mask_mode, intr->num_intrs);
  1669. }
  1670. return err;
  1671. }
  1672. static void
  1673. vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
  1674. {
  1675. struct vmxnet3_intr *intr = &adapter->intr;
  1676. BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
  1677. switch (intr->type) {
  1678. #ifdef CONFIG_PCI_MSI
  1679. case VMXNET3_IT_MSIX:
  1680. {
  1681. int i, vector = 0;
  1682. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
  1683. for (i = 0; i < adapter->num_tx_queues; i++) {
  1684. free_irq(intr->msix_entries[vector++].vector,
  1685. &(adapter->tx_queue[i]));
  1686. if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
  1687. break;
  1688. }
  1689. }
  1690. for (i = 0; i < adapter->num_rx_queues; i++) {
  1691. free_irq(intr->msix_entries[vector++].vector,
  1692. &(adapter->rx_queue[i]));
  1693. }
  1694. free_irq(intr->msix_entries[vector].vector,
  1695. adapter->netdev);
  1696. BUG_ON(vector >= intr->num_intrs);
  1697. break;
  1698. }
  1699. #endif
  1700. case VMXNET3_IT_MSI:
  1701. free_irq(adapter->pdev->irq, adapter->netdev);
  1702. break;
  1703. case VMXNET3_IT_INTX:
  1704. free_irq(adapter->pdev->irq, adapter->netdev);
  1705. break;
  1706. default:
  1707. BUG();
  1708. }
  1709. }
  1710. static void
  1711. vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
  1712. {
  1713. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1714. u16 vid;
  1715. /* allow untagged pkts */
  1716. VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
  1717. for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
  1718. VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
  1719. }
  1720. static int
  1721. vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
  1722. {
  1723. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1724. if (!(netdev->flags & IFF_PROMISC)) {
  1725. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1726. unsigned long flags;
  1727. VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
  1728. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1729. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1730. VMXNET3_CMD_UPDATE_VLAN_FILTERS);
  1731. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1732. }
  1733. set_bit(vid, adapter->active_vlans);
  1734. return 0;
  1735. }
  1736. static int
  1737. vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
  1738. {
  1739. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1740. if (!(netdev->flags & IFF_PROMISC)) {
  1741. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1742. unsigned long flags;
  1743. VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
  1744. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1745. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1746. VMXNET3_CMD_UPDATE_VLAN_FILTERS);
  1747. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1748. }
  1749. clear_bit(vid, adapter->active_vlans);
  1750. return 0;
  1751. }
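/* vmxnet3_copy_mc: flatten the netdev multicast list into a contiguous
 * array of 6-byte MAC addresses that the device can DMA from. Returns
 * NULL if the list is too large to describe with the 16-bit mfTableLen
 * field or if the atomic allocation fails.
 */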
  1752. static u8 *
  1753. vmxnet3_copy_mc(struct net_device *netdev)
  1754. {
  1755. u8 *buf = NULL;
  1756. u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
  1757. /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
  1758. if (sz <= 0xffff) {
  1759. /* We may be called with BH disabled */
  1760. buf = kmalloc(sz, GFP_ATOMIC);
  1761. if (buf) {
  1762. struct netdev_hw_addr *ha;
  1763. int i = 0;
  1764. netdev_for_each_mc_addr(ha, netdev)
  1765. memcpy(buf + i++ * ETH_ALEN, ha->addr,
  1766. ETH_ALEN);
  1767. }
  1768. }
  1769. return buf;
  1770. }
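/* vmxnet3_set_mc: recompute the device rx filter mode (unicast, broadcast,
 * multicast, all-multi, promiscuous) from the netdev flags, hand the device
 * a DMA-mapped copy of the multicast table when one is needed, and push the
 * new mode and the VLAN/MAC filters to the device under cmd_lock.
 */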
  1771. static void
  1772. vmxnet3_set_mc(struct net_device *netdev)
  1773. {
  1774. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  1775. unsigned long flags;
  1776. struct Vmxnet3_RxFilterConf *rxConf =
  1777. &adapter->shared->devRead.rxFilterConf;
  1778. u8 *new_table = NULL;
  1779. dma_addr_t new_table_pa = 0;
  1780. u32 new_mode = VMXNET3_RXM_UCAST;
  1781. if (netdev->flags & IFF_PROMISC) {
  1782. u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
  1783. memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
  1784. new_mode |= VMXNET3_RXM_PROMISC;
  1785. } else {
  1786. vmxnet3_restore_vlan(adapter);
  1787. }
  1788. if (netdev->flags & IFF_BROADCAST)
  1789. new_mode |= VMXNET3_RXM_BCAST;
  1790. if (netdev->flags & IFF_ALLMULTI)
  1791. new_mode |= VMXNET3_RXM_ALL_MULTI;
  1792. else
  1793. if (!netdev_mc_empty(netdev)) {
  1794. new_table = vmxnet3_copy_mc(netdev);
  1795. if (new_table) {
  1796. rxConf->mfTableLen = cpu_to_le16(
  1797. netdev_mc_count(netdev) * ETH_ALEN);
  1798. new_table_pa = dma_map_single(
  1799. &adapter->pdev->dev,
  1800. new_table,
  1801. rxConf->mfTableLen,
  1802. PCI_DMA_TODEVICE);
  1803. }
  1804. if (new_table_pa) {
  1805. new_mode |= VMXNET3_RXM_MCAST;
  1806. rxConf->mfTablePA = cpu_to_le64(new_table_pa);
  1807. } else {
  1808. netdev_info(netdev,
  1809. "failed to copy mcast list, setting ALL_MULTI\n");
  1810. new_mode |= VMXNET3_RXM_ALL_MULTI;
  1811. }
  1812. }
  1813. if (!(new_mode & VMXNET3_RXM_MCAST)) {
  1814. rxConf->mfTableLen = 0;
  1815. rxConf->mfTablePA = 0;
  1816. }
  1817. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1818. if (new_mode != rxConf->rxMode) {
  1819. rxConf->rxMode = cpu_to_le32(new_mode);
  1820. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1821. VMXNET3_CMD_UPDATE_RX_MODE);
  1822. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1823. VMXNET3_CMD_UPDATE_VLAN_FILTERS);
  1824. }
  1825. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1826. VMXNET3_CMD_UPDATE_MAC_FILTERS);
  1827. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1828. if (new_table_pa)
  1829. dma_unmap_single(&adapter->pdev->dev, new_table_pa,
  1830. rxConf->mfTableLen, PCI_DMA_TODEVICE);
  1831. kfree(new_table);
  1832. }
  1833. void
  1834. vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
  1835. {
  1836. int i;
  1837. for (i = 0; i < adapter->num_rx_queues; i++)
  1838. vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
  1839. }
  1840. /*
  1841. * Set up driver_shared based on settings in adapter.
  1842. */
  1843. static void
  1844. vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
  1845. {
  1846. struct Vmxnet3_DriverShared *shared = adapter->shared;
  1847. struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
  1848. struct Vmxnet3_TxQueueConf *tqc;
  1849. struct Vmxnet3_RxQueueConf *rqc;
  1850. int i;
  1851. memset(shared, 0, sizeof(*shared));
  1852. /* driver settings */
  1853. shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
  1854. devRead->misc.driverInfo.version = cpu_to_le32(
  1855. VMXNET3_DRIVER_VERSION_NUM);
  1856. devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
  1857. VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
  1858. devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
  1859. *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
  1860. *((u32 *)&devRead->misc.driverInfo.gos));
  1861. devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
  1862. devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
  1863. devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
  1864. devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
  1865. /* set up feature flags */
  1866. if (adapter->netdev->features & NETIF_F_RXCSUM)
  1867. devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
  1868. if (adapter->netdev->features & NETIF_F_LRO) {
  1869. devRead->misc.uptFeatures |= UPT1_F_LRO;
  1870. devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
  1871. }
  1872. if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
  1873. devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
  1874. devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
  1875. devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
  1876. devRead->misc.queueDescLen = cpu_to_le32(
  1877. adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
  1878. adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
  1879. /* tx queue settings */
  1880. devRead->misc.numTxQueues = adapter->num_tx_queues;
  1881. for (i = 0; i < adapter->num_tx_queues; i++) {
  1882. struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
  1883. BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
  1884. tqc = &adapter->tqd_start[i].conf;
  1885. tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
  1886. tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
  1887. tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
  1888. tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
  1889. tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
  1890. tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
  1891. tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
  1892. tqc->ddLen = cpu_to_le32(
  1893. sizeof(struct vmxnet3_tx_buf_info) *
  1894. tqc->txRingSize);
  1895. tqc->intrIdx = tq->comp_ring.intr_idx;
  1896. }
  1897. /* rx queue settings */
  1898. devRead->misc.numRxQueues = adapter->num_rx_queues;
  1899. for (i = 0; i < adapter->num_rx_queues; i++) {
  1900. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
  1901. rqc = &adapter->rqd_start[i].conf;
  1902. rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
  1903. rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
  1904. rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
  1905. rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
  1906. rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
  1907. rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
  1908. rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
  1909. rqc->ddLen = cpu_to_le32(
  1910. sizeof(struct vmxnet3_rx_buf_info) *
  1911. (rqc->rxRingSize[0] +
  1912. rqc->rxRingSize[1]));
  1913. rqc->intrIdx = rq->comp_ring.intr_idx;
  1914. }
  1915. #ifdef VMXNET3_RSS
  1916. memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
  1917. if (adapter->rss) {
  1918. struct UPT1_RSSConf *rssConf = adapter->rss_conf;
  1919. devRead->misc.uptFeatures |= UPT1_F_RSS;
  1920. devRead->misc.numRxQueues = adapter->num_rx_queues;
  1921. rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
  1922. UPT1_RSS_HASH_TYPE_IPV4 |
  1923. UPT1_RSS_HASH_TYPE_TCP_IPV6 |
  1924. UPT1_RSS_HASH_TYPE_IPV6;
  1925. rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
  1926. rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
  1927. rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
  1928. netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
  1929. for (i = 0; i < rssConf->indTableSize; i++)
  1930. rssConf->indTable[i] = ethtool_rxfh_indir_default(
  1931. i, adapter->num_rx_queues);
  1932. devRead->rssConfDesc.confVer = 1;
  1933. devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
  1934. devRead->rssConfDesc.confPA =
  1935. cpu_to_le64(adapter->rss_conf_pa);
  1936. }
  1937. #endif /* VMXNET3_RSS */
  1938. /* intr settings */
  1939. devRead->intrConf.autoMask = adapter->intr.mask_mode ==
  1940. VMXNET3_IMM_AUTO;
  1941. devRead->intrConf.numIntrs = adapter->intr.num_intrs;
  1942. for (i = 0; i < adapter->intr.num_intrs; i++)
  1943. devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
  1944. devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
  1945. devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
  1946. /* rx filter settings */
  1947. devRead->rxFilterConf.rxMode = 0;
  1948. vmxnet3_restore_vlan(adapter);
  1949. vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
  1950. /* the rest are already zeroed */
  1951. }
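/* vmxnet3_activate_dev: bring the device to the running state. The order
 * below matters: init the tx/rx rings, request irqs, fill in driver_shared,
 * tell the device where driver_shared lives (DSAL/DSAH), issue ACTIVATE_DEV
 * and check its status, then prime the rx producer registers, apply rx
 * filters, and finally enable NAPI and interrupts.
 */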
  1952. int
  1953. vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
  1954. {
  1955. int err, i;
  1956. u32 ret;
  1957. unsigned long flags;
  1958. netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
  1959. " ring sizes %u %u %u\n", adapter->netdev->name,
  1960. adapter->skb_buf_size, adapter->rx_buf_per_pkt,
  1961. adapter->tx_queue[0].tx_ring.size,
  1962. adapter->rx_queue[0].rx_ring[0].size,
  1963. adapter->rx_queue[0].rx_ring[1].size);
  1964. vmxnet3_tq_init_all(adapter);
  1965. err = vmxnet3_rq_init_all(adapter);
  1966. if (err) {
  1967. netdev_err(adapter->netdev,
  1968. "Failed to init rx queue error %d\n", err);
  1969. goto rq_err;
  1970. }
  1971. err = vmxnet3_request_irqs(adapter);
  1972. if (err) {
  1973. netdev_err(adapter->netdev,
  1974. "Failed to setup irq for error %d\n", err);
  1975. goto irq_err;
  1976. }
  1977. vmxnet3_setup_driver_shared(adapter);
  1978. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
  1979. adapter->shared_pa));
  1980. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
  1981. adapter->shared_pa));
  1982. spin_lock_irqsave(&adapter->cmd_lock, flags);
  1983. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  1984. VMXNET3_CMD_ACTIVATE_DEV);
  1985. ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
  1986. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  1987. if (ret != 0) {
  1988. netdev_err(adapter->netdev,
  1989. "Failed to activate dev: error %u\n", ret);
  1990. err = -EINVAL;
  1991. goto activate_err;
  1992. }
  1993. for (i = 0; i < adapter->num_rx_queues; i++) {
  1994. VMXNET3_WRITE_BAR0_REG(adapter,
  1995. VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
  1996. adapter->rx_queue[i].rx_ring[0].next2fill);
  1997. VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
  1998. (i * VMXNET3_REG_ALIGN)),
  1999. adapter->rx_queue[i].rx_ring[1].next2fill);
  2000. }
/* Apply the rx filter settings last. */
  2002. vmxnet3_set_mc(adapter->netdev);
  2003. /*
  2004. * Check link state when first activating device. It will start the
  2005. * tx queue if the link is up.
  2006. */
  2007. vmxnet3_check_link(adapter, true);
  2008. for (i = 0; i < adapter->num_rx_queues; i++)
  2009. napi_enable(&adapter->rx_queue[i].napi);
  2010. vmxnet3_enable_all_intrs(adapter);
  2011. clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
  2012. return 0;
  2013. activate_err:
  2014. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
  2015. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
  2016. vmxnet3_free_irqs(adapter);
  2017. irq_err:
  2018. rq_err:
  2019. /* free up buffers we allocated */
  2020. vmxnet3_rq_cleanup_all(adapter);
  2021. return err;
  2022. }
  2023. void
  2024. vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
  2025. {
  2026. unsigned long flags;
  2027. spin_lock_irqsave(&adapter->cmd_lock, flags);
  2028. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
  2029. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  2030. }
  2031. int
  2032. vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
  2033. {
  2034. int i;
  2035. unsigned long flags;
  2036. if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
  2037. return 0;
  2038. spin_lock_irqsave(&adapter->cmd_lock, flags);
  2039. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  2040. VMXNET3_CMD_QUIESCE_DEV);
  2041. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  2042. vmxnet3_disable_all_intrs(adapter);
  2043. for (i = 0; i < adapter->num_rx_queues; i++)
  2044. napi_disable(&adapter->rx_queue[i].napi);
  2045. netif_tx_disable(adapter->netdev);
  2046. adapter->link_speed = 0;
  2047. netif_carrier_off(adapter->netdev);
  2048. vmxnet3_tq_cleanup_all(adapter);
  2049. vmxnet3_rq_cleanup_all(adapter);
  2050. vmxnet3_free_irqs(adapter);
  2051. return 0;
  2052. }
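/* vmxnet3_write_mac_addr: the MAC address is programmed through two BAR1
 * registers - the first four bytes go into MACL and the remaining two into
 * the low half of MACH.
 */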
  2053. static void
  2054. vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
  2055. {
  2056. u32 tmp;
  2057. tmp = *(u32 *)mac;
  2058. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
  2059. tmp = (mac[5] << 8) | mac[4];
  2060. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
  2061. }
  2062. static int
  2063. vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
  2064. {
  2065. struct sockaddr *addr = p;
  2066. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2067. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  2068. vmxnet3_write_mac_addr(adapter, addr->sa_data);
  2069. return 0;
  2070. }
  2071. /* ==================== initialization and cleanup routines ============ */
  2072. static int
  2073. vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
  2074. {
  2075. int err;
  2076. unsigned long mmio_start, mmio_len;
  2077. struct pci_dev *pdev = adapter->pdev;
  2078. err = pci_enable_device(pdev);
  2079. if (err) {
  2080. dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
  2081. return err;
  2082. }
  2083. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
  2084. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
  2085. dev_err(&pdev->dev,
  2086. "pci_set_consistent_dma_mask failed\n");
  2087. err = -EIO;
  2088. goto err_set_mask;
  2089. }
  2090. *dma64 = true;
  2091. } else {
  2092. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
  2093. dev_err(&pdev->dev,
  2094. "pci_set_dma_mask failed\n");
  2095. err = -EIO;
  2096. goto err_set_mask;
  2097. }
  2098. *dma64 = false;
  2099. }
  2100. err = pci_request_selected_regions(pdev, (1 << 2) - 1,
  2101. vmxnet3_driver_name);
  2102. if (err) {
  2103. dev_err(&pdev->dev,
  2104. "Failed to request region for adapter: error %d\n", err);
  2105. goto err_set_mask;
  2106. }
  2107. pci_set_master(pdev);
  2108. mmio_start = pci_resource_start(pdev, 0);
  2109. mmio_len = pci_resource_len(pdev, 0);
  2110. adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
  2111. if (!adapter->hw_addr0) {
  2112. dev_err(&pdev->dev, "Failed to map bar0\n");
  2113. err = -EIO;
  2114. goto err_ioremap;
  2115. }
  2116. mmio_start = pci_resource_start(pdev, 1);
  2117. mmio_len = pci_resource_len(pdev, 1);
  2118. adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
  2119. if (!adapter->hw_addr1) {
  2120. dev_err(&pdev->dev, "Failed to map bar1\n");
  2121. err = -EIO;
  2122. goto err_bar1;
  2123. }
  2124. return 0;
  2125. err_bar1:
  2126. iounmap(adapter->hw_addr0);
  2127. err_ioremap:
  2128. pci_release_selected_regions(pdev, (1 << 2) - 1);
  2129. err_set_mask:
  2130. pci_disable_device(pdev);
  2131. return err;
  2132. }
  2133. static void
  2134. vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
  2135. {
  2136. BUG_ON(!adapter->pdev);
  2137. iounmap(adapter->hw_addr0);
  2138. iounmap(adapter->hw_addr1);
  2139. pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
  2140. pci_disable_device(adapter->pdev);
  2141. }
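/* vmxnet3_adjust_rx_ring_size: size the first skb buffer to hold a whole
 * MTU-sized frame when it fits, otherwise cap it at VMXNET3_MAX_SKB_BUF_SIZE
 * and spill the rest into page-sized fragments, so rx_buf_per_pkt is
 * 1 + the number of page buffers per frame. As a rough example, assuming a
 * 4 KB PAGE_SIZE and a 3 KB VMXNET3_MAX_SKB_BUF_SIZE, a 9000-byte MTU needs
 * the skb buffer plus two page buffers, i.e. rx_buf_per_pkt == 3. Ring 0 is
 * then rounded up to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN.
 */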
  2142. static void
  2143. vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
  2144. {
  2145. size_t sz, i, ring0_size, ring1_size, comp_size;
  2146. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
  2147. if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
  2148. VMXNET3_MAX_ETH_HDR_SIZE) {
  2149. adapter->skb_buf_size = adapter->netdev->mtu +
  2150. VMXNET3_MAX_ETH_HDR_SIZE;
  2151. if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
  2152. adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
  2153. adapter->rx_buf_per_pkt = 1;
  2154. } else {
  2155. adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
  2156. sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
  2157. VMXNET3_MAX_ETH_HDR_SIZE;
  2158. adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
  2159. }
  2160. /*
  2161. * for simplicity, force the ring0 size to be a multiple of
  2162. * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
  2163. */
  2164. sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
  2165. ring0_size = adapter->rx_queue[0].rx_ring[0].size;
  2166. ring0_size = (ring0_size + sz - 1) / sz * sz;
  2167. ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
  2168. sz * sz);
  2169. ring1_size = adapter->rx_queue[0].rx_ring[1].size;
  2170. ring1_size = (ring1_size + sz - 1) / sz * sz;
  2171. ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
  2172. sz * sz);
  2173. comp_size = ring0_size + ring1_size;
  2174. for (i = 0; i < adapter->num_rx_queues; i++) {
  2175. rq = &adapter->rx_queue[i];
  2176. rq->rx_ring[0].size = ring0_size;
  2177. rq->rx_ring[1].size = ring1_size;
  2178. rq->comp_ring.size = comp_size;
  2179. }
  2180. }
  2181. int
  2182. vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
  2183. u32 rx_ring_size, u32 rx_ring2_size)
  2184. {
  2185. int err = 0, i;
  2186. for (i = 0; i < adapter->num_tx_queues; i++) {
  2187. struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
  2188. tq->tx_ring.size = tx_ring_size;
  2189. tq->data_ring.size = tx_ring_size;
  2190. tq->comp_ring.size = tx_ring_size;
  2191. tq->shared = &adapter->tqd_start[i].ctrl;
  2192. tq->stopped = true;
  2193. tq->adapter = adapter;
  2194. tq->qid = i;
  2195. err = vmxnet3_tq_create(tq, adapter);
/*
 * Too late to change num_tx_queues. We cannot make do with
 * fewer queues than what we asked for.
 */
  2200. if (err)
  2201. goto queue_err;
  2202. }
  2203. adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
  2204. adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
  2205. vmxnet3_adjust_rx_ring_size(adapter);
  2206. for (i = 0; i < adapter->num_rx_queues; i++) {
  2207. struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
  2208. /* qid and qid2 for rx queues will be assigned later when num
  2209. * of rx queues is finalized after allocating intrs */
  2210. rq->shared = &adapter->rqd_start[i].ctrl;
  2211. rq->adapter = adapter;
  2212. err = vmxnet3_rq_create(rq, adapter);
  2213. if (err) {
  2214. if (i == 0) {
  2215. netdev_err(adapter->netdev,
  2216. "Could not allocate any rx queues. "
  2217. "Aborting.\n");
  2218. goto queue_err;
  2219. } else {
netdev_info(adapter->netdev,
	    "Number of rx queues changed to %d.\n", i);
  2223. adapter->num_rx_queues = i;
  2224. err = 0;
  2225. break;
  2226. }
  2227. }
  2228. }
  2229. return err;
  2230. queue_err:
  2231. vmxnet3_tq_destroy_all(adapter);
  2232. return err;
  2233. }
  2234. static int
  2235. vmxnet3_open(struct net_device *netdev)
  2236. {
  2237. struct vmxnet3_adapter *adapter;
  2238. int err, i;
  2239. adapter = netdev_priv(netdev);
  2240. for (i = 0; i < adapter->num_tx_queues; i++)
  2241. spin_lock_init(&adapter->tx_queue[i].tx_lock);
  2242. err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
  2243. adapter->rx_ring_size,
  2244. adapter->rx_ring2_size);
  2245. if (err)
  2246. goto queue_err;
  2247. err = vmxnet3_activate_dev(adapter);
  2248. if (err)
  2249. goto activate_err;
  2250. return 0;
  2251. activate_err:
  2252. vmxnet3_rq_destroy_all(adapter);
  2253. vmxnet3_tq_destroy_all(adapter);
  2254. queue_err:
  2255. return err;
  2256. }
  2257. static int
  2258. vmxnet3_close(struct net_device *netdev)
  2259. {
  2260. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2261. /*
  2262. * Reset_work may be in the middle of resetting the device, wait for its
  2263. * completion.
  2264. */
  2265. while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
  2266. msleep(1);
  2267. vmxnet3_quiesce_dev(adapter);
  2268. vmxnet3_rq_destroy_all(adapter);
  2269. vmxnet3_tq_destroy_all(adapter);
  2270. clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
  2271. return 0;
  2272. }
  2273. void
  2274. vmxnet3_force_close(struct vmxnet3_adapter *adapter)
  2275. {
  2276. int i;
  2277. /*
  2278. * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
  2279. * vmxnet3_close() will deadlock.
  2280. */
  2281. BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
  2282. /* we need to enable NAPI, otherwise dev_close will deadlock */
  2283. for (i = 0; i < adapter->num_rx_queues; i++)
  2284. napi_enable(&adapter->rx_queue[i].napi);
  2285. dev_close(adapter->netdev);
  2286. }
  2287. static int
  2288. vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
  2289. {
  2290. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2291. int err = 0;
  2292. if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
  2293. return -EINVAL;
  2294. netdev->mtu = new_mtu;
  2295. /*
  2296. * Reset_work may be in the middle of resetting the device, wait for its
  2297. * completion.
  2298. */
  2299. while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
  2300. msleep(1);
  2301. if (netif_running(netdev)) {
  2302. vmxnet3_quiesce_dev(adapter);
  2303. vmxnet3_reset_dev(adapter);
  2304. /* we need to re-create the rx queue based on the new mtu */
  2305. vmxnet3_rq_destroy_all(adapter);
  2306. vmxnet3_adjust_rx_ring_size(adapter);
  2307. err = vmxnet3_rq_create_all(adapter);
  2308. if (err) {
netdev_err(netdev,
	   "failed to re-create rx queues, error %d. Closing it.\n",
	   err);
  2312. goto out;
  2313. }
  2314. err = vmxnet3_activate_dev(adapter);
  2315. if (err) {
netdev_err(netdev,
	   "failed to re-activate, error %d. Closing it.\n", err);
  2319. goto out;
  2320. }
  2321. }
  2322. out:
  2323. clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
  2324. if (err)
  2325. vmxnet3_force_close(adapter);
  2326. return err;
  2327. }
  2328. static void
  2329. vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
  2330. {
  2331. struct net_device *netdev = adapter->netdev;
  2332. netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
  2333. NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
  2334. NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
  2335. NETIF_F_LRO;
  2336. if (dma64)
  2337. netdev->hw_features |= NETIF_F_HIGHDMA;
  2338. netdev->vlan_features = netdev->hw_features &
  2339. ~(NETIF_F_HW_VLAN_CTAG_TX |
  2340. NETIF_F_HW_VLAN_CTAG_RX);
  2341. netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
  2342. }
  2343. static void
  2344. vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
  2345. {
  2346. u32 tmp;
  2347. tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
  2348. *(u32 *)mac = tmp;
  2349. tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
  2350. mac[4] = tmp & 0xff;
  2351. mac[5] = (tmp >> 8) & 0xff;
  2352. }
  2353. #ifdef CONFIG_PCI_MSI
  2354. /*
  2355. * Enable MSIx vectors.
  2356. * Returns :
  2357. * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
  2358. * were enabled.
  2359. * number of vectors which were enabled otherwise (this number is greater
  2360. * than VMXNET3_LINUX_MIN_MSIX_VECT)
  2361. */
  2362. static int
  2363. vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
  2364. {
  2365. int ret = pci_enable_msix_range(adapter->pdev,
  2366. adapter->intr.msix_entries, nvec, nvec);
  2367. if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
  2368. dev_err(&adapter->netdev->dev,
  2369. "Failed to enable %d MSI-X, trying %d\n",
  2370. nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
  2371. ret = pci_enable_msix_range(adapter->pdev,
  2372. adapter->intr.msix_entries,
  2373. VMXNET3_LINUX_MIN_MSIX_VECT,
  2374. VMXNET3_LINUX_MIN_MSIX_VECT);
  2375. }
  2376. if (ret < 0) {
  2377. dev_err(&adapter->netdev->dev,
  2378. "Failed to enable MSI-X, error: %d\n", ret);
  2379. }
  2380. return ret;
  2381. }
  2382. #endif /* CONFIG_PCI_MSI */
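/* vmxnet3_alloc_intr_resources: query the device for its preferred
 * interrupt type and mask mode, then try MSI-X (one vector per queue plus
 * one for events), falling back to MSI and finally to INTx. Whenever fewer
 * vectors are available than queues, the number of rx queues is reduced
 * to 1.
 */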
  2383. static void
  2384. vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
  2385. {
  2386. u32 cfg;
  2387. unsigned long flags;
  2388. /* intr settings */
  2389. spin_lock_irqsave(&adapter->cmd_lock, flags);
  2390. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
  2391. VMXNET3_CMD_GET_CONF_INTR);
  2392. cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
  2393. spin_unlock_irqrestore(&adapter->cmd_lock, flags);
  2394. adapter->intr.type = cfg & 0x3;
  2395. adapter->intr.mask_mode = (cfg >> 2) & 0x3;
  2396. if (adapter->intr.type == VMXNET3_IT_AUTO) {
  2397. adapter->intr.type = VMXNET3_IT_MSIX;
  2398. }
  2399. #ifdef CONFIG_PCI_MSI
  2400. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  2401. int i, nvec;
  2402. nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
  2403. 1 : adapter->num_tx_queues;
  2404. nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
  2405. 0 : adapter->num_rx_queues;
  2406. nvec += 1; /* for link event */
  2407. nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
  2408. nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
  2409. for (i = 0; i < nvec; i++)
  2410. adapter->intr.msix_entries[i].entry = i;
  2411. nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
  2412. if (nvec < 0)
  2413. goto msix_err;
  2414. /* If we cannot allocate one MSIx vector per queue
  2415. * then limit the number of rx queues to 1
  2416. */
  2417. if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
  2418. if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
  2419. || adapter->num_rx_queues != 1) {
  2420. adapter->share_intr = VMXNET3_INTR_TXSHARE;
  2421. netdev_err(adapter->netdev,
  2422. "Number of rx queues : 1\n");
  2423. adapter->num_rx_queues = 1;
  2424. }
  2425. }
  2426. adapter->intr.num_intrs = nvec;
  2427. return;
  2428. msix_err:
  2429. /* If we cannot allocate MSIx vectors use only one rx queue */
  2430. dev_info(&adapter->pdev->dev,
  2431. "Failed to enable MSI-X, error %d. "
  2432. "Limiting #rx queues to 1, try MSI.\n", nvec);
  2433. adapter->intr.type = VMXNET3_IT_MSI;
  2434. }
  2435. if (adapter->intr.type == VMXNET3_IT_MSI) {
  2436. if (!pci_enable_msi(adapter->pdev)) {
  2437. adapter->num_rx_queues = 1;
  2438. adapter->intr.num_intrs = 1;
  2439. return;
  2440. }
  2441. }
  2442. #endif /* CONFIG_PCI_MSI */
  2443. adapter->num_rx_queues = 1;
  2444. dev_info(&adapter->netdev->dev,
  2445. "Using INTx interrupt, #Rx queues: 1.\n");
  2446. adapter->intr.type = VMXNET3_IT_INTX;
  2447. /* INT-X related setting */
  2448. adapter->intr.num_intrs = 1;
  2449. }
  2450. static void
  2451. vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
  2452. {
  2453. if (adapter->intr.type == VMXNET3_IT_MSIX)
  2454. pci_disable_msix(adapter->pdev);
  2455. else if (adapter->intr.type == VMXNET3_IT_MSI)
  2456. pci_disable_msi(adapter->pdev);
  2457. else
  2458. BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
  2459. }
  2460. static void
  2461. vmxnet3_tx_timeout(struct net_device *netdev)
  2462. {
  2463. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2464. adapter->tx_timeout_count++;
  2465. netdev_err(adapter->netdev, "tx hang\n");
  2466. schedule_work(&adapter->work);
  2467. netif_wake_queue(adapter->netdev);
  2468. }
  2469. static void
  2470. vmxnet3_reset_work(struct work_struct *data)
  2471. {
  2472. struct vmxnet3_adapter *adapter;
  2473. adapter = container_of(data, struct vmxnet3_adapter, work);
  2474. /* if another thread is resetting the device, no need to proceed */
  2475. if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
  2476. return;
  2477. /* if the device is closed, we must leave it alone */
  2478. rtnl_lock();
  2479. if (netif_running(adapter->netdev)) {
  2480. netdev_notice(adapter->netdev, "resetting\n");
  2481. vmxnet3_quiesce_dev(adapter);
  2482. vmxnet3_reset_dev(adapter);
  2483. vmxnet3_activate_dev(adapter);
  2484. } else {
  2485. netdev_info(adapter->netdev, "already closed\n");
  2486. }
  2487. rtnl_unlock();
  2488. clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
  2489. }
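/* vmxnet3_probe_device: PCI probe entry point. It allocates the netdev and
 * all DMA-coherent shared structures, maps BAR0/BAR1, negotiates the device
 * and UPT versions, picks an interrupt scheme, registers NAPI contexts (one
 * per rx queue for MSI-X, a single one otherwise) and finally registers the
 * net_device.
 */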
  2490. static int
  2491. vmxnet3_probe_device(struct pci_dev *pdev,
  2492. const struct pci_device_id *id)
  2493. {
  2494. static const struct net_device_ops vmxnet3_netdev_ops = {
  2495. .ndo_open = vmxnet3_open,
  2496. .ndo_stop = vmxnet3_close,
  2497. .ndo_start_xmit = vmxnet3_xmit_frame,
  2498. .ndo_set_mac_address = vmxnet3_set_mac_addr,
  2499. .ndo_change_mtu = vmxnet3_change_mtu,
  2500. .ndo_set_features = vmxnet3_set_features,
  2501. .ndo_get_stats64 = vmxnet3_get_stats64,
  2502. .ndo_tx_timeout = vmxnet3_tx_timeout,
  2503. .ndo_set_rx_mode = vmxnet3_set_mc,
  2504. .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
  2505. .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
  2506. #ifdef CONFIG_NET_POLL_CONTROLLER
  2507. .ndo_poll_controller = vmxnet3_netpoll,
  2508. #endif
  2509. };
  2510. int err;
  2511. bool dma64 = false; /* stupid gcc */
  2512. u32 ver;
  2513. struct net_device *netdev;
  2514. struct vmxnet3_adapter *adapter;
  2515. u8 mac[ETH_ALEN];
  2516. int size;
  2517. int num_tx_queues;
  2518. int num_rx_queues;
  2519. if (!pci_msi_enabled())
  2520. enable_mq = 0;
  2521. #ifdef VMXNET3_RSS
  2522. if (enable_mq)
  2523. num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
  2524. (int)num_online_cpus());
  2525. else
  2526. #endif
  2527. num_rx_queues = 1;
  2528. num_rx_queues = rounddown_pow_of_two(num_rx_queues);
  2529. if (enable_mq)
  2530. num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
  2531. (int)num_online_cpus());
  2532. else
  2533. num_tx_queues = 1;
  2534. num_tx_queues = rounddown_pow_of_two(num_tx_queues);
  2535. netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
  2536. max(num_tx_queues, num_rx_queues));
  2537. dev_info(&pdev->dev,
  2538. "# of Tx queues : %d, # of Rx queues : %d\n",
  2539. num_tx_queues, num_rx_queues);
  2540. if (!netdev)
  2541. return -ENOMEM;
  2542. pci_set_drvdata(pdev, netdev);
  2543. adapter = netdev_priv(netdev);
  2544. adapter->netdev = netdev;
  2545. adapter->pdev = pdev;
  2546. adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
  2547. adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
  2548. adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
  2549. spin_lock_init(&adapter->cmd_lock);
  2550. adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
  2551. sizeof(struct vmxnet3_adapter),
  2552. PCI_DMA_TODEVICE);
  2553. adapter->shared = dma_alloc_coherent(
  2554. &adapter->pdev->dev,
  2555. sizeof(struct Vmxnet3_DriverShared),
  2556. &adapter->shared_pa, GFP_KERNEL);
  2557. if (!adapter->shared) {
  2558. dev_err(&pdev->dev, "Failed to allocate memory\n");
  2559. err = -ENOMEM;
  2560. goto err_alloc_shared;
  2561. }
  2562. adapter->num_rx_queues = num_rx_queues;
  2563. adapter->num_tx_queues = num_tx_queues;
  2564. adapter->rx_buf_per_pkt = 1;
  2565. size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
  2566. size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
  2567. adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
  2568. &adapter->queue_desc_pa,
  2569. GFP_KERNEL);
  2570. if (!adapter->tqd_start) {
  2571. dev_err(&pdev->dev, "Failed to allocate memory\n");
  2572. err = -ENOMEM;
  2573. goto err_alloc_queue_desc;
  2574. }
  2575. adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
  2576. adapter->num_tx_queues);
  2577. adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
  2578. sizeof(struct Vmxnet3_PMConf),
  2579. &adapter->pm_conf_pa,
  2580. GFP_KERNEL);
  2581. if (adapter->pm_conf == NULL) {
  2582. err = -ENOMEM;
  2583. goto err_alloc_pm;
  2584. }
  2585. #ifdef VMXNET3_RSS
  2586. adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
  2587. sizeof(struct UPT1_RSSConf),
  2588. &adapter->rss_conf_pa,
  2589. GFP_KERNEL);
  2590. if (adapter->rss_conf == NULL) {
  2591. err = -ENOMEM;
  2592. goto err_alloc_rss;
  2593. }
  2594. #endif /* VMXNET3_RSS */
  2595. err = vmxnet3_alloc_pci_resources(adapter, &dma64);
  2596. if (err < 0)
  2597. goto err_alloc_pci;
  2598. ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
  2599. if (ver & 2) {
  2600. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2);
  2601. adapter->version = 2;
  2602. } else if (ver & 1) {
  2603. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
  2604. adapter->version = 1;
  2605. } else {
  2606. dev_err(&pdev->dev,
  2607. "Incompatible h/w version (0x%x) for adapter\n", ver);
  2608. err = -EBUSY;
  2609. goto err_ver;
  2610. }
  2611. dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
  2612. ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
  2613. if (ver & 1) {
  2614. VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
  2615. } else {
  2616. dev_err(&pdev->dev,
  2617. "Incompatible upt version (0x%x) for adapter\n", ver);
  2618. err = -EBUSY;
  2619. goto err_ver;
  2620. }
  2621. SET_NETDEV_DEV(netdev, &pdev->dev);
  2622. vmxnet3_declare_features(adapter, dma64);
  2623. if (adapter->num_tx_queues == adapter->num_rx_queues)
  2624. adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
  2625. else
  2626. adapter->share_intr = VMXNET3_INTR_DONTSHARE;
  2627. vmxnet3_alloc_intr_resources(adapter);
  2628. #ifdef VMXNET3_RSS
  2629. if (adapter->num_rx_queues > 1 &&
  2630. adapter->intr.type == VMXNET3_IT_MSIX) {
  2631. adapter->rss = true;
  2632. netdev->hw_features |= NETIF_F_RXHASH;
  2633. netdev->features |= NETIF_F_RXHASH;
  2634. dev_dbg(&pdev->dev, "RSS is enabled.\n");
  2635. } else {
  2636. adapter->rss = false;
  2637. }
  2638. #endif
  2639. vmxnet3_read_mac_addr(adapter, mac);
  2640. memcpy(netdev->dev_addr, mac, netdev->addr_len);
  2641. netdev->netdev_ops = &vmxnet3_netdev_ops;
  2642. vmxnet3_set_ethtool_ops(netdev);
  2643. netdev->watchdog_timeo = 5 * HZ;
  2644. INIT_WORK(&adapter->work, vmxnet3_reset_work);
  2645. set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
  2646. if (adapter->intr.type == VMXNET3_IT_MSIX) {
  2647. int i;
  2648. for (i = 0; i < adapter->num_rx_queues; i++) {
  2649. netif_napi_add(adapter->netdev,
  2650. &adapter->rx_queue[i].napi,
  2651. vmxnet3_poll_rx_only, 64);
  2652. }
  2653. } else {
  2654. netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
  2655. vmxnet3_poll, 64);
  2656. }
  2657. netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
  2658. netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
  2659. netif_carrier_off(netdev);
  2660. err = register_netdev(netdev);
  2661. if (err) {
  2662. dev_err(&pdev->dev, "Failed to register adapter\n");
  2663. goto err_register;
  2664. }
  2665. vmxnet3_check_link(adapter, false);
  2666. return 0;
  2667. err_register:
  2668. vmxnet3_free_intr_resources(adapter);
  2669. err_ver:
  2670. vmxnet3_free_pci_resources(adapter);
  2671. err_alloc_pci:
  2672. #ifdef VMXNET3_RSS
  2673. dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
  2674. adapter->rss_conf, adapter->rss_conf_pa);
  2675. err_alloc_rss:
  2676. #endif
  2677. dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
  2678. adapter->pm_conf, adapter->pm_conf_pa);
  2679. err_alloc_pm:
  2680. dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
  2681. adapter->queue_desc_pa);
  2682. err_alloc_queue_desc:
  2683. dma_free_coherent(&adapter->pdev->dev,
  2684. sizeof(struct Vmxnet3_DriverShared),
  2685. adapter->shared, adapter->shared_pa);
  2686. err_alloc_shared:
  2687. dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
  2688. sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
  2689. free_netdev(netdev);
  2690. return err;
  2691. }
  2692. static void
  2693. vmxnet3_remove_device(struct pci_dev *pdev)
  2694. {
  2695. struct net_device *netdev = pci_get_drvdata(pdev);
  2696. struct vmxnet3_adapter *adapter = netdev_priv(netdev);
  2697. int size = 0;
  2698. int num_rx_queues;
#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	free_netdev(netdev);
}

static void vmxnet3_shutdown_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	/* Reset_work may be in the middle of resetting the device, wait for
	 * its completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

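	/* If the device is already quiesced, there is nothing left to do. */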
	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
			     &adapter->state)) {
		clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
		return;
	}
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}

#ifdef CONFIG_PM

static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

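	/*
	 * Unicast wake-up: match the destination MAC at the start of the
	 * frame (mask 0x3F selects the first ETH_ALEN bytes of the pattern).
	 */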
	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses*/
			2 * sizeof(u32);		/* 2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen =
		cpu_to_le32(sizeof(*pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}

static int
vmxnet3_resume(struct device *device)
{
	int err;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	vmxnet3_alloc_intr_resources(adapter);

	/* During hibernate and suspend, device has to be reinitialized as the
	 * device state need not be preserved.
	 */

	/* Need not check adapter state as other reset tasks cannot run during
	 * device resume.
	 */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);

	vmxnet3_reset_dev(adapter);
	err = vmxnet3_activate_dev(adapter);
	if (err != 0) {
		netdev_err(netdev,
			   "failed to re-activate on resume, error: %d", err);
		vmxnet3_force_close(adapter);
		return err;
	}

	netif_device_attach(netdev);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
	.freeze = vmxnet3_suspend,
	.restore = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
	.shutdown	= vmxnet3_shutdown_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};

static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);