ibmvnic.c

/**************************************************************************/
/* */
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
                           struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
                            struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
                        struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
                                        struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);

struct ibmvnic_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};
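
/* IBMVNIC_STAT_OFF() computes the byte offset of a firmware statistic
 * within struct ibmvnic_adapter; IBMVNIC_GET_STAT() reads the u64 counter
 * at that offset.
 */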
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                                offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
        {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
        {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
        {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
        {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
        {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
        {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
        {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
        {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
        {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
        {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
        {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
        {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
        {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
        {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
        {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
        {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
        {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
        {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
        {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
        {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
        {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
        {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
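
/* Register a sub-CRQ with the hypervisor. On success, the hypervisor
 * returns the new queue number and its interrupt source in the hcall
 * return buffer; they are passed back through *number and *irq.
 */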
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
                          unsigned long length, unsigned long *number,
                          unsigned long *irq)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
        *number = retbuf[0];
        *irq = retbuf[1];

        return rc;
}
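
/* Allocate one long term DMA buffer and register it with the VNIC server.
 * The map request is asynchronous: the CRQ response handler is expected to
 * complete adapter->fw_done, so this must be called from a context that
 * can sleep.
 */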
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb, int size)
{
        struct device *dev = &adapter->vdev->dev;

        ltb->size = size;
        ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
                                       GFP_KERNEL);
        if (!ltb->buff) {
                dev_err(dev, "Couldn't alloc long term buffer\n");
                return -ENOMEM;
        }
        ltb->map_id = adapter->map_id;
        adapter->map_id++;

        init_completion(&adapter->fw_done);
        send_request_map(adapter, ltb->addr,
                         ltb->size, ltb->map_id);
        wait_for_completion(&adapter->fw_done);
        return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
                                struct ibmvnic_long_term_buff *ltb)
{
        struct device *dev = &adapter->vdev->dev;

        if (!ltb->buff)
                return;

        if (!adapter->failover)
                send_request_unmap(adapter, ltb->map_id);
        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
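
/* Top up a receive pool. free_map is a ring of free buffer indices:
 * next_free is where this function consumes entries and next_alloc is
 * where remove_buff_from_pool() returns them. Each posted buffer carries
 * the address of its struct ibmvnic_rx_buff as the sub-CRQ correlator so
 * the completion path can find it again.
 */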
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
                              struct ibmvnic_rx_pool *pool)
{
        int count = pool->size - atomic_read(&pool->available);
        struct device *dev = &adapter->vdev->dev;
        int buffers_added = 0;
        unsigned long lpar_rc;
        union sub_crq sub_crq;
        struct sk_buff *skb;
        unsigned int offset;
        dma_addr_t dma_addr;
        unsigned char *dst;
        u64 *handle_array;
        int shift = 0;
        int index;
        int i;

        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                               be32_to_cpu(adapter->login_rsp_buf->
                                           off_rxadd_subcrqs));

        for (i = 0; i < count; ++i) {
                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
                if (!skb) {
                        dev_err(dev, "Couldn't replenish rx buff\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                index = pool->free_map[pool->next_free];

                if (pool->rx_buff[index].skb)
                        dev_err(dev, "Inconsistent free_map!\n");

                /* Copy the skb to the long term mapped DMA buffer */
                offset = index * pool->buff_size;
                dst = pool->long_term_buff.buff + offset;
                memset(dst, 0, pool->buff_size);
                dma_addr = pool->long_term_buff.addr + offset;
                pool->rx_buff[index].data = dst;

                pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
                pool->rx_buff[index].dma = dma_addr;
                pool->rx_buff[index].skb = skb;
                pool->rx_buff[index].pool_index = pool->index;
                pool->rx_buff[index].size = pool->buff_size;

                memset(&sub_crq, 0, sizeof(sub_crq));
                sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
                sub_crq.rx_add.correlator =
                    cpu_to_be64((u64)&pool->rx_buff[index]);
                sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
                sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

                /* The length field of the sCRQ is defined to be 24 bits so the
                 * buffer size needs to be left shifted by a byte before it is
                 * converted to big endian to prevent the last byte from being
                 * truncated.
                 */
#ifdef __LITTLE_ENDIAN__
                shift = 8;
#endif
                sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

                lpar_rc = send_subcrq(adapter, handle_array[pool->index],
                                      &sub_crq);
                if (lpar_rc != H_SUCCESS)
                        goto failure;

                buffers_added++;
                adapter->replenish_add_buff_success++;
                pool->next_free = (pool->next_free + 1) % pool->size;
        }
        atomic_add(buffers_added, &pool->available);
        return;

failure:
        dev_info(dev, "replenish pools failure\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;
        if (!dma_mapping_error(dev, dma_addr))
                dma_unmap_single(dev, dma_addr, pool->buff_size,
                                 DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;
        atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->migrated)
                return;

        adapter->replenish_task_cycles++;
        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
             i++) {
                if (adapter->rx_pool[i].active)
                        replenish_rx_pool(adapter, &adapter->rx_pool[i]);
        }
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;

        if (!adapter->stats_token)
                return;

        dma_unmap_single(dev, adapter->stats_token,
                         sizeof(struct ibmvnic_statistics),
                         DMA_FROM_DEVICE);
        adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t stok;

        stok = dma_map_single(dev, &adapter->stats,
                              sizeof(struct ibmvnic_statistics),
                              DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, stok)) {
                dev_err(dev, "Couldn't map stats buffer\n");
                return -1;
        }

        adapter->stats_token = stok;
        return 0;
}
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rx_pool *rx_pool;
        int rx_scrqs;
        int i, j;

        if (!adapter->rx_pool)
                return;

        rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        for (i = 0; i < rx_scrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                kfree(rx_pool->free_map);
                free_long_term_buff(adapter, &rx_pool->long_term_buff);

                if (!rx_pool->rx_buff)
                        continue;

                for (j = 0; j < rx_pool->size; j++) {
                        if (rx_pool->rx_buff[j].skb) {
                                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
                                rx_pool->rx_buff[j].skb = NULL;
                        }
                }

                kfree(rx_pool->rx_buff);
        }

        kfree(adapter->rx_pool);
        adapter->rx_pool = NULL;
}
static int init_rx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_rx_pool *rx_pool;
        int rxadd_subcrqs;
        u64 *size_array;
        int i, j;

        rxadd_subcrqs =
                be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

        adapter->rx_pool = kcalloc(rxadd_subcrqs,
                                   sizeof(struct ibmvnic_rx_pool),
                                   GFP_KERNEL);
        if (!adapter->rx_pool) {
                dev_err(dev, "Failed to allocate rx pools\n");
                return -1;
        }

        for (i = 0; i < rxadd_subcrqs; i++) {
                rx_pool = &adapter->rx_pool[i];

                netdev_dbg(adapter->netdev,
                           "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
                           i, adapter->req_rx_add_entries_per_subcrq,
                           be64_to_cpu(size_array[i]));

                rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
                rx_pool->index = i;
                rx_pool->buff_size = be64_to_cpu(size_array[i]);
                rx_pool->active = 1;

                rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
                                            GFP_KERNEL);
                if (!rx_pool->free_map) {
                        release_rx_pools(adapter);
                        return -1;
                }

                rx_pool->rx_buff = kcalloc(rx_pool->size,
                                           sizeof(struct ibmvnic_rx_buff),
                                           GFP_KERNEL);
                if (!rx_pool->rx_buff) {
                        dev_err(dev, "Couldn't alloc rx buffers\n");
                        release_rx_pools(adapter);
                        return -1;
                }

                if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
                                         rx_pool->size * rx_pool->buff_size)) {
                        release_rx_pools(adapter);
                        return -1;
                }

                for (j = 0; j < rx_pool->size; ++j)
                        rx_pool->free_map[j] = j;

                atomic_set(&rx_pool->available, 0);
                rx_pool->next_alloc = 0;
                rx_pool->next_free = 0;
        }

        return 0;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_tx_pool *tx_pool;
        int i, tx_scrqs;

        if (!adapter->tx_pool)
                return;

        tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
        for (i = 0; i < tx_scrqs; i++) {
                tx_pool = &adapter->tx_pool[i];
                kfree(tx_pool->tx_buff);
                free_long_term_buff(adapter, &tx_pool->long_term_buff);
                kfree(tx_pool->free_map);
        }

        kfree(adapter->tx_pool);
        adapter->tx_pool = NULL;
}

static int init_tx_pools(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_pool *tx_pool;
        int tx_subcrqs;
        int i, j;

        tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
        adapter->tx_pool = kcalloc(tx_subcrqs,
                                   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
        if (!adapter->tx_pool)
                return -1;

        for (i = 0; i < tx_subcrqs; i++) {
                tx_pool = &adapter->tx_pool[i];
                tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
                                           sizeof(struct ibmvnic_tx_buff),
                                           GFP_KERNEL);
                if (!tx_pool->tx_buff) {
                        dev_err(dev, "tx pool buffer allocation failed\n");
                        release_tx_pools(adapter);
                        return -1;
                }

                if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
                                         adapter->req_tx_entries_per_subcrq *
                                         adapter->req_mtu)) {
                        release_tx_pools(adapter);
                        return -1;
                }

                tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
                                            sizeof(int), GFP_KERNEL);
                if (!tx_pool->free_map) {
                        release_tx_pools(adapter);
                        return -1;
                }

                for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
                        tx_pool->free_map[j] = j;

                tx_pool->consumer_index = 0;
                tx_pool->producer_index = 0;
        }

        return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_error_buff *error_buff, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&adapter->error_list_lock, flags);
        list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
                list_del(&error_buff->list);
                dma_unmap_single(dev, error_buff->dma, error_buff->len,
                                 DMA_FROM_DEVICE);
                kfree(error_buff->buff);
                kfree(error_buff);
        }
        spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static int ibmvnic_login(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        struct device *dev = &adapter->vdev->dev;

        do {
                if (adapter->renegotiate) {
                        adapter->renegotiate = false;
                        release_sub_crqs(adapter);

                        reinit_completion(&adapter->init_done);
                        send_cap_queries(adapter);
                        if (!wait_for_completion_timeout(&adapter->init_done,
                                                         timeout)) {
                                dev_err(dev, "Capabilities query timeout\n");
                                return -1;
                        }
                }

                reinit_completion(&adapter->init_done);
                send_login(adapter);
                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        dev_err(dev, "Login timeout\n");
                        return -1;
                }
        } while (adapter->renegotiate);

        return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
        release_tx_pools(adapter);
        release_rx_pools(adapter);

        release_stats_token(adapter);
        release_error_buffers(adapter);
}
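
/* Drive a logical link state change through the CRQ. The VNIC server may
 * answer with a partial success (init_done_rc == 1), in which case the
 * request is delayed and resent until it either succeeds or fails
 * outright.
 */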
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
        struct net_device *netdev = adapter->netdev;
        unsigned long timeout = msecs_to_jiffies(30000);
        union ibmvnic_crq crq;
        bool resend;
        int rc;

        if (adapter->logical_link_state == link_state) {
                netdev_dbg(netdev, "Link state already %d\n", link_state);
                return 0;
        }

        netdev_dbg(netdev, "setting link state %d\n", link_state);
        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = link_state;

        do {
                resend = false;

                reinit_completion(&adapter->init_done);
                rc = ibmvnic_send_crq(adapter, &crq);
                if (rc) {
                        netdev_err(netdev, "Failed to set link state\n");
                        return rc;
                }

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
                        netdev_err(netdev, "timeout setting link state\n");
                        return -1;
                }

                if (adapter->init_done_rc == 1) {
                        /* Partial success, delay and re-send */
                        mdelay(1000);
                        resend = true;
                }
        } while (resend);

        return 0;
}
static int set_real_num_queues(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc;

        rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
        if (rc) {
                netdev_err(netdev, "failed to set the number of tx queues\n");
                return rc;
        }

        rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
        if (rc)
                netdev_err(netdev, "failed to set the number of rx queues\n");

        return rc;
}
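
/* ndo_open: log in to the VNIC server, size the queues, set up sub-CRQ
 * interrupts, NAPI instances and rx/tx pools, then request a logical link
 * up. Failures after NAPI setup unwind through release_resources().
 */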
static int ibmvnic_open(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        int rc = 0;
        int i;

        if (adapter->is_closed) {
                rc = ibmvnic_init(adapter);
                if (rc)
                        return rc;
        }

        rc = ibmvnic_login(netdev);
        if (rc)
                return rc;

        rc = set_real_num_queues(netdev);
        if (rc)
                return rc;

        rc = init_sub_crq_irqs(adapter);
        if (rc) {
                dev_err(dev, "failed to initialize sub crq irqs\n");
                return -1;
        }

        rc = init_stats_token(adapter);
        if (rc)
                return rc;

        adapter->map_id = 1;
        adapter->napi = kcalloc(adapter->req_rx_queues,
                                sizeof(struct napi_struct), GFP_KERNEL);
        if (!adapter->napi)
                goto ibmvnic_open_fail;
        for (i = 0; i < adapter->req_rx_queues; i++) {
                netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&adapter->napi[i]);
        }

        send_map_query(adapter);

        rc = init_rx_pools(netdev);
        if (rc)
                goto ibmvnic_open_fail;

        rc = init_tx_pools(netdev);
        if (rc)
                goto ibmvnic_open_fail;

        replenish_pools(adapter);

        /* We're ready to receive frames, enable the sub-crq interrupts and
         * set the logical link state to up
         */
        for (i = 0; i < adapter->req_rx_queues; i++)
                enable_scrq_irq(adapter, adapter->rx_scrq[i]);

        for (i = 0; i < adapter->req_tx_queues; i++)
                enable_scrq_irq(adapter, adapter->tx_scrq[i]);

        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
        if (rc)
                goto ibmvnic_open_fail;

        netif_tx_start_all_queues(netdev);
        adapter->is_closed = false;

        return 0;

ibmvnic_open_fail:
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);
        release_resources(adapter);
        return -ENOMEM;
}

static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
{
        int i;

        if (adapter->tx_scrq) {
                for (i = 0; i < adapter->req_tx_queues; i++)
                        if (adapter->tx_scrq[i])
                                disable_irq(adapter->tx_scrq[i]->irq);
        }

        if (adapter->rx_scrq) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        if (adapter->rx_scrq[i])
                                disable_irq(adapter->rx_scrq[i]->irq);
        }
}

static int ibmvnic_close(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int rc = 0;
        int i;

        adapter->closing = true;
        disable_sub_crqs(adapter);

        if (adapter->napi) {
                for (i = 0; i < adapter->req_rx_queues; i++)
                        napi_disable(&adapter->napi[i]);
        }

        if (!adapter->failover)
                netif_tx_stop_all_queues(netdev);

        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);

        release_resources(adapter);

        adapter->is_closed = true;
        adapter->closing = false;
        return rc;
}
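
/* hdr_field bit layout, as consumed below and in ibmvnic_xmit(): bit 7
 * indicates that the firmware wants packet headers at all, and bits 6, 5
 * and 4 select the L2, L3 and L4 headers respectively.
 */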
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths to be used to build descriptors, and returns the total
 * length of the header data.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
                          int *hdr_len, u8 *hdr_data)
{
        int len = 0;
        u8 *hdr;

        hdr_len[0] = sizeof(struct ethhdr);

        if (skb->protocol == htons(ETH_P_IP)) {
                hdr_len[1] = ip_hdr(skb)->ihl * 4;
                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                hdr_len[1] = sizeof(struct ipv6hdr);
                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                        hdr_len[2] = tcp_hdrlen(skb);
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
                        hdr_len[2] = sizeof(struct udphdr);
        }

        memset(hdr_data, 0, 120);
        if ((hdr_field >> 6) & 1) {
                hdr = skb_mac_header(skb);
                memcpy(hdr_data, hdr, hdr_len[0]);
                len += hdr_len[0];
        }

        if ((hdr_field >> 5) & 1) {
                hdr = skb_network_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[1]);
                len += hdr_len[1];
        }

        if ((hdr_field >> 4) & 1) {
                hdr = skb_transport_header(skb);
                memcpy(hdr_data + len, hdr, hdr_len[2]);
                len += hdr_len[2];
        }
        return len;
}
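
/* The first header descriptor can carry up to 24 bytes of header data;
 * each header extension descriptor that follows carries up to 29 bytes.
 */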
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
                             union sub_crq *scrq_arr)
{
        union sub_crq hdr_desc;
        int tmp_len = len;
        u8 *data, *cur;
        int tmp;

        while (tmp_len > 0) {
                cur = hdr_data + len - tmp_len;

                memset(&hdr_desc, 0, sizeof(hdr_desc));
                if (cur != hdr_data) {
                        data = hdr_desc.hdr_ext.data;
                        tmp = tmp_len > 29 ? 29 : tmp_len;
                        hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
                        hdr_desc.hdr_ext.len = tmp;
                } else {
                        data = hdr_desc.hdr.data;
                        tmp = tmp_len > 24 ? 24 : tmp_len;
                        hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
                        hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
                        hdr_desc.hdr.len = tmp;
                        hdr_desc.hdr.l2_len = (u8)hdr_len[0];
                        hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
                        hdr_desc.hdr.l4_len = (u8)hdr_len[2];
                        hdr_desc.hdr.flag = hdr_field << 1;
                }
                memcpy(data, cur, tmp);
                tmp_len -= tmp;
                *scrq_arr = hdr_desc;
                scrq_arr++;
        }
}
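
/* For example, 74 bytes of Ethernet + IPv4 + TCP headers would be split
 * by create_hdr_descs() into one 24-byte header descriptor followed by
 * ceil(50 / 29) = 2 extension descriptors.
 */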
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer and descriptor array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
                                int *num_entries, u8 hdr_field)
{
        int hdr_len[3] = {0, 0, 0};
        int tot_len, len;
        u8 *hdr_data = txbuff->hdr_data;

        tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
                                 txbuff->hdr_data);
        len = tot_len;
        len -= 24;
        if (len > 0)
                *num_entries += len % 29 ? len / 29 + 1 : len / 29;
        create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
                         txbuff->indir_arr + 1);
}
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int queue_num = skb_get_queue_mapping(skb);
        u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_buff *tx_buff = NULL;
        struct ibmvnic_sub_crq_queue *tx_scrq;
        struct ibmvnic_tx_pool *tx_pool;
        unsigned int tx_send_failed = 0;
        unsigned int tx_map_failed = 0;
        unsigned int tx_dropped = 0;
        unsigned int tx_packets = 0;
        unsigned int tx_bytes = 0;
        dma_addr_t data_dma_addr;
        struct netdev_queue *txq;
        unsigned long lpar_rc;
        union sub_crq tx_crq;
        unsigned int offset;
        int num_entries = 1;
        unsigned char *dst;
        u64 *handle_array;
        u8 proto = 0;
        int index = 0;
        int ret = 0;

        tx_pool = &adapter->tx_pool[queue_num];
        tx_scrq = adapter->tx_scrq[queue_num];
        txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
                               be32_to_cpu(adapter->login_rsp_buf->
                                           off_txsubm_subcrqs));
        if (adapter->migrated) {
                if (!netif_subqueue_stopped(netdev, skb))
                        netif_stop_subqueue(netdev, queue_num);
                dev_kfree_skb_any(skb);

                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto out;
        }

        index = tx_pool->free_map[tx_pool->consumer_index];
        offset = index * adapter->req_mtu;
        dst = tx_pool->long_term_buff.buff + offset;
        memset(dst, 0, adapter->req_mtu);
        skb_copy_from_linear_data(skb, dst, skb->len);
        data_dma_addr = tx_pool->long_term_buff.addr + offset;

        tx_pool->consumer_index =
            (tx_pool->consumer_index + 1) %
                adapter->req_tx_entries_per_subcrq;

        tx_buff = &tx_pool->tx_buff[index];
        tx_buff->skb = skb;
        tx_buff->data_dma[0] = data_dma_addr;
        tx_buff->data_len[0] = skb->len;
        tx_buff->index = index;
        tx_buff->pool_index = queue_num;
        tx_buff->last_frag = true;

        memset(&tx_crq, 0, sizeof(tx_crq));
        tx_crq.v1.first = IBMVNIC_CRQ_CMD;
        tx_crq.v1.type = IBMVNIC_TX_DESC;
        tx_crq.v1.n_crq_elem = 1;
        tx_crq.v1.n_sge = 1;
        tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
        tx_crq.v1.correlator = cpu_to_be32(index);
        tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
        tx_crq.v1.sge_len = cpu_to_be32(skb->len);
        tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

        if (adapter->vlan_header_insertion) {
                tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
                tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
                proto = ip_hdr(skb)->protocol;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
                proto = ipv6_hdr(skb)->nexthdr;
        }

        if (proto == IPPROTO_TCP)
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
        else if (proto == IPPROTO_UDP)
                tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
                hdrs += 2;
        }
        /* determine if l2/3/4 headers are sent to firmware */
        if ((*hdrs >> 7) & 1 &&
            (skb->protocol == htons(ETH_P_IP) ||
             skb->protocol == htons(ETH_P_IPV6))) {
                build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
                tx_crq.v1.n_crq_elem = num_entries;
                tx_buff->indir_arr[0] = tx_crq;
                tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
                                                    sizeof(tx_buff->indir_arr),
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, tx_buff->indir_dma)) {
                        dev_kfree_skb_any(skb);
                        tx_buff->skb = NULL;
                        if (!firmware_has_feature(FW_FEATURE_CMO))
                                dev_err(dev, "tx: unable to map descriptor array\n");
                        tx_map_failed++;
                        tx_dropped++;
                        ret = NETDEV_TX_OK;
                        goto out;
                }
                lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
                                               (u64)tx_buff->indir_dma,
                                               (u64)num_entries);
        } else {
                lpar_rc = send_subcrq(adapter, handle_array[queue_num],
                                      &tx_crq);
        }
        if (lpar_rc != H_SUCCESS) {
                dev_err(dev, "tx failed with code %ld\n", lpar_rc);

                if (tx_pool->consumer_index == 0)
                        tx_pool->consumer_index =
                                adapter->req_tx_entries_per_subcrq - 1;
                else
                        tx_pool->consumer_index--;

                dev_kfree_skb_any(skb);
                tx_buff->skb = NULL;

                if (lpar_rc == H_CLOSED)
                        netif_stop_subqueue(netdev, queue_num);

                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
                goto out;
        }

        if (atomic_inc_return(&tx_scrq->used)
            >= adapter->req_tx_entries_per_subcrq) {
                netdev_info(netdev, "Stopping queue %d\n", queue_num);
                netif_stop_subqueue(netdev, queue_num);
        }

        tx_packets++;
        tx_bytes += skb->len;
        txq->trans_start = jiffies;
        ret = NETDEV_TX_OK;

out:
        netdev->stats.tx_dropped += tx_dropped;
        netdev->stats.tx_bytes += tx_bytes;
        netdev->stats.tx_packets += tx_packets;
        adapter->tx_send_failed += tx_send_failed;
        adapter->tx_map_failed += tx_map_failed;

        return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.request_capability.first = IBMVNIC_CRQ_CMD;
        crq.request_capability.cmd = REQUEST_CAPABILITY;

        if (netdev->flags & IFF_PROMISC) {
                if (!adapter->promisc_supported)
                        return;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else if (netdev_mc_empty(netdev)) {
                        /* Reject all multicast */
                        memset(&crq, 0, sizeof(crq));
                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                        crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
                        ibmvnic_send_crq(adapter, &crq);
                } else {
                        /* Accept one or more multicast(s) */
                        netdev_for_each_mc_addr(ha, netdev) {
                                memset(&crq, 0, sizeof(crq));
                                crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
                                crq.multicast_ctrl.cmd = MULTICAST_CTRL;
                                crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
                                ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
                                                ha->addr);
                                ibmvnic_send_crq(adapter, &crq);
                        }
                }
        }
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        union ibmvnic_crq crq;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memset(&crq, 0, sizeof(crq));
        crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
        crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
        ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
        ibmvnic_send_crq(adapter, &crq);
        /* netdev->dev_addr is changed in handle_change_mac_rsp function */
        return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int rc;

        /* Adapter timed out, resetting it */
        release_sub_crqs(adapter);
        rc = ibmvnic_reset_crq(adapter);
        if (rc)
                dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
        else
                ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_rx_buff *rx_buff)
{
        struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

        rx_buff->skb = NULL;

        pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
        pool->next_alloc = (pool->next_alloc + 1) % pool->size;

        atomic_dec(&pool->available);
}
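
/* NAPI poll: drain up to @budget rx completions from this sub-CRQ, copying
 * each frame out of the long term mapped buffer into its skb, then
 * replenish the pool. If the budget was not exhausted, re-enable the
 * interrupt; a completion that raced in is caught by the pending_scrq()
 * re-check and handled by rescheduling NAPI.
 */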
  964. static int ibmvnic_poll(struct napi_struct *napi, int budget)
  965. {
  966. struct net_device *netdev = napi->dev;
  967. struct ibmvnic_adapter *adapter = netdev_priv(netdev);
  968. int scrq_num = (int)(napi - adapter->napi);
  969. int frames_processed = 0;
  970. restart_poll:
  971. while (frames_processed < budget) {
  972. struct sk_buff *skb;
  973. struct ibmvnic_rx_buff *rx_buff;
  974. union sub_crq *next;
  975. u32 length;
  976. u16 offset;
  977. u8 flags = 0;
  978. if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
  979. break;
  980. next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
  981. rx_buff =
  982. (struct ibmvnic_rx_buff *)be64_to_cpu(next->
  983. rx_comp.correlator);
  984. /* do error checking */
  985. if (next->rx_comp.rc) {
  986. netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
  987. /* free the entry */
  988. next->rx_comp.first = 0;
  989. remove_buff_from_pool(adapter, rx_buff);
  990. break;
  991. }
  992. length = be32_to_cpu(next->rx_comp.len);
  993. offset = be16_to_cpu(next->rx_comp.off_frame_data);
  994. flags = next->rx_comp.flags;
  995. skb = rx_buff->skb;
  996. skb_copy_to_linear_data(skb, rx_buff->data + offset,
  997. length);
  998. /* VLAN Header has been stripped by the system firmware and
  999. * needs to be inserted by the driver
  1000. */
  1001. if (adapter->rx_vlan_header_insertion &&
  1002. (flags & IBMVNIC_VLAN_STRIPPED))
  1003. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  1004. ntohs(next->rx_comp.vlan_tci));
  1005. /* free the entry */
  1006. next->rx_comp.first = 0;
  1007. remove_buff_from_pool(adapter, rx_buff);
  1008. skb_put(skb, length);
  1009. skb->protocol = eth_type_trans(skb, netdev);
  1010. if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
  1011. flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
  1012. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1013. }
  1014. length = skb->len;
  1015. napi_gro_receive(napi, skb); /* send it up */
  1016. netdev->stats.rx_packets++;
  1017. netdev->stats.rx_bytes += length;
  1018. frames_processed++;
  1019. }
  1020. replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
  1021. if (frames_processed < budget) {
  1022. enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
  1023. napi_complete_done(napi, frames_processed);
  1024. if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
  1025. napi_reschedule(napi)) {
  1026. disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
  1027. goto restart_poll;
  1028. }
  1029. }
  1030. return frames_processed;
  1031. }
  1032. #ifdef CONFIG_NET_POLL_CONTROLLER
  1033. static void ibmvnic_netpoll_controller(struct net_device *dev)
  1034. {
  1035. struct ibmvnic_adapter *adapter = netdev_priv(dev);
  1036. int i;
  1037. replenish_pools(netdev_priv(dev));
  1038. for (i = 0; i < adapter->req_rx_queues; i++)
  1039. ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
  1040. adapter->rx_scrq[i]);
  1041. }
  1042. #endif
  1043. static const struct net_device_ops ibmvnic_netdev_ops = {
  1044. .ndo_open = ibmvnic_open,
  1045. .ndo_stop = ibmvnic_close,
  1046. .ndo_start_xmit = ibmvnic_xmit,
  1047. .ndo_set_rx_mode = ibmvnic_set_multi,
  1048. .ndo_set_mac_address = ibmvnic_set_mac,
  1049. .ndo_validate_addr = eth_validate_addr,
  1050. .ndo_tx_timeout = ibmvnic_tx_timeout,
  1051. #ifdef CONFIG_NET_POLL_CONTROLLER
  1052. .ndo_poll_controller = ibmvnic_netpoll_controller,
  1053. #endif
  1054. };
  1055. /* ethtool functions */
  1056. static int ibmvnic_get_link_ksettings(struct net_device *netdev,
  1057. struct ethtool_link_ksettings *cmd)
  1058. {
  1059. u32 supported, advertising;
  1060. supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
  1061. SUPPORTED_FIBRE);
  1062. advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
  1063. ADVERTISED_FIBRE);
  1064. cmd->base.speed = SPEED_1000;
  1065. cmd->base.duplex = DUPLEX_FULL;
  1066. cmd->base.port = PORT_FIBRE;
  1067. cmd->base.phy_address = 0;
  1068. cmd->base.autoneg = AUTONEG_ENABLE;
  1069. ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
  1070. supported);
  1071. ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
  1072. advertising);
  1073. return 0;
  1074. }
  1075. static void ibmvnic_get_drvinfo(struct net_device *dev,
  1076. struct ethtool_drvinfo *info)
  1077. {
  1078. strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
  1079. strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
  1080. }
  1081. static u32 ibmvnic_get_msglevel(struct net_device *netdev)
  1082. {
  1083. struct ibmvnic_adapter *adapter = netdev_priv(netdev);
  1084. return adapter->msg_enable;
  1085. }
  1086. static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
  1087. {
  1088. struct ibmvnic_adapter *adapter = netdev_priv(netdev);
  1089. adapter->msg_enable = data;
  1090. }
  1091. static u32 ibmvnic_get_link(struct net_device *netdev)
  1092. {
  1093. struct ibmvnic_adapter *adapter = netdev_priv(netdev);
  1094. /* Don't need to send a query because we request a logical link up at
  1095. * init and then we wait for link state indications
  1096. */
  1097. return adapter->logical_link_state;
  1098. }
  1099. static void ibmvnic_get_ringparam(struct net_device *netdev,
  1100. struct ethtool_ringparam *ring)
  1101. {
  1102. ring->rx_max_pending = 0;
  1103. ring->tx_max_pending = 0;
  1104. ring->rx_mini_max_pending = 0;
  1105. ring->rx_jumbo_max_pending = 0;
  1106. ring->rx_pending = 0;
  1107. ring->tx_pending = 0;
  1108. ring->rx_mini_pending = 0;
  1109. ring->rx_jumbo_pending = 0;
  1110. }
  1111. static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
  1112. {
  1113. int i;
  1114. if (stringset != ETH_SS_STATS)
  1115. return;
  1116. for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
  1117. memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
  1118. }
  1119. static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
  1120. {
  1121. switch (sset) {
  1122. case ETH_SS_STATS:
  1123. return ARRAY_SIZE(ibmvnic_stats);
  1124. default:
  1125. return -EOPNOTSUPP;
  1126. }
  1127. }
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo = ibmvnic_get_drvinfo,
	.get_msglevel = ibmvnic_get_msglevel,
	.set_msglevel = ibmvnic_set_msglevel,
	.get_link = ibmvnic_get_link,
	.get_ringparam = ibmvnic_get_ringparam,
	.get_strings = ibmvnic_get_strings,
	.get_sset_count = ibmvnic_get_sset_count,
	.get_ethtool_stats = ibmvnic_get_ethtool_stats,
	.get_link_ksettings = ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

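/* Allocate one sub-CRQ: four pages of message slots are zero-allocated,
 * DMA-mapped and registered with the hypervisor via h_reg_sub_crq.
 * H_RESOURCE triggers a CRQ reset and retry; H_CLOSED just means the
 * partner side is not up yet, so the queue is kept and becomes usable
 * once the partner initializes.
 */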
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
	    (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

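/* Tear down every tx/rx sub-CRQ: any registered IRQ is freed and its
 * mapping disposed before the queue itself is released back to the
 * hypervisor, then the queue arrays are freed and cleared.
 */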
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

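/* Reap tx completions from a sub-CRQ: each completion carries a correlator
 * that indexes into the tx pool, whose buffer is unmapped and its free_map
 * slot recycled.  A stopped subqueue is restarted once the queue drains to
 * half of req_tx_entries_per_subcrq.  The IRQ is re-enabled before a final
 * pending_scrq() check so completions that arrived while interrupts were
 * off are not lost.
 */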
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				if (atomic_sub_return(next->tx_comp.num_comps,
						      &scrq->used) <=
				    (adapter->req_tx_entries_per_subcrq / 2) &&
				    netif_subqueue_stopped(adapter->netdev,
							   txbuff->skb)) {
					netif_wake_subqueue(adapter->netdev,
							    scrq->pool_index);
					netdev_dbg(adapter->netdev,
						   "Started queue %d\n",
						   scrq->pool_index);
				}

				dev_kfree_skb_any(txbuff->skb);
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

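/* Map and request an IRQ for every tx and rx sub-CRQ.  On failure the
 * unwind paths below free only the IRQs that were actually registered:
 * req_tx_irq_failed for a tx-side failure, req_rx_irq_failed for an
 * rx-side failure (which then falls through and releases all tx IRQs).
 */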
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			/* unwind only the tx IRQs registered so far; jumping
			 * to the rx unwind here would free unregistered IRQs
			 */
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

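/* Allocate tx + rx sub-CRQs in one pass.  If fewer queues come back than
 * requested, the shortfall is spread across the tx and rx request counts
 * (never dropping below the server-advertised minimums) and the surviving
 * queues are split between adapter->tx_scrq and adapter->rx_scrq.
 */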
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the shortfall from failed queue allocations across
	 * the requested tx and rx queue counts
	 */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}

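/* Request the negotiated capability set.  On the first pass (!retry) the
 * requested values are derived from the QUERY_CAPABILITY results, clamped
 * so each sub-CRQ's entries fit in its 4 * PAGE_SIZE message buffer; on a
 * retry the server-adjusted values already stored in the adapter struct
 * are re-sent as-is.
 */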
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

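/* A sub-CRQ slot is "pending" when its first byte has the response bit set
 * (or the adapter is closing, which forces the reap loops to keep running).
 * ibmvnic_next_scrq consumes the current slot under the queue lock and
 * advances the cursor with wraparound; ibmvnic_next_crq does the same for
 * the main CRQ, whose single consumer needs no lock here.
 */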
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

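/* Hand descriptors to the hypervisor.  Every send path issues a full memory
 * barrier first so the hypervisor observes a completely written descriptor,
 * then makes the matching hcall: H_SEND_SUB_CRQ for a single sub-CRQ entry,
 * H_SEND_SUB_CRQ_INDIRECT for a batch described by ioba/num_entries, and
 * H_SEND_CRQ for the main queue.
 */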
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

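/* Build and send the LOGIN request.  The login buffer carries the tx/rx
 * sub-CRQ handle lists at the offsets recorded in its header, plus the
 * address and length of a DMA-mapped response buffer sized for the
 * server's queue arrays and TX descriptor versions, which the server
 * fills in directly.
 */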
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

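/* Process the QUERY_IP_OFFLOAD response: dump the advertised checksum and
 * large-send capabilities, translate the checksum bits into netdev feature
 * flags, and send back a CONTROL_IP_OFFLOAD request enabling the subset
 * the driver supports (large_tx/rx stay off for now).
 */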
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

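/* Ask the server for the detailed error record behind an error indication:
 * an error_buff sized to detail_error_sz is allocated, DMA-mapped and
 * queued on adapter->errors, then a REQUEST_ERROR_INFO CRQ is sent and the
 * caller waits (up to 30 seconds) for the response that fills it in.
 */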
static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

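/* Handle a REQUEST_CAPABILITY response.  PARTIALSUCCESS means the server
 * countered with a different value: the sub-CRQs are released, the
 * counter-offered value is adopted and the whole capability request
 * sequence is retried.  Once the last outstanding response arrives, IP
 * offload support is queried.
 */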
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

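/* Validate the LOGIN response.  A nonzero rc.code means the server couldn't
 * grant the requested queues, so renegotiate is set and the waiter on
 * init_done resends a smaller login.  The queue counts echoed back are
 * cross-checked against what was requested before declaring the login good.
 */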
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

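/* Record one QUERY_CAPABILITY response into the matching adapter field.
 * When running_cap_crqs drops to zero the full capability set is known and
 * the REQUEST_CAPABILITY sequence is kicked off via ibmvnic_send_req_caps.
 */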
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

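/* Worker for transport events.  All sub-CRQs are torn down; if the event
 * was a partition migration the main CRQ is re-enabled and a fresh CRQ
 * init handshake is started with the new partner.
 */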
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	release_sub_crqs(adapter);

	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}

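/* Top-level CRQ dispatcher, run from the CRQ tasklet.  Messages fall into
 * three classes keyed off generic.first: init handshake traffic, transport
 * events (migration/failover), and ordinary command responses which are
 * fanned out to the handle_*_rsp helpers above.
 */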
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		/* version is big-endian on the wire */
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
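
/* Main CRQ interrupt handler: defer all work to the tasklet so the
 * hard-IRQ path stays minimal.
 */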
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
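
/* Tasklet that drains the main CRQ under the queue lock. It keeps
 * looping while capability responses are still outstanding so that a
 * response arriving mid-drain is handled in the same pass.
 */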
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		/* remain in the tasklet until all
		 * capability responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capability CRQs were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
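
/* Ask the hypervisor to re-enable the main CRQ (used after a partition
 * migration), retrying while the hcall reports a busy state.
 */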
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
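
/* Reset the main CRQ: free it with the hypervisor, zero the message
 * page, and register it again with the same DMA token.
 */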
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
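
/* Tear down the main CRQ: release the IRQ and tasklet first so no new
 * work arrives, then free the queue with the hypervisor and unmap the
 * message page.
 */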
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}
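
/* Allocate the one-page CRQ message queue, register it with the
 * hypervisor, and wire up the tasklet and main interrupt. Returns 0 on
 * success (including the H_CLOSED "partner not ready" case) or the
 * failing error code.
 */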
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		retrc = rc;	/* propagate the failure, not the earlier success */
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		retrc = rc;
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
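
/* Worker scheduled when the partner sends IBMVNIC_CRQ_INIT (passive
 * init, e.g. after a failover): redo the version/capability handshake,
 * then either restart the running interface or register the netdev for
 * the first time.
 */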
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");
	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}
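
/* Bring up the main CRQ and run the init handshake to completion
 * (capability negotiation included), then allocate the sub-CRQs.
 */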
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		release_crq_queue(adapter);
		return -ETIMEDOUT;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
	}

	return rc;
}
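
/* VIO probe: read the MAC address from the device-tree attribute,
 * allocate the multi-queue netdev, initialize the adapter state and
 * workers, run the CRQ init handshake, and register the netdev.
 */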
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	adapter->is_closed = false;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
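
/* Estimate the IO entitlement (DMA window space) this adapter needs:
 * the CRQ page, the statistics buffer, four pages per sub-CRQ, and the
 * receive buffer pools.
 */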
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
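
/* PM resume callback: kick the rx sub-CRQ interrupt handlers in case
 * an interrupt was lost while suspended.
 */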
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}
static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};
/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);