/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int ptp_enable = 1;
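/* Illustrative usage of the parameters above (not part of the original
 * source): assuming the driver is built as liquidio.ko, they can be set
 * at load time, e.g.
 *
 *	modprobe liquidio fw_type=nic ddr_timeout=10000 debug=-1
 *
 * The values shown are simply the defaults declared above.
 */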
/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS             0x01
#define LIO_IFSTATE_REGISTERED           0x02
#define LIO_IFSTATE_RUNNING              0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000

struct liquidio_if_cfg_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
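/* Note: tx_info packs the GSO size and segment count carried with a
 * transmit command into one 64-bit word; the #ifdef above only fixes
 * the field order for big- vs. little-endian builds.
 */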
/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */
#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
				oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
				0x5700000040ULL);
			octeon_write_csr64(
				oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}
/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}
/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < 100; i++) {
		pcount =
			atomic_read(&oct->response_list
				[OCTEON_ORDERED_SC_LIST].pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}
/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}
/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}
#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled = liquidio_pcie_mmio_enabled,
	.slot_reset = liquidio_pcie_slot_reset,
	.resume = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{	/* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name = "LiquidIO",
	.id_table = liquidio_pci_tbl,
	.probe = liquidio_probe,
	.remove = liquidio_remove,
	.err_handler = &liquidio_err_handler, /* For AER */
#ifdef CONFIG_PM
	.suspend = liquidio_suspend,
	.resume = liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};
/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
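/* Illustrative use of the ifstate helpers above (not a call site in
 * this file):
 *
 *	ifstate_set(lio, LIO_IFSTATE_RUNNING);
 *	if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *		ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 *
 * Note that set/reset perform a non-atomic read-modify-write of
 * lio->ifstate, so concurrent updaters could race.
 */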
/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				(lio->linfo.num_txpciq)].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}
/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}
/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}
/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				(lio->linfo.num_txpciq)].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}
/**
 * Remove the node at the head of the list. The list will be empty after
 * this call if it contained only that node.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
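/* delete_glists() below drains each per-queue gather list by calling
 * list_delete_head() until it returns NULL, i.e. until the list is empty.
 */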
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			if (g)
				kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}
/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = cpu_to_node(i % num_online_cpus());

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
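/* Layout note: setup_glists() carves one DMA-coherent block per input
 * queue into tx_qsize slots of glist_entry_size bytes each; every
 * octnic_gather node's sg/sg_dma_ptr points into that block, which is
 * why delete_glists() frees a single allocation per queue.
 */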
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}
/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	rtnl_lock();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
	rtnl_unlock();
}
/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}
/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}
/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		} else {
			if (!octnet_iq_is_full(oct, lio->txq)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
							  lio->txq,
							  tx_restart, 1);
				wake_q(netdev, lio->txq);
			}
		}
	}
}
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}
/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	u64 oq_no;
	struct octeon_droq *droq;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= (1 << oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}
static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	u64 ret;
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}
/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
					 void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}
/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	int irqret, err;
	struct msix_entry *msix_entries;
	int i;
	int num_ioq_vectors;
	int num_alloc_ioq_vectors;

	if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
		oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
		/* one non ioq interrupt for handling sli_mac_pf_int_sum */
		oct->num_msix_irqs += 1;

		oct->msix_entries = kcalloc(
			oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
		if (!oct->msix_entries)
			return 1;

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption is that the PF MSI-X vectors start at pf_srn and
		 * run through trs, not from 0. If not, change this code.
		 */
		for (i = 0; i < oct->num_msix_irqs - 1; i++)
			msix_entries[i].entry = oct->sriov_info.pf_srn + i;

		msix_entries[oct->num_msix_irqs - 1].entry =
			oct->sriov_info.trs;

		num_alloc_ioq_vectors = pci_enable_msix_range(
					oct->pci_dev, msix_entries,
					oct->num_msix_irqs,
					oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;

		/** For PF, there is one non-ioq interrupt handler */
		num_ioq_vectors -= 1;
		irqret = request_irq(msix_entries[num_ioq_vectors].vector,
				     liquidio_legacy_intr_handler, 0, "octeon",
				     oct);
		if (irqret) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
				irqret);
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}

		for (i = 0; i < num_ioq_vectors; i++) {
			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     "octeon", &oct->ioq_vector[i]);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				oct->msix_entries = NULL;
				return 1;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(
				msix_entries[i].vector,
				(&oct->ioq_vector[i].affinity_mask));
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler, IRQF_SHARED,
				     "octeon", oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			return 1;
		}
	}
	return 0;
}
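/**
 * \brief Watchdog kernel thread (one per NIC, created in liquidio_probe)
 * @param param Pointer to Octeon device
 *
 * Roughly every two seconds this loop reads the per-core CIU3 watchdog
 * registers and the SLI_SCRATCH2 crash mask, logs any core that is stuck
 * or has crashed, and (if module unload is enabled) drops the module
 * reference count so that rmmod still works after a firmware crash.
 */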
static int liquidio_watchdog(void *param)
{
	u64 wdog;
	u16 mask_of_stuck_cores = 0;
	u16 mask_of_crashed_cores = 0;
	int core_num;
	u8 core_is_stuck[LIO_MAX_CORES];
	u8 core_crashed[LIO_MAX_CORES];
	struct octeon_device *oct = param;

	memset(core_is_stuck, 0, sizeof(core_is_stuck));
	memset(core_crashed, 0, sizeof(core_crashed));

	while (!kthread_should_stop()) {
		mask_of_crashed_cores =
			(u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
			if (!core_is_stuck[core_num]) {
				wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));

				/* look at watchdog state field */
				wdog &= CIU3_WDOG_MASK;
				if (wdog) {
					/* this watchdog timer has expired */
					core_is_stuck[core_num] =
						LIO_MONITOR_WDOG_EXPIRE;
					mask_of_stuck_cores |= (1 << core_num);
				}
			}

			if (!core_crashed[core_num])
				core_crashed[core_num] =
					(mask_of_crashed_cores >> core_num) & 1;
		}

		if (mask_of_stuck_cores) {
			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				if (core_is_stuck[core_num] == 1) {
					dev_err(&oct->pci_dev->dev,
						"ERROR: Octeon core %d is stuck!\n",
						core_num);
					/* 2 means we have printk'd an error
					 * so no need to repeat the same printk
					 */
					core_is_stuck[core_num] =
						LIO_MONITOR_CORE_STUCK_MSGD;
				}
			}
		}

		if (mask_of_crashed_cores) {
			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				if (core_crashed[core_num] == 1) {
					dev_err(&oct->pci_dev->dev,
						"ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
						core_num);
					/* 2 means we have printk'd an error
					 * so no need to repeat the same printk
					 */
					core_crashed[core_num] =
						LIO_MONITOR_CORE_STUCK_MSGD;
				}
			}
		}
#ifdef CONFIG_MODULE_UNLOAD
		if (mask_of_stuck_cores || mask_of_crashed_cores) {
			/* make module refcount=0 so that rmmod will work */
			long refcount;

			refcount = module_refcount(THIS_MODULE);

			while (refcount > 0) {
				module_put(THIS_MODULE);
				refcount = module_refcount(THIS_MODULE);
			}

			/* compensate for and withstand an unlikely (but still
			 * possible) race condition
			 */
			while (refcount < 0) {
				try_module_get(THIS_MODULE);
				refcount = module_refcount(THIS_MODULE);
			}
		}
#endif
		/* sleep for two seconds */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2 * HZ);
	}

	return 0;
}
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u64 scratch1;
		u8 bus, device, function;

		scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
		if (!(scratch1 & 4ULL)) {
			/* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
			 * the lio watchdog kernel thread is running for this
			 * NIC. Each NIC gets one watchdog kernel thread.
			 */
			scratch1 |= 4ULL;
			octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
					   scratch1);

			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
				liquidio_watchdog, oct_dev,
				"liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
  1169. /**
* \brief Destroy resources associated with octeon device
* @param oct octeon device
  1173. */
  1174. static void octeon_destroy_resources(struct octeon_device *oct)
  1175. {
  1176. int i;
  1177. struct msix_entry *msix_entries;
  1178. struct octeon_device_priv *oct_priv =
  1179. (struct octeon_device_priv *)oct->priv;
  1180. struct handshake *hs;
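/* Tear down in reverse order of initialization: the cases below fall
* through from the device's current state toward OCT_DEV_BEGIN_STATE,
* undoing each completed setup step along the way.
*/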
  1181. switch (atomic_read(&oct->status)) {
  1182. case OCT_DEV_RUNNING:
  1183. case OCT_DEV_CORE_OK:
  1184. /* No more instructions will be forwarded. */
  1185. atomic_set(&oct->status, OCT_DEV_IN_RESET);
  1186. oct->app_mode = CVM_DRV_INVALID_APP;
  1187. dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
  1188. lio_get_state_string(&oct->status));
  1189. schedule_timeout_uninterruptible(HZ / 10);
  1190. /* fallthrough */
  1191. case OCT_DEV_HOST_OK:
  1192. /* fallthrough */
  1193. case OCT_DEV_CONSOLE_INIT_DONE:
  1194. /* Remove any consoles */
  1195. octeon_remove_consoles(oct);
  1196. /* fallthrough */
  1197. case OCT_DEV_IO_QUEUES_DONE:
  1198. if (wait_for_pending_requests(oct))
  1199. dev_err(&oct->pci_dev->dev, "There were pending requests\n");
  1200. if (lio_wait_for_instr_fetch(oct))
  1201. dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
  1202. /* Disable the input and output queues now. No more packets will
  1203. * arrive from Octeon, but we should wait for all packet
  1204. * processing to finish.
  1205. */
  1206. oct->fn_list.disable_io_queues(oct);
  1207. if (lio_wait_for_oq_pkts(oct))
  1208. dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
  1209. /* fallthrough */
  1210. case OCT_DEV_INTR_SET_DONE:
  1211. /* Disable interrupts */
  1212. oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
  1213. if (oct->msix_on) {
  1214. msix_entries = (struct msix_entry *)oct->msix_entries;
  1215. for (i = 0; i < oct->num_msix_irqs - 1; i++) {
  1216. /* clear the affinity_cpumask */
  1217. irq_set_affinity_hint(msix_entries[i].vector,
  1218. NULL);
  1219. free_irq(msix_entries[i].vector,
  1220. &oct->ioq_vector[i]);
  1221. }
  1222. /* non-iov vector's argument is oct struct */
  1223. free_irq(msix_entries[i].vector, oct);
  1224. pci_disable_msix(oct->pci_dev);
  1225. kfree(oct->msix_entries);
  1226. oct->msix_entries = NULL;
  1227. } else {
  1228. /* Release the interrupt line */
  1229. free_irq(oct->pci_dev->irq, oct);
  1230. if (oct->flags & LIO_FLAG_MSI_ENABLED)
  1231. pci_disable_msi(oct->pci_dev);
  1232. }
  1233. /* fallthrough */
  1234. case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
  1235. if (OCTEON_CN23XX_PF(oct))
  1236. octeon_free_ioq_vector(oct);
  1237. /* fallthrough */
  1238. case OCT_DEV_MBOX_SETUP_DONE:
  1239. if (OCTEON_CN23XX_PF(oct))
  1240. oct->fn_list.free_mbox(oct);
  1241. /* fallthrough */
  1242. case OCT_DEV_IN_RESET:
  1243. case OCT_DEV_DROQ_INIT_DONE:
  1244. /* Wait for any pending operations */
  1245. mdelay(100);
  1246. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
  1247. if (!(oct->io_qmask.oq & BIT_ULL(i)))
  1248. continue;
  1249. octeon_delete_droq(oct, i);
  1250. }
  1251. /* Force any pending handshakes to complete */
  1252. for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
  1253. hs = &handshake[i];
  1254. if (hs->pci_dev) {
  1255. handshake[oct->octeon_id].init_ok = 0;
  1256. complete(&handshake[oct->octeon_id].init);
  1257. handshake[oct->octeon_id].started_ok = 0;
  1258. complete(&handshake[oct->octeon_id].started);
  1259. }
  1260. }
  1261. /* fallthrough */
  1262. case OCT_DEV_RESP_LIST_INIT_DONE:
  1263. octeon_delete_response_list(oct);
  1264. /* fallthrough */
  1265. case OCT_DEV_INSTR_QUEUE_INIT_DONE:
  1266. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
  1267. if (!(oct->io_qmask.iq & BIT_ULL(i)))
  1268. continue;
  1269. octeon_delete_instr_queue(oct, i);
  1270. }
  1271. #ifdef CONFIG_PCI_IOV
  1272. if (oct->sriov_info.sriov_enabled)
  1273. pci_disable_sriov(oct->pci_dev);
  1274. #endif
  1275. /* fallthrough */
  1276. case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
  1277. octeon_free_sc_buffer_pool(oct);
  1278. /* fallthrough */
  1279. case OCT_DEV_DISPATCH_INIT_DONE:
  1280. octeon_delete_dispatch_list(oct);
  1281. cancel_delayed_work_sync(&oct->nic_poll_work.work);
  1282. /* fallthrough */
  1283. case OCT_DEV_PCI_MAP_DONE:
  1284. /* Soft reset the octeon device before exiting */
  1285. if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
  1286. oct->fn_list.soft_reset(oct);
  1287. octeon_unmap_pci_barx(oct, 0);
  1288. octeon_unmap_pci_barx(oct, 1);
  1289. /* fallthrough */
  1290. case OCT_DEV_PCI_ENABLE_DONE:
  1291. pci_clear_master(oct->pci_dev);
  1292. /* Disable the device, releasing the PCI INT */
  1293. pci_disable_device(oct->pci_dev);
  1294. /* fallthrough */
  1295. case OCT_DEV_BEGIN_STATE:
  1296. /* Nothing to be done here either */
  1297. break;
  1298. } /* end switch (oct->status) */
  1299. tasklet_kill(&oct_priv->droq_tasklet);
  1300. }
  1301. /**
  1302. * \brief Callback for rx ctrl
  1303. * @param status status of request
  1304. * @param buf pointer to resp structure
  1305. */
  1306. static void rx_ctl_callback(struct octeon_device *oct,
  1307. u32 status,
  1308. void *buf)
  1309. {
  1310. struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
  1311. struct liquidio_rx_ctl_context *ctx;
  1312. ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
  1313. oct = lio_get_device(ctx->octeon_id);
  1314. if (status)
  1315. dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
  1316. CVM_CAST64(status));
  1317. WRITE_ONCE(ctx->cond, 1);
  1318. /* This barrier is required to be sure that the response has been
  1319. * written fully before waking up the handler
  1320. */
  1321. wmb();
  1322. wake_up_interruptible(&ctx->wc);
  1323. }
  1324. /**
  1325. * \brief Send Rx control command
  1326. * @param lio per-network private data
  1327. * @param start_stop whether to start or stop
  1328. */
  1329. static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
  1330. {
  1331. struct octeon_soft_command *sc;
  1332. struct liquidio_rx_ctl_context *ctx;
  1333. union octnet_cmd *ncmd;
  1334. int ctx_size = sizeof(struct liquidio_rx_ctl_context);
  1335. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1336. int retval;
  1337. if (oct->props[lio->ifidx].rx_on == start_stop)
  1338. return;
  1339. sc = (struct octeon_soft_command *)
  1340. octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
  1341. 16, ctx_size);
  1342. ncmd = (union octnet_cmd *)sc->virtdptr;
  1343. ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
  1344. WRITE_ONCE(ctx->cond, 0);
  1345. ctx->octeon_id = lio_get_device_id(oct);
  1346. init_waitqueue_head(&ctx->wc);
  1347. ncmd->u64 = 0;
  1348. ncmd->s.cmd = OCTNET_CMD_RX_CTL;
  1349. ncmd->s.param1 = start_stop;
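/* octeon_swap_8B_data() byte-swaps each 64-bit word of the command into
* the order expected on the device side before it is queued.
*/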
  1350. octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
  1351. sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  1352. octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
  1353. OPCODE_NIC_CMD, 0, 0, 0);
  1354. sc->callback = rx_ctl_callback;
  1355. sc->callback_arg = sc;
  1356. sc->wait_time = 5000;
  1357. retval = octeon_send_soft_command(oct, sc);
  1358. if (retval == IQ_SEND_FAILED) {
  1359. netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
  1360. } else {
  1361. /* Sleep on a wait queue till the cond flag indicates that the
  1362. * response arrived or timed-out.
  1363. */
  1364. if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
  1365. return;
  1366. oct->props[lio->ifidx].rx_on = start_stop;
  1367. }
  1368. octeon_free_soft_command(oct, sc);
  1369. }
  1370. /**
  1371. * \brief Destroy NIC device interface
  1372. * @param oct octeon device
  1373. * @param ifidx which interface to destroy
  1374. *
  1375. * Cleanup associated with each interface for an Octeon device when NIC
  1376. * module is being unloaded or if initialization fails during load.
  1377. */
  1378. static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
  1379. {
  1380. struct net_device *netdev = oct->props[ifidx].netdev;
  1381. struct lio *lio;
  1382. struct napi_struct *napi, *n;
  1383. if (!netdev) {
  1384. dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
  1385. __func__, ifidx);
  1386. return;
  1387. }
  1388. lio = GET_LIO(netdev);
  1389. dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
  1390. if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
  1391. liquidio_stop(netdev);
  1392. if (oct->props[lio->ifidx].napi_enabled == 1) {
  1393. list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
  1394. napi_disable(napi);
  1395. oct->props[lio->ifidx].napi_enabled = 0;
  1396. if (OCTEON_CN23XX_PF(oct))
  1397. oct->droq[0]->ops.poll_mode = 0;
  1398. }
  1399. if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
  1400. unregister_netdev(netdev);
  1401. cleanup_link_status_change_wq(netdev);
  1402. delete_glists(lio);
  1403. free_netdev(netdev);
  1404. oct->props[ifidx].gmxport = -1;
  1405. oct->props[ifidx].netdev = NULL;
  1406. }
  1407. /**
  1408. * \brief Stop complete NIC functionality
  1409. * @param oct octeon device
  1410. */
  1411. static int liquidio_stop_nic_module(struct octeon_device *oct)
  1412. {
  1413. int i, j;
  1414. struct lio *lio;
  1415. dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
  1416. if (!oct->ifcount) {
  1417. dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
  1418. return 1;
  1419. }
  1420. spin_lock_bh(&oct->cmd_resp_wqlock);
  1421. oct->cmd_resp_state = OCT_DRV_OFFLINE;
  1422. spin_unlock_bh(&oct->cmd_resp_wqlock);
  1423. for (i = 0; i < oct->ifcount; i++) {
  1424. lio = GET_LIO(oct->props[i].netdev);
  1425. for (j = 0; j < lio->linfo.num_rxpciq; j++)
  1426. octeon_unregister_droq_ops(oct,
  1427. lio->linfo.rxpciq[j].s.q_no);
  1428. }
  1429. for (i = 0; i < oct->ifcount; i++)
  1430. liquidio_destroy_nic_device(oct, i);
  1431. dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
  1432. return 0;
  1433. }
  1434. /**
  1435. * \brief Cleans up resources at unload time
  1436. * @param pdev PCI device structure
  1437. */
  1438. static void liquidio_remove(struct pci_dev *pdev)
  1439. {
  1440. struct octeon_device *oct_dev = pci_get_drvdata(pdev);
  1441. dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
  1442. if (oct_dev->watchdog_task)
  1443. kthread_stop(oct_dev->watchdog_task);
  1444. if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
  1445. liquidio_stop_nic_module(oct_dev);
  1446. /* Reset the octeon device and cleanup all memory allocated for
  1447. * the octeon device by driver.
  1448. */
  1449. octeon_destroy_resources(oct_dev);
  1450. dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
  1451. /* This octeon device has been removed. Update the global
  1452. * data structure to reflect this. Free the device structure.
  1453. */
  1454. octeon_free_device_mem(oct_dev);
  1455. }
  1456. /**
* \brief Identify the Octeon device and map the BAR address space
  1458. * @param oct octeon device
  1459. */
  1460. static int octeon_chip_specific_setup(struct octeon_device *oct)
  1461. {
  1462. u32 dev_id, rev_id;
  1463. int ret = 1;
  1464. char *s;
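/* Config dword 0 is the vendor/device ID and dword 8 carries the
* revision ID in its low byte; use the device ID to pick the
* chip-specific setup routine.
*/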
  1465. pci_read_config_dword(oct->pci_dev, 0, &dev_id);
  1466. pci_read_config_dword(oct->pci_dev, 8, &rev_id);
  1467. oct->rev_id = rev_id & 0xff;
  1468. switch (dev_id) {
  1469. case OCTEON_CN68XX_PCIID:
  1470. oct->chip_id = OCTEON_CN68XX;
  1471. ret = lio_setup_cn68xx_octeon_device(oct);
  1472. s = "CN68XX";
  1473. break;
  1474. case OCTEON_CN66XX_PCIID:
  1475. oct->chip_id = OCTEON_CN66XX;
  1476. ret = lio_setup_cn66xx_octeon_device(oct);
  1477. s = "CN66XX";
  1478. break;
  1479. case OCTEON_CN23XX_PCIID_PF:
  1480. oct->chip_id = OCTEON_CN23XX_PF_VID;
  1481. ret = setup_cn23xx_octeon_pf_device(oct);
  1482. s = "CN23XX";
  1483. break;
  1484. default:
  1485. s = "?";
  1486. dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
  1487. dev_id);
  1488. }
  1489. if (!ret)
  1490. dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
  1491. OCTEON_MAJOR_REV(oct),
  1492. OCTEON_MINOR_REV(oct),
  1493. octeon_get_conf(oct)->card_name,
  1494. LIQUIDIO_VERSION);
  1495. return ret;
  1496. }
  1497. /**
  1498. * \brief PCI initialization for each Octeon device.
  1499. * @param oct octeon device
  1500. */
  1501. static int octeon_pci_os_setup(struct octeon_device *oct)
  1502. {
  1503. /* setup PCI stuff first */
  1504. if (pci_enable_device(oct->pci_dev)) {
  1505. dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
  1506. return 1;
  1507. }
  1508. if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
  1509. dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
  1510. pci_disable_device(oct->pci_dev);
  1511. return 1;
  1512. }
  1513. /* Enable PCI DMA Master. */
  1514. pci_set_master(oct->pci_dev);
  1515. return 0;
  1516. }
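/**
* \brief Map an skb to the input (Tx) queue it should be sent on
* @param lio per-network private data
* @param skb network buffer
*/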
  1517. static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
  1518. {
  1519. int q = 0;
  1520. if (netif_is_multiqueue(lio->netdev))
  1521. q = skb->queue_mapping % lio->linfo.num_txpciq;
  1522. return q;
  1523. }
  1524. /**
  1525. * \brief Check Tx queue state for a given network buffer
  1526. * @param lio per-network private data
  1527. * @param skb network buffer
  1528. */
  1529. static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
  1530. {
  1531. int q = 0, iq = 0;
  1532. if (netif_is_multiqueue(lio->netdev)) {
  1533. q = skb->queue_mapping;
  1534. iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
  1535. } else {
  1536. iq = lio->txq;
  1537. q = iq;
  1538. }
  1539. if (octnet_iq_is_full(lio->oct_dev, iq))
  1540. return 0;
  1541. if (__netif_subqueue_stopped(lio->netdev, q)) {
  1542. INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
  1543. wake_q(lio->netdev, q);
  1544. }
  1545. return 1;
  1546. }
  1547. /**
  1548. * \brief Unmap and free network buffer
  1549. * @param buf buffer
  1550. */
  1551. static void free_netbuf(void *buf)
  1552. {
  1553. struct sk_buff *skb;
  1554. struct octnet_buf_free_info *finfo;
  1555. struct lio *lio;
  1556. finfo = (struct octnet_buf_free_info *)buf;
  1557. skb = finfo->skb;
  1558. lio = finfo->lio;
  1559. dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
  1560. DMA_TO_DEVICE);
  1561. check_txq_state(lio, skb);
  1562. tx_buffer_free(skb);
  1563. }
  1564. /**
  1565. * \brief Unmap and free gather buffer
  1566. * @param buf buffer
  1567. */
  1568. static void free_netsgbuf(void *buf)
  1569. {
  1570. struct octnet_buf_free_info *finfo;
  1571. struct sk_buff *skb;
  1572. struct lio *lio;
  1573. struct octnic_gather *g;
  1574. int i, frags, iq;
  1575. finfo = (struct octnet_buf_free_info *)buf;
  1576. skb = finfo->skb;
  1577. lio = finfo->lio;
  1578. g = finfo->g;
  1579. frags = skb_shinfo(skb)->nr_frags;
  1580. dma_unmap_single(&lio->oct_dev->pci_dev->dev,
  1581. g->sg[0].ptr[0], (skb->len - skb->data_len),
  1582. DMA_TO_DEVICE);
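/* sg[0].ptr[0] held the linear data mapping; the page fragments that
* follow are packed four pointers per sg entry, mirroring the mapping
* done in liquidio_xmit().
*/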
  1583. i = 1;
  1584. while (frags--) {
  1585. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
  1586. pci_unmap_page((lio->oct_dev)->pci_dev,
  1587. g->sg[(i >> 2)].ptr[(i & 3)],
  1588. frag->size, DMA_TO_DEVICE);
  1589. i++;
  1590. }
  1591. iq = skb_iq(lio, skb);
  1592. spin_lock(&lio->glist_lock[iq]);
  1593. list_add_tail(&g->list, &lio->glist[iq]);
  1594. spin_unlock(&lio->glist_lock[iq]);
  1595. check_txq_state(lio, skb); /* mq support: sub-queue state check */
  1596. tx_buffer_free(skb);
  1597. }
  1598. /**
  1599. * \brief Unmap and free gather buffer with response
  1600. * @param buf buffer
  1601. */
  1602. static void free_netsgbuf_with_resp(void *buf)
  1603. {
  1604. struct octeon_soft_command *sc;
  1605. struct octnet_buf_free_info *finfo;
  1606. struct sk_buff *skb;
  1607. struct lio *lio;
  1608. struct octnic_gather *g;
  1609. int i, frags, iq;
  1610. sc = (struct octeon_soft_command *)buf;
  1611. skb = (struct sk_buff *)sc->callback_arg;
  1612. finfo = (struct octnet_buf_free_info *)&skb->cb;
  1613. lio = finfo->lio;
  1614. g = finfo->g;
  1615. frags = skb_shinfo(skb)->nr_frags;
  1616. dma_unmap_single(&lio->oct_dev->pci_dev->dev,
  1617. g->sg[0].ptr[0], (skb->len - skb->data_len),
  1618. DMA_TO_DEVICE);
  1619. i = 1;
  1620. while (frags--) {
  1621. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
  1622. pci_unmap_page((lio->oct_dev)->pci_dev,
  1623. g->sg[(i >> 2)].ptr[(i & 3)],
  1624. frag->size, DMA_TO_DEVICE);
  1625. i++;
  1626. }
  1627. iq = skb_iq(lio, skb);
  1628. spin_lock(&lio->glist_lock[iq]);
  1629. list_add_tail(&g->list, &lio->glist[iq]);
  1630. spin_unlock(&lio->glist_lock[iq]);
  1631. /* Don't free the skb yet */
  1632. check_txq_state(lio, skb);
  1633. }
  1634. /**
  1635. * \brief Adjust ptp frequency
  1636. * @param ptp PTP clock info
  1637. * @param ppb how much to adjust by, in parts-per-billion
  1638. */
  1639. static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
  1640. {
  1641. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1642. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1643. u64 comp, delta;
  1644. unsigned long flags;
  1645. bool neg_adj = false;
  1646. if (ppb < 0) {
  1647. neg_adj = true;
  1648. ppb = -ppb;
  1649. }
  1650. /* The hardware adds the clock compensation value to the
  1651. * PTP clock on every coprocessor clock cycle, so we
  1652. * compute the delta in terms of coprocessor clocks.
  1653. */
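/* delta = (ppb << 32) / coproc_clock_rate converts the ppb request into
* the same left-shifted-by-32, per-coprocessor-cycle units that
* CN6XXX_MIO_PTP_CLOCK_COMP is programmed with in liquidio_ptp_init().
*/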
  1654. delta = (u64)ppb << 32;
  1655. do_div(delta, oct->coproc_clock_rate);
  1656. spin_lock_irqsave(&lio->ptp_lock, flags);
  1657. comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
  1658. if (neg_adj)
  1659. comp -= delta;
  1660. else
  1661. comp += delta;
  1662. lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
  1663. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1664. return 0;
  1665. }
  1666. /**
  1667. * \brief Adjust ptp time
  1668. * @param ptp PTP clock info
  1669. * @param delta how much to adjust by, in nanosecs
  1670. */
  1671. static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  1672. {
  1673. unsigned long flags;
  1674. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1675. spin_lock_irqsave(&lio->ptp_lock, flags);
  1676. lio->ptp_adjust += delta;
  1677. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1678. return 0;
  1679. }
  1680. /**
  1681. * \brief Get hardware clock time, including any adjustment
  1682. * @param ptp PTP clock info
  1683. * @param ts timespec
  1684. */
  1685. static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
  1686. struct timespec64 *ts)
  1687. {
  1688. u64 ns;
  1689. unsigned long flags;
  1690. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1691. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1692. spin_lock_irqsave(&lio->ptp_lock, flags);
  1693. ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
  1694. ns += lio->ptp_adjust;
  1695. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1696. *ts = ns_to_timespec64(ns);
  1697. return 0;
  1698. }
  1699. /**
  1700. * \brief Set hardware clock time. Reset adjustment
  1701. * @param ptp PTP clock info
  1702. * @param ts timespec
  1703. */
  1704. static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
  1705. const struct timespec64 *ts)
  1706. {
  1707. u64 ns;
  1708. unsigned long flags;
  1709. struct lio *lio = container_of(ptp, struct lio, ptp_info);
  1710. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1711. ns = timespec_to_ns(ts);
  1712. spin_lock_irqsave(&lio->ptp_lock, flags);
  1713. lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
  1714. lio->ptp_adjust = 0;
  1715. spin_unlock_irqrestore(&lio->ptp_lock, flags);
  1716. return 0;
  1717. }
  1718. /**
  1719. * \brief Check if PTP is enabled
  1720. * @param ptp PTP clock info
  1721. * @param rq request
  1722. * @param on is it on
  1723. */
  1724. static int
  1725. liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
  1726. struct ptp_clock_request *rq __attribute__((unused)),
  1727. int on __attribute__((unused)))
  1728. {
  1729. return -EOPNOTSUPP;
  1730. }
  1731. /**
  1732. * \brief Open PTP clock source
  1733. * @param netdev network device
  1734. */
  1735. static void oct_ptp_open(struct net_device *netdev)
  1736. {
  1737. struct lio *lio = GET_LIO(netdev);
  1738. struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
  1739. spin_lock_init(&lio->ptp_lock);
  1740. snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
  1741. lio->ptp_info.owner = THIS_MODULE;
  1742. lio->ptp_info.max_adj = 250000000;
  1743. lio->ptp_info.n_alarm = 0;
  1744. lio->ptp_info.n_ext_ts = 0;
  1745. lio->ptp_info.n_per_out = 0;
  1746. lio->ptp_info.pps = 0;
  1747. lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
  1748. lio->ptp_info.adjtime = liquidio_ptp_adjtime;
  1749. lio->ptp_info.gettime64 = liquidio_ptp_gettime;
  1750. lio->ptp_info.settime64 = liquidio_ptp_settime;
  1751. lio->ptp_info.enable = liquidio_ptp_enable;
  1752. lio->ptp_adjust = 0;
  1753. lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
  1754. &oct->pci_dev->dev);
  1755. if (IS_ERR(lio->ptp_clock))
  1756. lio->ptp_clock = NULL;
  1757. }
  1758. /**
  1759. * \brief Init PTP clock
  1760. * @param oct octeon device
  1761. */
  1762. static void liquidio_ptp_init(struct octeon_device *oct)
  1763. {
  1764. u64 clock_comp, cfg;
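/* Program the per-cycle compensation: (NSEC_PER_SEC << 32) /
* coproc_clock_rate nanoseconds (with a 32-bit fractional part) are
* added to the PTP clock on every coprocessor clock cycle, as noted in
* liquidio_ptp_adjfreq().
*/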
  1765. clock_comp = (u64)NSEC_PER_SEC << 32;
  1766. do_div(clock_comp, oct->coproc_clock_rate);
  1767. lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
  1768. /* Enable */
  1769. cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
  1770. lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
  1771. }
  1772. /**
  1773. * \brief Load firmware to device
  1774. * @param oct octeon device
  1775. *
  1776. * Maps device to firmware filename, requests firmware, and downloads it
  1777. */
  1778. static int load_firmware(struct octeon_device *oct)
  1779. {
  1780. int ret = 0;
  1781. const struct firmware *fw;
  1782. char fw_name[LIO_MAX_FW_FILENAME_LEN];
  1783. char *tmp_fw_type;
  1784. if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
  1785. sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
  1786. dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
  1787. return ret;
  1788. }
  1789. if (fw_type[0] == '\0')
  1790. tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
  1791. else
  1792. tmp_fw_type = fw_type;
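/* The firmware file name is assembled as
* <LIO_FW_DIR><LIO_FW_BASE_NAME><card_name>_<fw_type><LIO_FW_NAME_SUFFIX>.
*/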
  1793. sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
  1794. octeon_get_conf(oct)->card_name, tmp_fw_type,
  1795. LIO_FW_NAME_SUFFIX);
  1796. ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
  1797. if (ret) {
  1798. dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
  1799. fw_name);
  1800. release_firmware(fw);
  1801. return ret;
  1802. }
  1803. ret = octeon_download_firmware(oct, fw->data, fw->size);
  1804. release_firmware(fw);
  1805. return ret;
  1806. }
  1807. /**
  1808. * \brief Setup output queue
  1809. * @param oct octeon device
  1810. * @param q_no which queue
  1811. * @param num_descs how many descriptors
  1812. * @param desc_size size of each descriptor
  1813. * @param app_ctx application context
  1814. */
  1815. static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
  1816. int desc_size, void *app_ctx)
  1817. {
  1818. int ret_val = 0;
  1819. dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
  1820. /* droq creation and local register settings. */
  1821. ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
  1822. if (ret_val < 0)
  1823. return ret_val;
  1824. if (ret_val == 1) {
  1825. dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
  1826. return 0;
  1827. }
  1828. /* tasklet creation for the droq */
  1829. /* Enable the droq queues */
  1830. octeon_set_droq_pkt_op(oct, q_no, 1);
  1831. /* Send Credit for Octeon Output queues. Credits are always
  1832. * sent after the output queue is enabled.
  1833. */
  1834. writel(oct->droq[q_no]->max_count,
  1835. oct->droq[q_no]->pkts_credit_reg);
  1836. return ret_val;
  1837. }
  1838. /**
  1839. * \brief Callback for getting interface configuration
  1840. * @param status status of request
  1841. * @param buf pointer to resp structure
  1842. */
  1843. static void if_cfg_callback(struct octeon_device *oct,
  1844. u32 status __attribute__((unused)),
  1845. void *buf)
  1846. {
  1847. struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
  1848. struct liquidio_if_cfg_resp *resp;
  1849. struct liquidio_if_cfg_context *ctx;
  1850. resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
  1851. ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
  1852. oct = lio_get_device(ctx->octeon_id);
  1853. if (resp->status)
  1854. dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
  1855. CVM_CAST64(resp->status));
  1856. WRITE_ONCE(ctx->cond, 1);
  1857. snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
  1858. resp->cfg_info.liquidio_firmware_version);
  1859. /* This barrier is required to be sure that the response has been
  1860. * written fully before waking up the handler
  1861. */
  1862. wmb();
  1863. wake_up_interruptible(&ctx->wc);
  1864. }
/** Routine to push packets arriving on Octeon interface up to the network layer.
* @param octeon_id - octeon device id.
  1867. * @param skbuff - skbuff struct to be passed to network layer.
  1868. * @param len - size of total data received.
  1869. * @param rh - Control header associated with the packet
  1870. * @param param - additional control data with the packet
  1871. * @param arg - farg registered in droq_ops
  1872. */
  1873. static void
  1874. liquidio_push_packet(u32 octeon_id __attribute__((unused)),
  1875. void *skbuff,
  1876. u32 len,
  1877. union octeon_rh *rh,
  1878. void *param,
  1879. void *arg)
  1880. {
  1881. struct napi_struct *napi = param;
  1882. struct sk_buff *skb = (struct sk_buff *)skbuff;
  1883. struct skb_shared_hwtstamps *shhwtstamps;
  1884. u64 ns;
  1885. u16 vtag = 0;
  1886. u32 r_dh_off;
  1887. struct net_device *netdev = (struct net_device *)arg;
  1888. struct octeon_droq *droq = container_of(param, struct octeon_droq,
  1889. napi);
  1890. if (netdev) {
  1891. int packet_was_received;
  1892. struct lio *lio = GET_LIO(netdev);
  1893. struct octeon_device *oct = lio->oct_dev;
  1894. /* Do not proceed if the interface is not in RUNNING state. */
  1895. if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
  1896. recv_buffer_free(skb);
  1897. droq->stats.rx_dropped++;
  1898. return;
  1899. }
  1900. skb->dev = netdev;
  1901. skb_record_rx_queue(skb, droq->q_no);
  1902. if (likely(len > MIN_SKB_SIZE)) {
  1903. struct octeon_skb_page_info *pg_info;
  1904. unsigned char *va;
  1905. pg_info = ((struct octeon_skb_page_info *)(skb->cb));
  1906. if (pg_info->page) {
  1907. /* For Paged allocation use the frags */
  1908. va = page_address(pg_info->page) +
  1909. pg_info->page_offset;
  1910. memcpy(skb->data, va, MIN_SKB_SIZE);
  1911. skb_put(skb, MIN_SKB_SIZE);
  1912. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
  1913. pg_info->page,
  1914. pg_info->page_offset +
  1915. MIN_SKB_SIZE,
  1916. len - MIN_SKB_SIZE,
  1917. LIO_RXBUFFER_SZ);
  1918. }
  1919. } else {
  1920. struct octeon_skb_page_info *pg_info =
  1921. ((struct octeon_skb_page_info *)(skb->cb));
  1922. skb_copy_to_linear_data(skb, page_address(pg_info->page)
  1923. + pg_info->page_offset, len);
  1924. skb_put(skb, len);
  1925. put_page(pg_info->page);
  1926. }
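/* The first rh->r_dh.len 8-byte words of the packet carry metadata:
* r_dh_off starts at the last of those words (the hardware timestamp,
* when present) and steps back one word for the RSS hash; skb_pull()
* below then strips all of the metadata words.
*/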
  1927. r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
  1928. if (((oct->chip_id == OCTEON_CN66XX) ||
  1929. (oct->chip_id == OCTEON_CN68XX)) &&
  1930. ptp_enable) {
  1931. if (rh->r_dh.has_hwtstamp) {
  1932. /* timestamp is included from the hardware at
  1933. * the beginning of the packet.
  1934. */
  1935. if (ifstate_check
  1936. (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
  1937. /* Nanoseconds are in the first 64-bits
  1938. * of the packet.
  1939. */
  1940. memcpy(&ns, (skb->data + r_dh_off),
  1941. sizeof(ns));
  1942. r_dh_off -= BYTES_PER_DHLEN_UNIT;
  1943. shhwtstamps = skb_hwtstamps(skb);
  1944. shhwtstamps->hwtstamp =
  1945. ns_to_ktime(ns +
  1946. lio->ptp_adjust);
  1947. }
  1948. }
  1949. }
  1950. if (rh->r_dh.has_hash) {
  1951. __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
  1952. u32 hash = be32_to_cpu(*hash_be);
  1953. skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
  1954. r_dh_off -= BYTES_PER_DHLEN_UNIT;
  1955. }
  1956. skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
  1957. skb->protocol = eth_type_trans(skb, skb->dev);
  1958. if ((netdev->features & NETIF_F_RXCSUM) &&
  1959. (((rh->r_dh.encap_on) &&
  1960. (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
  1961. (!(rh->r_dh.encap_on) &&
  1962. (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
  1963. /* checksum has already been verified */
  1964. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1965. else
  1966. skb->ip_summed = CHECKSUM_NONE;
  1967. /* Setting Encapsulation field on basis of status received
  1968. * from the firmware
  1969. */
  1970. if (rh->r_dh.encap_on) {
  1971. skb->encapsulation = 1;
  1972. skb->csum_level = 1;
  1973. droq->stats.rx_vxlan++;
  1974. }
  1975. /* inbound VLAN tag */
  1976. if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  1977. (rh->r_dh.vlan != 0)) {
  1978. u16 vid = rh->r_dh.vlan;
  1979. u16 priority = rh->r_dh.priority;
  1980. vtag = priority << 13 | vid;
  1981. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
  1982. }
  1983. packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
  1984. if (packet_was_received) {
  1985. droq->stats.rx_bytes_received += len;
  1986. droq->stats.rx_pkts_received++;
  1987. } else {
  1988. droq->stats.rx_dropped++;
  1989. netif_info(lio, rx_err, lio->netdev,
  1990. "droq:%d error rx_dropped:%llu\n",
  1991. droq->q_no, droq->stats.rx_dropped);
  1992. }
  1993. } else {
  1994. recv_buffer_free(skb);
  1995. }
  1996. }
  1997. /**
  1998. * \brief wrapper for calling napi_schedule
  1999. * @param param parameters to pass to napi_schedule
  2000. *
  2001. * Used when scheduling on different CPUs
  2002. */
  2003. static void napi_schedule_wrapper(void *param)
  2004. {
  2005. struct napi_struct *napi = param;
  2006. napi_schedule(napi);
  2007. }
  2008. /**
  2009. * \brief callback when receive interrupt occurs and we are in NAPI mode
  2010. * @param arg pointer to octeon output queue
  2011. */
  2012. static void liquidio_napi_drv_callback(void *arg)
  2013. {
  2014. struct octeon_device *oct;
  2015. struct octeon_droq *droq = arg;
  2016. int this_cpu = smp_processor_id();
  2017. oct = droq->oct_dev;
  2018. if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
  2019. napi_schedule_irqoff(&droq->napi);
  2020. } else {
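/* We are neither a 23XX PF nor on this droq's designated CPU, so defer
* the napi_schedule to that CPU with an asynchronous smp call.
*/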
  2021. struct call_single_data *csd = &droq->csd;
  2022. csd->func = napi_schedule_wrapper;
  2023. csd->info = &droq->napi;
  2024. csd->flags = 0;
  2025. smp_call_function_single_async(droq->cpu_id, csd);
  2026. }
  2027. }
  2028. /**
  2029. * \brief Entry point for NAPI polling
  2030. * @param napi NAPI structure
  2031. * @param budget maximum number of items to process
  2032. */
  2033. static int liquidio_napi_poll(struct napi_struct *napi, int budget)
  2034. {
  2035. struct octeon_droq *droq;
  2036. int work_done;
  2037. int tx_done = 0, iq_no;
  2038. struct octeon_instr_queue *iq;
  2039. struct octeon_device *oct;
  2040. droq = container_of(napi, struct octeon_droq, napi);
  2041. oct = droq->oct_dev;
  2042. iq_no = droq->q_no;
  2043. /* Handle Droq descriptors */
  2044. work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
  2045. POLL_EVENT_PROCESS_PKTS,
  2046. budget);
  2047. /* Flush the instruction queue */
  2048. iq = oct->instr_queue[iq_no];
  2049. if (iq) {
/* Process iq buffers within the budget limit */
  2051. tx_done = octeon_flush_iq(oct, iq, budget);
  2052. /* Update iq read-index rather than waiting for next interrupt.
* If tx_done is false, the full budget is returned below so NAPI keeps polling.
  2054. */
  2055. update_txq_status(oct, iq_no);
  2056. } else {
  2057. dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
  2058. __func__, iq_no);
  2059. }
  2060. /* force enable interrupt if reg cnts are high to avoid wraparound */
  2061. if ((work_done < budget && tx_done) ||
  2062. (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
  2063. (droq->pkt_count >= MAX_REG_CNT)) {
  2064. tx_done = 1;
  2065. napi_complete_done(napi, work_done);
  2066. octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
  2067. POLL_EVENT_ENABLE_INTR, 0);
  2068. return 0;
  2069. }
  2070. return (!tx_done) ? (budget) : (work_done);
  2071. }
  2072. /**
  2073. * \brief Setup input and output queues
  2074. * @param octeon_dev octeon device
  2075. * @param ifidx Interface Index
  2076. *
  2077. * Note: Queues are with respect to the octeon device. Thus
  2078. * an input queue is for egress packets, and output queues
  2079. * are for ingress packets.
  2080. */
  2081. static inline int setup_io_queues(struct octeon_device *octeon_dev,
  2082. int ifidx)
  2083. {
  2084. struct octeon_droq_ops droq_ops;
  2085. struct net_device *netdev;
  2086. static int cpu_id;
  2087. static int cpu_id_modulus;
  2088. struct octeon_droq *droq;
  2089. struct napi_struct *napi;
  2090. int q, q_no, retval = 0;
  2091. struct lio *lio;
  2092. int num_tx_descs;
  2093. netdev = octeon_dev->props[ifidx].netdev;
  2094. lio = GET_LIO(netdev);
  2095. memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
  2096. droq_ops.fptr = liquidio_push_packet;
  2097. droq_ops.farg = (void *)netdev;
  2098. droq_ops.poll_mode = 1;
  2099. droq_ops.napi_fn = liquidio_napi_drv_callback;
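/* Spread DROQ processing round-robin over the present CPUs; each droq
* gets a designated cpu_id used by liquidio_napi_drv_callback().
*/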
  2100. cpu_id = 0;
  2101. cpu_id_modulus = num_present_cpus();
  2102. /* set up DROQs. */
  2103. for (q = 0; q < lio->linfo.num_rxpciq; q++) {
  2104. q_no = lio->linfo.rxpciq[q].s.q_no;
  2105. dev_dbg(&octeon_dev->pci_dev->dev,
  2106. "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
  2107. q, q_no);
  2108. retval = octeon_setup_droq(octeon_dev, q_no,
  2109. CFG_GET_NUM_RX_DESCS_NIC_IF
  2110. (octeon_get_conf(octeon_dev),
  2111. lio->ifidx),
  2112. CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
  2113. (octeon_get_conf(octeon_dev),
  2114. lio->ifidx), NULL);
  2115. if (retval) {
  2116. dev_err(&octeon_dev->pci_dev->dev,
  2117. "%s : Runtime DROQ(RxQ) creation failed.\n",
  2118. __func__);
  2119. return 1;
  2120. }
  2121. droq = octeon_dev->droq[q_no];
  2122. napi = &droq->napi;
  2123. dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
  2124. (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
  2125. netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
  2126. /* designate a CPU for this droq */
  2127. droq->cpu_id = cpu_id;
  2128. cpu_id++;
  2129. if (cpu_id >= cpu_id_modulus)
  2130. cpu_id = 0;
  2131. octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
  2132. }
  2133. if (OCTEON_CN23XX_PF(octeon_dev)) {
  2134. /* 23XX PF can receive control messages (via the first PF-owned
  2135. * droq) from the firmware even if the ethX interface is down,
  2136. * so that's why poll_mode must be off for the first droq.
  2137. */
  2138. octeon_dev->droq[0]->ops.poll_mode = 0;
  2139. }
  2140. /* set up IQs. */
  2141. for (q = 0; q < lio->linfo.num_txpciq; q++) {
  2142. num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
  2143. (octeon_dev),
  2144. lio->ifidx);
  2145. retval = octeon_setup_iq(octeon_dev, ifidx, q,
  2146. lio->linfo.txpciq[q], num_tx_descs,
  2147. netdev_get_tx_queue(netdev, q));
  2148. if (retval) {
  2149. dev_err(&octeon_dev->pci_dev->dev,
  2150. " %s : Runtime IQ(TxQ) creation failed.\n",
  2151. __func__);
  2152. return 1;
  2153. }
  2154. }
  2155. return 0;
  2156. }
  2157. /**
  2158. * \brief Poll routine for checking transmit queue status
  2159. * @param work work_struct data structure
  2160. */
  2161. static void octnet_poll_check_txq_status(struct work_struct *work)
  2162. {
  2163. struct cavium_wk *wk = (struct cavium_wk *)work;
  2164. struct lio *lio = (struct lio *)wk->ctxptr;
  2165. if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
  2166. return;
  2167. check_txq_status(lio);
  2168. queue_delayed_work(lio->txq_status_wq.wq,
  2169. &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
  2170. }
  2171. /**
  2172. * \brief Sets up the txq poll check
  2173. * @param netdev network device
  2174. */
  2175. static inline int setup_tx_poll_fn(struct net_device *netdev)
  2176. {
  2177. struct lio *lio = GET_LIO(netdev);
  2178. struct octeon_device *oct = lio->oct_dev;
  2179. lio->txq_status_wq.wq = alloc_workqueue("txq-status",
  2180. WQ_MEM_RECLAIM, 0);
  2181. if (!lio->txq_status_wq.wq) {
  2182. dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
  2183. return -1;
  2184. }
  2185. INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
  2186. octnet_poll_check_txq_status);
  2187. lio->txq_status_wq.wk.ctxptr = lio;
  2188. queue_delayed_work(lio->txq_status_wq.wq,
  2189. &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
  2190. return 0;
  2191. }
  2192. static inline void cleanup_tx_poll_fn(struct net_device *netdev)
  2193. {
  2194. struct lio *lio = GET_LIO(netdev);
  2195. if (lio->txq_status_wq.wq) {
  2196. cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
  2197. destroy_workqueue(lio->txq_status_wq.wq);
  2198. }
  2199. }
  2200. /**
  2201. * \brief Net device open for LiquidIO
  2202. * @param netdev network device
  2203. */
  2204. static int liquidio_open(struct net_device *netdev)
  2205. {
  2206. struct lio *lio = GET_LIO(netdev);
  2207. struct octeon_device *oct = lio->oct_dev;
  2208. struct napi_struct *napi, *n;
  2209. if (oct->props[lio->ifidx].napi_enabled == 0) {
  2210. list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
  2211. napi_enable(napi);
  2212. oct->props[lio->ifidx].napi_enabled = 1;
  2213. if (OCTEON_CN23XX_PF(oct))
  2214. oct->droq[0]->ops.poll_mode = 1;
  2215. }
  2216. if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) &&
  2217. ptp_enable)
  2218. oct_ptp_open(netdev);
  2219. ifstate_set(lio, LIO_IFSTATE_RUNNING);
  2220. /* Ready for link status updates */
  2221. lio->intf_open = 1;
  2222. netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
  2223. if (OCTEON_CN23XX_PF(oct)) {
  2224. if (!oct->msix_on)
  2225. if (setup_tx_poll_fn(netdev))
  2226. return -1;
  2227. } else {
  2228. if (setup_tx_poll_fn(netdev))
  2229. return -1;
  2230. }
  2231. start_txq(netdev);
  2232. /* tell Octeon to start forwarding packets to host */
  2233. send_rx_ctrl_cmd(lio, 1);
  2234. dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
  2235. netdev->name);
  2236. return 0;
  2237. }
  2238. /**
  2239. * \brief Net device stop for LiquidIO
  2240. * @param netdev network device
  2241. */
  2242. static int liquidio_stop(struct net_device *netdev)
  2243. {
  2244. struct lio *lio = GET_LIO(netdev);
  2245. struct octeon_device *oct = lio->oct_dev;
  2246. ifstate_reset(lio, LIO_IFSTATE_RUNNING);
  2247. netif_tx_disable(netdev);
  2248. /* Inform that netif carrier is down */
  2249. netif_carrier_off(netdev);
  2250. lio->intf_open = 0;
  2251. lio->linfo.link.s.link_up = 0;
  2252. lio->link_changes++;
  2253. /* Tell Octeon that nic interface is down. */
  2254. send_rx_ctrl_cmd(lio, 0);
  2255. if (OCTEON_CN23XX_PF(oct)) {
  2256. if (!oct->msix_on)
  2257. cleanup_tx_poll_fn(netdev);
  2258. } else {
  2259. cleanup_tx_poll_fn(netdev);
  2260. }
  2261. if (lio->ptp_clock) {
  2262. ptp_clock_unregister(lio->ptp_clock);
  2263. lio->ptp_clock = NULL;
  2264. }
  2265. dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
  2266. return 0;
  2267. }
  2268. /**
  2269. * \brief Converts a mask based on net device flags
  2270. * @param netdev network device
  2271. *
* This routine generates an octnet_ifflags mask from the net device flags
  2273. * received from the OS.
  2274. */
  2275. static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
  2276. {
  2277. enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
  2278. if (netdev->flags & IFF_PROMISC)
  2279. f |= OCTNET_IFFLAG_PROMISC;
  2280. if (netdev->flags & IFF_ALLMULTI)
  2281. f |= OCTNET_IFFLAG_ALLMULTI;
  2282. if (netdev->flags & IFF_MULTICAST) {
  2283. f |= OCTNET_IFFLAG_MULTICAST;
  2284. /* Accept all multicast addresses if there are more than we
  2285. * can handle
  2286. */
  2287. if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
  2288. f |= OCTNET_IFFLAG_ALLMULTI;
  2289. }
  2290. if (netdev->flags & IFF_BROADCAST)
  2291. f |= OCTNET_IFFLAG_BROADCAST;
  2292. return f;
  2293. }
  2294. /**
  2295. * \brief Net device set_multicast_list
  2296. * @param netdev network device
  2297. */
  2298. static void liquidio_set_mcast_list(struct net_device *netdev)
  2299. {
  2300. struct lio *lio = GET_LIO(netdev);
  2301. struct octeon_device *oct = lio->oct_dev;
  2302. struct octnic_ctrl_pkt nctrl;
  2303. struct netdev_hw_addr *ha;
  2304. u64 *mc;
  2305. int ret;
  2306. int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
  2307. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2308. /* Create a ctrl pkt command to be sent to core app. */
  2309. nctrl.ncmd.u64 = 0;
  2310. nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
  2311. nctrl.ncmd.s.param1 = get_new_flags(netdev);
  2312. nctrl.ncmd.s.param2 = mc_count;
  2313. nctrl.ncmd.s.more = mc_count;
  2314. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2315. nctrl.netpndev = (u64)netdev;
  2316. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2317. /* copy all the addresses into the udd */
  2318. mc = &nctrl.udd[0];
  2319. netdev_for_each_mc_addr(ha, netdev) {
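/* Each multicast address occupies one 64-bit udd word; the 6-byte MAC is
* copied into the last six bytes of that word.
*/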
  2320. *mc = 0;
  2321. memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
  2322. /* no need to swap bytes */
  2323. if (++mc > &nctrl.udd[mc_count])
  2324. break;
  2325. }
  2326. /* Apparently, any activity in this call from the kernel has to
  2327. * be atomic. So we won't wait for response.
  2328. */
  2329. nctrl.wait_time = 0;
  2330. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2331. if (ret < 0) {
  2332. dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
  2333. ret);
  2334. }
  2335. }
  2336. /**
  2337. * \brief Net device set_mac_address
  2338. * @param netdev network device
  2339. */
  2340. static int liquidio_set_mac(struct net_device *netdev, void *p)
  2341. {
  2342. int ret = 0;
  2343. struct lio *lio = GET_LIO(netdev);
  2344. struct octeon_device *oct = lio->oct_dev;
  2345. struct sockaddr *addr = (struct sockaddr *)p;
  2346. struct octnic_ctrl_pkt nctrl;
  2347. if (!is_valid_ether_addr(addr->sa_data))
  2348. return -EADDRNOTAVAIL;
  2349. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2350. nctrl.ncmd.u64 = 0;
  2351. nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
  2352. nctrl.ncmd.s.param1 = 0;
  2353. nctrl.ncmd.s.more = 1;
  2354. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2355. nctrl.netpndev = (u64)netdev;
  2356. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2357. nctrl.wait_time = 100;
  2358. nctrl.udd[0] = 0;
  2359. /* The MAC Address is presented in network byte order. */
  2360. memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
  2361. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2362. if (ret < 0) {
  2363. dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
  2364. return -ENOMEM;
  2365. }
  2366. memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  2367. memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
  2368. return 0;
  2369. }
  2370. /**
  2371. * \brief Net device get_stats
  2372. * @param netdev network device
  2373. */
  2374. static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
  2375. {
  2376. struct lio *lio = GET_LIO(netdev);
  2377. struct net_device_stats *stats = &netdev->stats;
  2378. struct octeon_device *oct;
  2379. u64 pkts = 0, drop = 0, bytes = 0;
  2380. struct oct_droq_stats *oq_stats;
  2381. struct oct_iq_stats *iq_stats;
  2382. int i, iq_no, oq_no;
  2383. oct = lio->oct_dev;
  2384. for (i = 0; i < lio->linfo.num_txpciq; i++) {
  2385. iq_no = lio->linfo.txpciq[i].s.q_no;
  2386. iq_stats = &oct->instr_queue[iq_no]->stats;
  2387. pkts += iq_stats->tx_done;
  2388. drop += iq_stats->tx_dropped;
  2389. bytes += iq_stats->tx_tot_bytes;
  2390. }
  2391. stats->tx_packets = pkts;
  2392. stats->tx_bytes = bytes;
  2393. stats->tx_dropped = drop;
  2394. pkts = 0;
  2395. drop = 0;
  2396. bytes = 0;
  2397. for (i = 0; i < lio->linfo.num_rxpciq; i++) {
  2398. oq_no = lio->linfo.rxpciq[i].s.q_no;
  2399. oq_stats = &oct->droq[oq_no]->stats;
  2400. pkts += oq_stats->rx_pkts_received;
  2401. drop += (oq_stats->rx_dropped +
  2402. oq_stats->dropped_nodispatch +
  2403. oq_stats->dropped_toomany +
  2404. oq_stats->dropped_nomem);
  2405. bytes += oq_stats->rx_bytes_received;
  2406. }
  2407. stats->rx_bytes = bytes;
  2408. stats->rx_packets = pkts;
  2409. stats->rx_dropped = drop;
  2410. return stats;
  2411. }
  2412. /**
  2413. * \brief Net device change_mtu
  2414. * @param netdev network device
  2415. */
  2416. static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
  2417. {
  2418. struct lio *lio = GET_LIO(netdev);
  2419. struct octeon_device *oct = lio->oct_dev;
  2420. struct octnic_ctrl_pkt nctrl;
  2421. int ret = 0;
  2422. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2423. nctrl.ncmd.u64 = 0;
  2424. nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
  2425. nctrl.ncmd.s.param1 = new_mtu;
  2426. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2427. nctrl.wait_time = 100;
  2428. nctrl.netpndev = (u64)netdev;
  2429. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2430. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2431. if (ret < 0) {
  2432. dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
  2433. return -1;
  2434. }
  2435. lio->mtu = new_mtu;
  2436. return 0;
  2437. }
  2438. /**
  2439. * \brief Handler for SIOCSHWTSTAMP ioctl
  2440. * @param netdev network device
  2441. * @param ifr interface request
  2443. */
  2444. static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
  2445. {
  2446. struct hwtstamp_config conf;
  2447. struct lio *lio = GET_LIO(netdev);
  2448. if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
  2449. return -EFAULT;
  2450. if (conf.flags)
  2451. return -EINVAL;
  2452. switch (conf.tx_type) {
  2453. case HWTSTAMP_TX_ON:
  2454. case HWTSTAMP_TX_OFF:
  2455. break;
  2456. default:
  2457. return -ERANGE;
  2458. }
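/* All PTP filter requests are reported back as HWTSTAMP_FILTER_ALL, and
* Rx timestamping is enabled only for that setting.
*/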
  2459. switch (conf.rx_filter) {
  2460. case HWTSTAMP_FILTER_NONE:
  2461. break;
  2462. case HWTSTAMP_FILTER_ALL:
  2463. case HWTSTAMP_FILTER_SOME:
  2464. case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
  2465. case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
  2466. case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
  2467. case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
  2468. case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
  2469. case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
  2470. case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
  2471. case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
  2472. case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
  2473. case HWTSTAMP_FILTER_PTP_V2_EVENT:
  2474. case HWTSTAMP_FILTER_PTP_V2_SYNC:
  2475. case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
  2476. conf.rx_filter = HWTSTAMP_FILTER_ALL;
  2477. break;
  2478. default:
  2479. return -ERANGE;
  2480. }
  2481. if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
  2482. ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
  2483. else
  2484. ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
  2485. return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
  2486. }
  2487. /**
  2488. * \brief ioctl handler
  2489. * @param netdev network device
  2490. * @param ifr interface request
  2491. * @param cmd command
  2492. */
  2493. static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  2494. {
  2495. struct lio *lio = GET_LIO(netdev);
  2496. switch (cmd) {
  2497. case SIOCSHWTSTAMP:
  2498. if ((lio->oct_dev->chip_id == OCTEON_CN66XX ||
  2499. lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable)
  2500. return hwtstamp_ioctl(netdev, ifr);
  2501. default:
  2502. return -EOPNOTSUPP;
  2503. }
  2504. }
  2505. /**
  2506. * \brief handle a Tx timestamp response
  2507. * @param status response status
  2508. * @param buf pointer to skb
  2509. */
  2510. static void handle_timestamp(struct octeon_device *oct,
  2511. u32 status,
  2512. void *buf)
  2513. {
  2514. struct octnet_buf_free_info *finfo;
  2515. struct octeon_soft_command *sc;
  2516. struct oct_timestamp_resp *resp;
  2517. struct lio *lio;
  2518. struct sk_buff *skb = (struct sk_buff *)buf;
  2519. finfo = (struct octnet_buf_free_info *)skb->cb;
  2520. lio = finfo->lio;
  2521. sc = finfo->sc;
  2522. oct = lio->oct_dev;
  2523. resp = (struct oct_timestamp_resp *)sc->virtrptr;
  2524. if (status != OCTEON_REQUEST_DONE) {
  2525. dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
  2526. CVM_CAST64(status));
  2527. resp->timestamp = 0;
  2528. }
  2529. octeon_swap_8B_data(&resp->timestamp, 1);
  2530. if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
  2531. struct skb_shared_hwtstamps ts;
  2532. u64 ns = resp->timestamp;
  2533. netif_info(lio, tx_done, lio->netdev,
  2534. "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
  2535. skb, (unsigned long long)ns);
  2536. ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
  2537. skb_tstamp_tx(skb, &ts);
  2538. }
  2539. octeon_free_soft_command(oct, sc);
  2540. tx_buffer_free(skb);
  2541. }
  2542. /* \brief Send a data packet that will be timestamped
  2543. * @param oct octeon device
  2544. * @param ndata pointer to network data
  2545. * @param finfo pointer to private network data
  2546. */
  2547. static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
  2548. struct octnic_data_pkt *ndata,
  2549. struct octnet_buf_free_info *finfo)
  2550. {
  2551. int retval;
  2552. struct octeon_soft_command *sc;
  2553. struct lio *lio;
  2554. int ring_doorbell;
  2555. u32 len;
  2556. lio = finfo->lio;
  2557. sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
  2558. sizeof(struct oct_timestamp_resp));
  2559. finfo->sc = sc;
  2560. if (!sc) {
  2561. dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
  2562. return IQ_SEND_FAILED;
  2563. }
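/* Switch to the response-carrying request type so the firmware returns
* the Tx timestamp; handle_timestamp() picks it up via sc->callback.
*/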
  2564. if (ndata->reqtype == REQTYPE_NORESP_NET)
  2565. ndata->reqtype = REQTYPE_RESP_NET;
  2566. else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
  2567. ndata->reqtype = REQTYPE_RESP_NET_SG;
  2568. sc->callback = handle_timestamp;
  2569. sc->callback_arg = finfo->skb;
  2570. sc->iq_no = ndata->q_no;
  2571. if (OCTEON_CN23XX_PF(oct))
  2572. len = (u32)((struct octeon_instr_ih3 *)
  2573. (&sc->cmd.cmd3.ih3))->dlengsz;
  2574. else
  2575. len = (u32)((struct octeon_instr_ih2 *)
  2576. (&sc->cmd.cmd2.ih2))->dlengsz;
  2577. ring_doorbell = 1;
  2578. retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
  2579. sc, len, ndata->reqtype);
  2580. if (retval == IQ_SEND_FAILED) {
  2581. dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
  2582. retval);
  2583. octeon_free_soft_command(oct, sc);
  2584. } else {
  2585. netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
  2586. }
  2587. return retval;
  2588. }
/** \brief Transmit network packets to the Octeon interface
  2590. * @param skbuff skbuff struct to be passed to network layer.
  2591. * @param netdev pointer to network device
  2592. * @returns whether the packet was transmitted to the device okay or not
  2593. * (NETDEV_TX_OK or NETDEV_TX_BUSY)
  2594. */
  2595. static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
  2596. {
  2597. struct lio *lio;
  2598. struct octnet_buf_free_info *finfo;
  2599. union octnic_cmd_setup cmdsetup;
  2600. struct octnic_data_pkt ndata;
  2601. struct octeon_device *oct;
  2602. struct oct_iq_stats *stats;
  2603. struct octeon_instr_irh *irh;
  2604. union tx_info *tx_info;
  2605. int status = 0;
  2606. int q_idx = 0, iq_no = 0;
  2607. int j;
  2608. u64 dptr = 0;
  2609. u32 tag = 0;
  2610. lio = GET_LIO(netdev);
  2611. oct = lio->oct_dev;
  2612. if (netif_is_multiqueue(netdev)) {
  2613. q_idx = skb->queue_mapping;
  2614. q_idx = (q_idx % (lio->linfo.num_txpciq));
  2615. tag = q_idx;
  2616. iq_no = lio->linfo.txpciq[q_idx].s.q_no;
  2617. } else {
  2618. iq_no = lio->txq;
  2619. }
  2620. stats = &oct->instr_queue[iq_no]->stats;
  2621. /* Check for all conditions in which the current packet cannot be
  2622. * transmitted.
  2623. */
  2624. if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
  2625. (!lio->linfo.link.s.link_up) ||
  2626. (skb->len <= 0)) {
  2627. netif_info(lio, tx_err, lio->netdev,
  2628. "Transmit failed link_status : %d\n",
  2629. lio->linfo.link.s.link_up);
  2630. goto lio_xmit_failed;
  2631. }
  2632. /* Use space in skb->cb to store info used to unmap and
  2633. * free the buffers.
  2634. */
  2635. finfo = (struct octnet_buf_free_info *)skb->cb;
  2636. finfo->lio = lio;
  2637. finfo->skb = skb;
  2638. finfo->sc = NULL;
  2639. /* Prepare the attributes for the data to be passed to OSI. */
  2640. memset(&ndata, 0, sizeof(struct octnic_data_pkt));
  2641. ndata.buf = (void *)finfo;
  2642. ndata.q_no = iq_no;
  2643. if (netif_is_multiqueue(netdev)) {
  2644. if (octnet_iq_is_full(oct, ndata.q_no)) {
  2645. /* defer sending if queue is full */
  2646. netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
  2647. ndata.q_no);
  2648. stats->tx_iq_busy++;
  2649. return NETDEV_TX_BUSY;
  2650. }
  2651. } else {
  2652. if (octnet_iq_is_full(oct, lio->txq)) {
  2653. /* defer sending if queue is full */
  2654. stats->tx_iq_busy++;
  2655. netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
  2656. lio->txq);
  2657. return NETDEV_TX_BUSY;
  2658. }
  2659. }
  2660. /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
  2661. * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
  2662. */
  2663. ndata.datasize = skb->len;
  2664. cmdsetup.u64 = 0;
  2665. cmdsetup.s.iq_no = iq_no;
  2666. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2667. if (skb->encapsulation) {
  2668. cmdsetup.s.tnl_csum = 1;
  2669. stats->tx_vxlan++;
  2670. } else {
  2671. cmdsetup.s.transport_csum = 1;
  2672. }
  2673. }
  2674. if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
  2675. skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
  2676. cmdsetup.s.timestamp = 1;
  2677. }
  2678. if (skb_shinfo(skb)->nr_frags == 0) {
  2679. cmdsetup.s.u.datasize = skb->len;
  2680. octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
  2681. /* Offload checksum calculation for TCP/UDP packets */
  2682. dptr = dma_map_single(&oct->pci_dev->dev,
  2683. skb->data,
  2684. skb->len,
  2685. DMA_TO_DEVICE);
  2686. if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
  2687. dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
  2688. __func__);
  2689. return NETDEV_TX_BUSY;
  2690. }
  2691. if (OCTEON_CN23XX_PF(oct))
  2692. ndata.cmd.cmd3.dptr = dptr;
  2693. else
  2694. ndata.cmd.cmd2.dptr = dptr;
  2695. finfo->dptr = dptr;
  2696. ndata.reqtype = REQTYPE_NORESP_NET;
  2697. } else {
  2698. int i, frags;
  2699. struct skb_frag_struct *frag;
  2700. struct octnic_gather *g;
  2701. spin_lock(&lio->glist_lock[q_idx]);
  2702. g = (struct octnic_gather *)
  2703. list_delete_head(&lio->glist[q_idx]);
  2704. spin_unlock(&lio->glist_lock[q_idx]);
  2705. if (!g) {
  2706. netif_info(lio, tx_err, lio->netdev,
  2707. "Transmit scatter gather: glist null!\n");
  2708. goto lio_xmit_failed;
  2709. }
  2710. cmdsetup.s.gather = 1;
  2711. cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
  2712. octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
  2713. memset(g->sg, 0, g->sg_size);
  2714. g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
  2715. skb->data,
  2716. (skb->len - skb->data_len),
  2717. DMA_TO_DEVICE);
  2718. if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
  2719. dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
  2720. __func__);
  2721. return NETDEV_TX_BUSY;
  2722. }
  2723. add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
  2724. frags = skb_shinfo(skb)->nr_frags;
  2725. i = 1;
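/* Each octeon gather-list entry carries four buffer pointers, so
 * fragment index i lands in entry (i >> 2), slot (i & 3); e.g.
 * i = 5 -> g->sg[1].ptr[1]. Slot 0 of entry 0 already holds the
 * linear part mapped above.
 */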
  2726. while (frags--) {
  2727. frag = &skb_shinfo(skb)->frags[i - 1];
  2728. g->sg[(i >> 2)].ptr[(i & 3)] =
  2729. dma_map_page(&oct->pci_dev->dev,
  2730. frag->page.p,
  2731. frag->page_offset,
  2732. frag->size,
  2733. DMA_TO_DEVICE);
  2734. if (dma_mapping_error(&oct->pci_dev->dev,
  2735. g->sg[i >> 2].ptr[i & 3])) {
  2736. dma_unmap_single(&oct->pci_dev->dev,
  2737. g->sg[0].ptr[0],
  2738. skb->len - skb->data_len,
  2739. DMA_TO_DEVICE);
  2740. for (j = 1; j < i; j++) {
  2741. frag = &skb_shinfo(skb)->frags[j - 1];
  2742. dma_unmap_page(&oct->pci_dev->dev,
  2743. g->sg[j >> 2].ptr[j & 3],
  2744. frag->size,
  2745. DMA_TO_DEVICE);
  2746. }
  2747. dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
  2748. __func__);
  2749. return NETDEV_TX_BUSY;
  2750. }
  2751. add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
  2752. i++;
  2753. }
  2754. dptr = g->sg_dma_ptr;
  2755. if (OCTEON_CN23XX_PF(oct))
  2756. ndata.cmd.cmd3.dptr = dptr;
  2757. else
  2758. ndata.cmd.cmd2.dptr = dptr;
  2759. finfo->dptr = dptr;
  2760. finfo->g = g;
  2761. ndata.reqtype = REQTYPE_NORESP_NET_SG;
  2762. }
  2763. if (OCTEON_CN23XX_PF(oct)) {
  2764. irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
  2765. tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
  2766. } else {
  2767. irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
  2768. tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
  2769. }
  2770. if (skb_shinfo(skb)->gso_size) {
  2771. tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
  2772. tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
  2773. stats->tx_gso++;
  2774. }
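/* For hardware VLAN insertion the 16-bit TCI is split below:
 * bits 15..13 (PCP) go into irh->priority and bits 11..0 (VID)
 * into irh->vlan; e.g. TCI 0xa064 -> priority 5, vlan 100.
 */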
  2775. /* HW insert VLAN tag */
  2776. if (skb_vlan_tag_present(skb)) {
  2777. irh->priority = skb_vlan_tag_get(skb) >> 13;
  2778. irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
  2779. }
  2780. if (unlikely(cmdsetup.s.timestamp))
  2781. status = send_nic_timestamp_pkt(oct, &ndata, finfo);
  2782. else
  2783. status = octnet_send_nic_data_pkt(oct, &ndata);
  2784. if (status == IQ_SEND_FAILED)
  2785. goto lio_xmit_failed;
  2786. netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
  2787. if (status == IQ_SEND_STOP)
  2788. stop_q(lio->netdev, q_idx);
  2789. netif_trans_update(netdev);
  2790. if (tx_info->s.gso_segs)
  2791. stats->tx_done += tx_info->s.gso_segs;
  2792. else
  2793. stats->tx_done++;
  2794. stats->tx_tot_bytes += ndata.datasize;
  2795. return NETDEV_TX_OK;
  2796. lio_xmit_failed:
  2797. stats->tx_dropped++;
  2798. netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
  2799. iq_no, stats->tx_dropped);
  2800. if (dptr)
  2801. dma_unmap_single(&oct->pci_dev->dev, dptr,
  2802. ndata.datasize, DMA_TO_DEVICE);
  2803. tx_buffer_free(skb);
  2804. return NETDEV_TX_OK;
  2805. }
  2806. /** \brief Network device Tx timeout
  2807. * @param netdev pointer to network device
  2808. */
  2809. static void liquidio_tx_timeout(struct net_device *netdev)
  2810. {
  2811. struct lio *lio;
  2812. lio = GET_LIO(netdev);
  2813. netif_info(lio, tx_err, lio->netdev,
  2814. "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
  2815. netdev->stats.tx_dropped);
  2816. netif_trans_update(netdev);
  2817. txqs_wake(netdev);
  2818. }
  2819. static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
  2820. __be16 proto __attribute__((unused)),
  2821. u16 vid)
  2822. {
  2823. struct lio *lio = GET_LIO(netdev);
  2824. struct octeon_device *oct = lio->oct_dev;
  2825. struct octnic_ctrl_pkt nctrl;
  2826. int ret = 0;
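/* VLAN filters are programmed by sending an OCTNET control command
 * on the first tx queue (txpciq[0]); completion is reported
 * asynchronously via liquidio_link_ctrl_cmd_completion.
 */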
  2827. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2828. nctrl.ncmd.u64 = 0;
  2829. nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
  2830. nctrl.ncmd.s.param1 = vid;
  2831. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2832. nctrl.wait_time = 100;
  2833. nctrl.netpndev = (u64)netdev;
  2834. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2835. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2836. if (ret < 0) {
  2837. dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
  2838. ret);
  2839. }
  2840. return ret;
  2841. }
  2842. static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
  2843. __be16 proto __attribute__((unused)),
  2844. u16 vid)
  2845. {
  2846. struct lio *lio = GET_LIO(netdev);
  2847. struct octeon_device *oct = lio->oct_dev;
  2848. struct octnic_ctrl_pkt nctrl;
  2849. int ret = 0;
  2850. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2851. nctrl.ncmd.u64 = 0;
  2852. nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
  2853. nctrl.ncmd.s.param1 = vid;
  2854. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2855. nctrl.wait_time = 100;
  2856. nctrl.netpndev = (u64)netdev;
  2857. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2858. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2859. if (ret < 0) {
  2860. dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
  2861. ret);
  2862. }
  2863. return ret;
  2864. }
  2865. /** Send command to enable/disable RX checksum offload
  2866. * @param netdev pointer to network device
  2867. * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
  2868. * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
  2869. * OCTNET_CMD_RXCSUM_DISABLE
  2870. * @returns status of the command send (negative on failure)
  2871. */
  2872. static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
  2873. u8 rx_cmd)
  2874. {
  2875. struct lio *lio = GET_LIO(netdev);
  2876. struct octeon_device *oct = lio->oct_dev;
  2877. struct octnic_ctrl_pkt nctrl;
  2878. int ret = 0;
  memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2879. nctrl.ncmd.u64 = 0;
  2880. nctrl.ncmd.s.cmd = command;
  2881. nctrl.ncmd.s.param1 = rx_cmd;
  2882. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2883. nctrl.wait_time = 100;
  2884. nctrl.netpndev = (u64)netdev;
  2885. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2886. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2887. if (ret < 0) {
  2888. dev_err(&oct->pci_dev->dev,
  2889. "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
  2890. ret);
  2891. }
  2892. return ret;
  2893. }
  2894. /** Send command to add/delete a VxLAN UDP port to the firmware
  2895. * @param netdev pointer to network device
  2896. * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
  2897. * @param vxlan_port VxLAN port to be added or deleted
  2898. * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
  2899. * OCTNET_CMD_VXLAN_PORT_DEL
  2900. * @returns status of the command send (negative on failure)
  2901. */
  2902. static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
  2903. u16 vxlan_port, u8 vxlan_cmd_bit)
  2904. {
  2905. struct lio *lio = GET_LIO(netdev);
  2906. struct octeon_device *oct = lio->oct_dev;
  2907. struct octnic_ctrl_pkt nctrl;
  2908. int ret = 0;
  memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  2909. nctrl.ncmd.u64 = 0;
  2910. nctrl.ncmd.s.cmd = command;
  2911. nctrl.ncmd.s.more = vxlan_cmd_bit;
  2912. nctrl.ncmd.s.param1 = vxlan_port;
  2913. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  2914. nctrl.wait_time = 100;
  2915. nctrl.netpndev = (u64)netdev;
  2916. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  2917. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  2918. if (ret < 0) {
  2919. dev_err(&oct->pci_dev->dev,
  2920. "VxLAN port add/delete failed in core (ret:0x%x)\n",
  2921. ret);
  2922. }
  2923. return ret;
  2924. }
  2925. /** \brief Net device fix features
  2926. * @param netdev pointer to network device
  2927. * @param request features requested
  2928. * @returns updated features list
  2929. */
  2930. static netdev_features_t liquidio_fix_features(struct net_device *netdev,
  2931. netdev_features_t request)
  2932. {
  2933. struct lio *lio = netdev_priv(netdev);
  2934. if ((request & NETIF_F_RXCSUM) &&
  2935. !(lio->dev_capability & NETIF_F_RXCSUM))
  2936. request &= ~NETIF_F_RXCSUM;
  2937. if ((request & NETIF_F_HW_CSUM) &&
  2938. !(lio->dev_capability & NETIF_F_HW_CSUM))
  2939. request &= ~NETIF_F_HW_CSUM;
  2940. if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
  2941. request &= ~NETIF_F_TSO;
  2942. if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
  2943. request &= ~NETIF_F_TSO6;
  2944. if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
  2945. request &= ~NETIF_F_LRO;
  2946. /* Disable LRO if RXCSUM is off */
  2947. if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
  2948. (lio->dev_capability & NETIF_F_LRO))
  2949. request &= ~NETIF_F_LRO;
  2950. return request;
  2951. }
  2952. /** \brief Net device set features
  2953. * @param netdev pointer to network device
  2954. * @param features features to enable/disable
  2955. */
  2956. static int liquidio_set_features(struct net_device *netdev,
  2957. netdev_features_t features)
  2958. {
  2959. struct lio *lio = netdev_priv(netdev);
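/* Note: the early return below acts only on the LRO bit, so the
 * LRO and RX-checksum commands that follow are issued only for
 * feature changes that also toggle NETIF_F_LRO.
 */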
  2960. if (!((netdev->features ^ features) & NETIF_F_LRO))
  2961. return 0;
  2962. if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
  2963. liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
  2964. OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
  2965. else if (!(features & NETIF_F_LRO) &&
  2966. (lio->dev_capability & NETIF_F_LRO))
  2967. liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
  2968. OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
  2969. /* Sending command to firmware to enable/disable RX checksum
  2970. * offload settings using ethtool
  2971. */
  2972. if (!(netdev->features & NETIF_F_RXCSUM) &&
  2973. (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
  2974. (features & NETIF_F_RXCSUM))
  2975. liquidio_set_rxcsum_command(netdev,
  2976. OCTNET_CMD_TNL_RX_CSUM_CTL,
  2977. OCTNET_CMD_RXCSUM_ENABLE);
  2978. else if ((netdev->features & NETIF_F_RXCSUM) &&
  2979. (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
  2980. !(features & NETIF_F_RXCSUM))
  2981. liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
  2982. OCTNET_CMD_RXCSUM_DISABLE);
  2983. return 0;
  2984. }
  2985. static void liquidio_add_vxlan_port(struct net_device *netdev,
  2986. struct udp_tunnel_info *ti)
  2987. {
  2988. if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
  2989. return;
  2990. liquidio_vxlan_port_command(netdev,
  2991. OCTNET_CMD_VXLAN_PORT_CONFIG,
  2992. htons(ti->port),
  2993. OCTNET_CMD_VXLAN_PORT_ADD);
  2994. }
  2995. static void liquidio_del_vxlan_port(struct net_device *netdev,
  2996. struct udp_tunnel_info *ti)
  2997. {
  2998. if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
  2999. return;
  3000. liquidio_vxlan_port_command(netdev,
  3001. OCTNET_CMD_VXLAN_PORT_CONFIG,
  3002. htons(ti->port),
  3003. OCTNET_CMD_VXLAN_PORT_DEL);
  3004. }
  3005. static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
  3006. u8 *mac, bool is_admin_assigned)
  3007. {
  3008. struct lio *lio = GET_LIO(netdev);
  3009. struct octeon_device *oct = lio->oct_dev;
  3010. struct octnic_ctrl_pkt nctrl;
  3011. if (!is_valid_ether_addr(mac))
  3012. return -EINVAL;
  3013. if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
  3014. return -EINVAL;
  3015. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  3016. nctrl.ncmd.u64 = 0;
  3017. nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
  3018. /* vfidx is 0 based, but vf_num (param1) is 1 based */
  3019. nctrl.ncmd.s.param1 = vfidx + 1;
  3020. nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
  3021. nctrl.ncmd.s.more = 1;
  3022. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  3023. nctrl.cb_fn = 0;
  3024. nctrl.wait_time = LIO_CMD_WAIT_TM;
  3025. nctrl.udd[0] = 0;
  3026. /* The MAC Address is presented in network byte order. */
  3027. ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
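/* The 6-byte MAC occupies bytes 2..7 of the 8-byte udd[0]; this is
 * the same packing that liquidio_get_vf_config() later unpacks with
 * 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx].
 */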
  3028. oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
  3029. octnet_send_nic_ctrl_pkt(oct, &nctrl);
  3030. return 0;
  3031. }
  3032. static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
  3033. {
  3034. struct lio *lio = GET_LIO(netdev);
  3035. struct octeon_device *oct = lio->oct_dev;
  3036. int retval;
  3037. retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
  3038. if (!retval)
  3039. cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
  3040. return retval;
  3041. }
  3042. static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
  3043. u16 vlan, u8 qos, __be16 vlan_proto)
  3044. {
  3045. struct lio *lio = GET_LIO(netdev);
  3046. struct octeon_device *oct = lio->oct_dev;
  3047. struct octnic_ctrl_pkt nctrl;
  3048. u16 vlantci;
  3049. if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
  3050. return -EINVAL;
  3051. if (vlan_proto != htons(ETH_P_8021Q))
  3052. return -EPROTONOSUPPORT;
  3053. if (vlan >= VLAN_N_VID || qos > 7)
  3054. return -EINVAL;
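/* The TCI programmed below is the VID in the low 12 bits plus the
 * 3-bit priority shifted by VLAN_PRIO_SHIFT (13); e.g. vlan 100,
 * qos 5 -> vlantci 0xa064.
 */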
  3055. if (vlan)
  3056. vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
  3057. else
  3058. vlantci = 0;
  3059. if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
  3060. return 0;
  3061. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  3062. if (vlan)
  3063. nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
  3064. else
  3065. nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
  3066. nctrl.ncmd.s.param1 = vlantci;
  3067. nctrl.ncmd.s.param2 =
  3068. vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
  3069. nctrl.ncmd.s.more = 0;
  3070. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  3071. nctrl.cb_fn = 0;
  3072. nctrl.wait_time = LIO_CMD_WAIT_TM;
  3073. octnet_send_nic_ctrl_pkt(oct, &nctrl);
  3074. oct->sriov_info.vf_vlantci[vfidx] = vlantci;
  3075. return 0;
  3076. }
  3077. static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
  3078. struct ifla_vf_info *ivi)
  3079. {
  3080. struct lio *lio = GET_LIO(netdev);
  3081. struct octeon_device *oct = lio->oct_dev;
  3082. u8 *macaddr;
  3083. if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
  3084. return -EINVAL;
  3085. ivi->vf = vfidx;
  3086. macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
  3087. ether_addr_copy(&ivi->mac[0], macaddr);
  3088. ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
  3089. ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
  3090. ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
  3091. return 0;
  3092. }
  3093. static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
  3094. int linkstate)
  3095. {
  3096. struct lio *lio = GET_LIO(netdev);
  3097. struct octeon_device *oct = lio->oct_dev;
  3098. struct octnic_ctrl_pkt nctrl;
  3099. if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
  3100. return -EINVAL;
  3101. if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
  3102. return 0;
  3103. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  3104. nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
  3105. nctrl.ncmd.s.param1 =
  3106. vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
  3107. nctrl.ncmd.s.param2 = linkstate;
  3108. nctrl.ncmd.s.more = 0;
  3109. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  3110. nctrl.cb_fn = 0;
  3111. nctrl.wait_time = LIO_CMD_WAIT_TM;
  3112. octnet_send_nic_ctrl_pkt(oct, &nctrl);
  3113. oct->sriov_info.vf_linkstate[vfidx] = linkstate;
  3114. return 0;
  3115. }
  3116. static const struct net_device_ops lionetdevops = {
  3117. .ndo_open = liquidio_open,
  3118. .ndo_stop = liquidio_stop,
  3119. .ndo_start_xmit = liquidio_xmit,
  3120. .ndo_get_stats = liquidio_get_stats,
  3121. .ndo_set_mac_address = liquidio_set_mac,
  3122. .ndo_set_rx_mode = liquidio_set_mcast_list,
  3123. .ndo_tx_timeout = liquidio_tx_timeout,
  3124. .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
  3125. .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
  3126. .ndo_change_mtu = liquidio_change_mtu,
  3127. .ndo_do_ioctl = liquidio_ioctl,
  3128. .ndo_fix_features = liquidio_fix_features,
  3129. .ndo_set_features = liquidio_set_features,
  3130. .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
  3131. .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
  3132. .ndo_set_vf_mac = liquidio_set_vf_mac,
  3133. .ndo_set_vf_vlan = liquidio_set_vf_vlan,
  3134. .ndo_get_vf_config = liquidio_get_vf_config,
  3135. .ndo_set_vf_link_state = liquidio_set_vf_link_state,
  3136. };
  3137. /** \brief Entry point for the liquidio module
  3138. */
  3139. static int __init liquidio_init(void)
  3140. {
  3141. int i;
  3142. struct handshake *hs;
  3143. init_completion(&first_stage);
  3144. octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
  3145. if (liquidio_init_pci())
  3146. return -EINVAL;
  3147. wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
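/* Two-phase handshake with each probed device: first wait for its
 * PCI/driver init to finish (hs->init), then give the firmware up
 * to 30 seconds to report that the NIC application started
 * (hs->started); either failure tears down the PCI driver.
 */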
  3148. for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
  3149. hs = &handshake[i];
  3150. if (hs->pci_dev) {
  3151. wait_for_completion(&hs->init);
  3152. if (!hs->init_ok) {
  3153. /* init handshake failed */
  3154. dev_err(&hs->pci_dev->dev,
  3155. "Failed to init device\n");
  3156. liquidio_deinit_pci();
  3157. return -EIO;
  3158. }
  3159. }
  3160. }
  3161. for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
  3162. hs = &handshake[i];
  3163. if (hs->pci_dev) {
  3164. wait_for_completion_timeout(&hs->started,
  3165. msecs_to_jiffies(30000));
  3166. if (!hs->started_ok) {
  3167. /* starter handshake failed */
  3168. dev_err(&hs->pci_dev->dev,
  3169. "Firmware failed to start\n");
  3170. liquidio_deinit_pci();
  3171. return -EIO;
  3172. }
  3173. }
  3174. }
  3175. return 0;
  3176. }
  3177. static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
  3178. {
  3179. struct octeon_device *oct = (struct octeon_device *)buf;
  3180. struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
  3181. int gmxport = 0;
  3182. union oct_link_status *ls;
  3183. int i;
  3184. if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
  3185. dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
  3186. recv_pkt->buffer_size[0],
  3187. recv_pkt->rh.r_nic_info.gmxport);
  3188. goto nic_info_err;
  3189. }
  3190. gmxport = recv_pkt->rh.r_nic_info.gmxport;
  3191. ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
  3192. octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
  3193. for (i = 0; i < oct->ifcount; i++) {
  3194. if (oct->props[i].gmxport == gmxport) {
  3195. update_link_status(oct->props[i].netdev, ls);
  3196. break;
  3197. }
  3198. }
  3199. nic_info_err:
  3200. for (i = 0; i < recv_pkt->buffer_count; i++)
  3201. recv_buffer_free(recv_pkt->buffer_ptr[i]);
  3202. octeon_free_recv_info(recv_info);
  3203. return 0;
  3204. }
  3205. /**
  3206. * \brief Setup network interfaces
  3207. * @param octeon_dev octeon device
  3208. *
  3209. * Called during init time for each device. It assumes the NIC
  3210. * is already up and running. The link information for each
  3211. * interface is passed in link_info.
  3212. */
  3213. static int setup_nic_devices(struct octeon_device *octeon_dev)
  3214. {
  3215. struct lio *lio = NULL;
  3216. struct net_device *netdev;
  3217. u8 mac[6], i, j;
  3218. struct octeon_soft_command *sc;
  3219. struct liquidio_if_cfg_context *ctx;
  3220. struct liquidio_if_cfg_resp *resp;
  3221. struct octdev_props *props;
  3222. int retval, num_iqueues, num_oqueues;
  3223. union oct_nic_if_cfg if_cfg;
  3224. unsigned int base_queue;
  3225. unsigned int gmx_port_id;
  3226. u32 resp_size, ctx_size, data_size;
  3227. u32 ifidx_or_pfnum;
  3228. struct lio_version *vdata;
  3229. /* This is to handle link status changes */
  3230. octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
  3231. OPCODE_NIC_INFO,
  3232. lio_nic_info, octeon_dev);
  3233. /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
  3234. * They are handled directly.
  3235. */
  3236. octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
  3237. free_netbuf);
  3238. octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
  3239. free_netsgbuf);
  3240. octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
  3241. free_netsgbuf_with_resp);
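/* Per-interface setup loop: send an OPCODE_NIC_IF_CFG soft command,
 * sleep until the firmware response arrives, size the rx/tx queue
 * sets from the returned masks, then allocate and register the
 * netdev and wire up its features, MAC address and I/O queues.
 */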
  3242. for (i = 0; i < octeon_dev->ifcount; i++) {
  3243. resp_size = sizeof(struct liquidio_if_cfg_resp);
  3244. ctx_size = sizeof(struct liquidio_if_cfg_context);
  3245. data_size = sizeof(struct lio_version);
  3246. sc = (struct octeon_soft_command *)
  3247. octeon_alloc_soft_command(octeon_dev, data_size,
  3248. resp_size, ctx_size);
  3249. resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
  3250. ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
  3251. vdata = (struct lio_version *)sc->virtdptr;
  3252. *((u64 *)vdata) = 0;
  3253. vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
  3254. vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
  3255. vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
  3256. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3257. num_iqueues = octeon_dev->sriov_info.num_pf_rings;
  3258. num_oqueues = octeon_dev->sriov_info.num_pf_rings;
  3259. base_queue = octeon_dev->sriov_info.pf_srn;
  3260. gmx_port_id = octeon_dev->pf_num;
  3261. ifidx_or_pfnum = octeon_dev->pf_num;
  3262. } else {
  3263. num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
  3264. octeon_get_conf(octeon_dev), i);
  3265. num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
  3266. octeon_get_conf(octeon_dev), i);
  3267. base_queue = CFG_GET_BASE_QUE_NIC_IF(
  3268. octeon_get_conf(octeon_dev), i);
  3269. gmx_port_id = CFG_GET_GMXID_NIC_IF(
  3270. octeon_get_conf(octeon_dev), i);
  3271. ifidx_or_pfnum = i;
  3272. }
  3273. dev_dbg(&octeon_dev->pci_dev->dev,
  3274. "requesting config for interface %d, iqs %d, oqs %d\n",
  3275. ifidx_or_pfnum, num_iqueues, num_oqueues);
  3276. WRITE_ONCE(ctx->cond, 0);
  3277. ctx->octeon_id = lio_get_device_id(octeon_dev);
  3278. init_waitqueue_head(&ctx->wc);
  3279. if_cfg.u64 = 0;
  3280. if_cfg.s.num_iqueues = num_iqueues;
  3281. if_cfg.s.num_oqueues = num_oqueues;
  3282. if_cfg.s.base_queue = base_queue;
  3283. if_cfg.s.gmx_port_id = gmx_port_id;
  3284. sc->iq_no = 0;
  3285. octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
  3286. OPCODE_NIC_IF_CFG, 0,
  3287. if_cfg.u64, 0);
  3288. sc->callback = if_cfg_callback;
  3289. sc->callback_arg = sc;
  3290. sc->wait_time = 3000;
  3291. retval = octeon_send_soft_command(octeon_dev, sc);
  3292. if (retval == IQ_SEND_FAILED) {
  3293. dev_err(&octeon_dev->pci_dev->dev,
  3294. "iq/oq config failed status: %x\n",
  3295. retval);
  3296. /* Soft instr is freed by driver in case of failure. */
  3297. goto setup_nic_dev_fail;
  3298. }
  3299. /* Sleep on a wait queue till the cond flag indicates that the
  3300. * response arrived or timed-out.
  3301. */
  3302. if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
  3303. dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
  3304. goto setup_nic_wait_intr;
  3305. }
  3306. retval = resp->status;
  3307. if (retval) {
  3308. dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
  3309. goto setup_nic_dev_fail;
  3310. }
  3311. octeon_swap_8B_data((u64 *)(&resp->cfg_info),
  3312. (sizeof(struct liquidio_if_cfg_info)) >> 3);
  3313. num_iqueues = hweight64(resp->cfg_info.iqmask);
  3314. num_oqueues = hweight64(resp->cfg_info.oqmask);
  3315. if (!(num_iqueues) || !(num_oqueues)) {
  3316. dev_err(&octeon_dev->pci_dev->dev,
  3317. "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
  3318. resp->cfg_info.iqmask,
  3319. resp->cfg_info.oqmask);
  3320. goto setup_nic_dev_fail;
  3321. }
  3322. dev_dbg(&octeon_dev->pci_dev->dev,
  3323. "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
  3324. i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
  3325. num_iqueues, num_oqueues);
  3326. netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
  3327. if (!netdev) {
  3328. dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
  3329. goto setup_nic_dev_fail;
  3330. }
  3331. SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
  3332. /* Associate the routines that will handle different
  3333. * netdev tasks.
  3334. */
  3335. netdev->netdev_ops = &lionetdevops;
  3336. lio = GET_LIO(netdev);
  3337. memset(lio, 0, sizeof(struct lio));
  3338. lio->ifidx = ifidx_or_pfnum;
  3339. props = &octeon_dev->props[i];
  3340. props->gmxport = resp->cfg_info.linfo.gmxport;
  3341. props->netdev = netdev;
  3342. lio->linfo.num_rxpciq = num_oqueues;
  3343. lio->linfo.num_txpciq = num_iqueues;
  3344. for (j = 0; j < num_oqueues; j++) {
  3345. lio->linfo.rxpciq[j].u64 =
  3346. resp->cfg_info.linfo.rxpciq[j].u64;
  3347. }
  3348. for (j = 0; j < num_iqueues; j++) {
  3349. lio->linfo.txpciq[j].u64 =
  3350. resp->cfg_info.linfo.txpciq[j].u64;
  3351. }
  3352. lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
  3353. lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
  3354. lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
  3355. lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
  3356. if (OCTEON_CN23XX_PF(octeon_dev) ||
  3357. OCTEON_CN6XXX(octeon_dev)) {
  3358. lio->dev_capability = NETIF_F_HIGHDMA
  3359. | NETIF_F_IP_CSUM
  3360. | NETIF_F_IPV6_CSUM
  3361. | NETIF_F_SG | NETIF_F_RXCSUM
  3362. | NETIF_F_GRO
  3363. | NETIF_F_TSO | NETIF_F_TSO6
  3364. | NETIF_F_LRO;
  3365. }
  3366. netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
  3367. /* Copy of transmit encapsulation capabilities:
  3368. * TSO, TSO6, Checksums for this device
  3369. */
  3370. lio->enc_dev_capability = NETIF_F_IP_CSUM
  3371. | NETIF_F_IPV6_CSUM
  3372. | NETIF_F_GSO_UDP_TUNNEL
  3373. | NETIF_F_HW_CSUM | NETIF_F_SG
  3374. | NETIF_F_RXCSUM
  3375. | NETIF_F_TSO | NETIF_F_TSO6
  3376. | NETIF_F_LRO;
  3377. netdev->hw_enc_features = (lio->enc_dev_capability &
  3378. ~NETIF_F_LRO);
  3379. lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
  3380. netdev->vlan_features = lio->dev_capability;
  3381. /* Add any unchangeable hw features */
  3382. lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
  3383. NETIF_F_HW_VLAN_CTAG_RX |
  3384. NETIF_F_HW_VLAN_CTAG_TX;
  3385. netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
  3386. netdev->hw_features = lio->dev_capability;
  3387. /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
  3388. netdev->hw_features = netdev->hw_features &
  3389. ~NETIF_F_HW_VLAN_CTAG_RX;
  3390. /* MTU range: 68 - 16000 */
  3391. netdev->min_mtu = LIO_MIN_MTU_SIZE;
  3392. netdev->max_mtu = LIO_MAX_MTU_SIZE;
  3393. /* Point to the properties for octeon device to which this
  3394. * interface belongs.
  3395. */
  3396. lio->oct_dev = octeon_dev;
  3397. lio->octprops = props;
  3398. lio->netdev = netdev;
  3399. dev_dbg(&octeon_dev->pci_dev->dev,
  3400. "if%d gmx: %d hw_addr: 0x%llx\n", i,
  3401. lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
  3402. for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
  3403. u8 vfmac[ETH_ALEN];
  3404. random_ether_addr(&vfmac[0]);
  3405. if (__liquidio_set_vf_mac(netdev, j,
  3406. &vfmac[0], false)) {
  3407. dev_err(&octeon_dev->pci_dev->dev,
  3408. "Error setting VF%d MAC address\n",
  3409. j);
  3410. goto setup_nic_dev_fail;
  3411. }
  3412. }
  3413. /* 64-bit swap required on LE machines */
  3414. octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
  3415. for (j = 0; j < 6; j++)
  3416. mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
  3417. /* Copy MAC Address to OS network device structure */
  3418. ether_addr_copy(netdev->dev_addr, mac);
  3419. /* By default all interfaces on a single Octeon use the same
  3420. * tx and rx queues
  3421. */
  3422. lio->txq = lio->linfo.txpciq[0].s.q_no;
  3423. lio->rxq = lio->linfo.rxpciq[0].s.q_no;
  3424. if (setup_io_queues(octeon_dev, i)) {
  3425. dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
  3426. goto setup_nic_dev_fail;
  3427. }
  3428. ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
  3429. lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
  3430. lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
  3431. if (setup_glists(octeon_dev, lio, num_iqueues)) {
  3432. dev_err(&octeon_dev->pci_dev->dev,
  3433. "Gather list allocation failed\n");
  3434. goto setup_nic_dev_fail;
  3435. }
  3436. /* Register ethtool support */
  3437. liquidio_set_ethtool_ops(netdev);
  3438. if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
  3439. octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
  3440. else
  3441. octeon_dev->priv_flags = 0x0;
  3442. if (netdev->features & NETIF_F_LRO)
  3443. liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
  3444. OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
  3445. liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
  3446. if ((debug != -1) && (debug & NETIF_MSG_HW))
  3447. liquidio_set_feature(netdev,
  3448. OCTNET_CMD_VERBOSE_ENABLE, 0);
  3449. if (setup_link_status_change_wq(netdev))
  3450. goto setup_nic_dev_fail;
  3451. /* Register the network device with the OS */
  3452. if (register_netdev(netdev)) {
  3453. dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
  3454. goto setup_nic_dev_fail;
  3455. }
  3456. dev_dbg(&octeon_dev->pci_dev->dev,
  3457. "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
  3458. i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  3459. netif_carrier_off(netdev);
  3460. lio->link_changes++;
  3461. ifstate_set(lio, LIO_IFSTATE_REGISTERED);
  3462. /* Sending command to firmware to enable Rx checksum offload
  3463. * by default at the time of setup of Liquidio driver for
  3464. * this device
  3465. */
  3466. liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
  3467. OCTNET_CMD_RXCSUM_ENABLE);
  3468. liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
  3469. OCTNET_CMD_TXCSUM_ENABLE);
  3470. dev_dbg(&octeon_dev->pci_dev->dev,
  3471. "NIC ifidx:%d Setup successful\n", i);
  3472. octeon_free_soft_command(octeon_dev, sc);
  3473. }
  3474. return 0;
  3475. setup_nic_dev_fail:
  3476. octeon_free_soft_command(octeon_dev, sc);
  3477. setup_nic_wait_intr:
  3478. while (i--) {
  3479. dev_err(&octeon_dev->pci_dev->dev,
  3480. "NIC ifidx:%d Setup failed\n", i);
  3481. liquidio_destroy_nic_device(octeon_dev, i);
  3482. }
  3483. return -ENODEV;
  3484. }
  3485. #ifdef CONFIG_PCI_IOV
  3486. static int octeon_enable_sriov(struct octeon_device *oct)
  3487. {
  3488. unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
  3489. struct pci_dev *vfdev;
  3490. int err;
  3491. u32 u;
  3492. if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
  3493. err = pci_enable_sriov(oct->pci_dev,
  3494. oct->sriov_info.num_vfs_alloced);
  3495. if (err) {
  3496. dev_err(&oct->pci_dev->dev,
  3497. "OCTEON: Failed to enable PCI sriov: %d\n",
  3498. err);
  3499. oct->sriov_info.num_vfs_alloced = 0;
  3500. return err;
  3501. }
  3502. oct->sriov_info.sriov_enabled = 1;
  3503. /* init lookup table that maps DPI ring number to VF pci_dev
  3504. * struct pointer
  3505. */
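/* E.g. with rings_per_vf == 8, the first matching VF's pci_dev is
 * stored at lut[0], the next at lut[8], and so on, so a DPI ring
 * number indexes straight to the VF that owns it.
 */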
  3506. u = 0;
  3507. vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
  3508. OCTEON_CN23XX_VF_VID, NULL);
  3509. while (vfdev) {
  3510. if (vfdev->is_virtfn &&
  3511. (vfdev->physfn == oct->pci_dev)) {
  3512. oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
  3513. vfdev;
  3514. u += oct->sriov_info.rings_per_vf;
  3515. }
  3516. vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
  3517. OCTEON_CN23XX_VF_VID, vfdev);
  3518. }
  3519. }
  3520. return num_vfs_alloced;
  3521. }
  3522. static int lio_pci_sriov_disable(struct octeon_device *oct)
  3523. {
  3524. int u;
  3525. if (pci_vfs_assigned(oct->pci_dev)) {
  3526. dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
  3527. return -EPERM;
  3528. }
  3529. pci_disable_sriov(oct->pci_dev);
  3530. u = 0;
  3531. while (u < MAX_POSSIBLE_VFS) {
  3532. oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
  3533. u += oct->sriov_info.rings_per_vf;
  3534. }
  3535. oct->sriov_info.num_vfs_alloced = 0;
  3536. dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
  3537. oct->pf_num);
  3538. return 0;
  3539. }
  3540. static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
  3541. {
  3542. struct octeon_device *oct = pci_get_drvdata(dev);
  3543. int ret = 0;
  3544. if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
  3545. (oct->sriov_info.sriov_enabled)) {
  3546. dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
  3547. oct->pf_num, num_vfs);
  3548. return 0;
  3549. }
  3550. if (!num_vfs) {
  3551. ret = lio_pci_sriov_disable(oct);
  3552. } else if (num_vfs > oct->sriov_info.max_vfs) {
  3553. dev_err(&oct->pci_dev->dev,
  3554. "OCTEON: Max allowed VFs:%d user requested:%d",
  3555. oct->sriov_info.max_vfs, num_vfs);
  3556. ret = -EPERM;
  3557. } else {
  3558. oct->sriov_info.num_vfs_alloced = num_vfs;
  3559. ret = octeon_enable_sriov(oct);
  3560. dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
  3561. oct->pf_num, num_vfs);
  3562. }
  3563. return ret;
  3564. }
  3565. #endif
  3566. /**
  3567. * \brief initialize the NIC
  3568. * @param oct octeon device
  3569. *
  3570. * This initialization routine is called once the Octeon device application is
  3571. * up and running
  3572. */
  3573. static int liquidio_init_nic_module(struct octeon_device *oct)
  3574. {
  3575. struct oct_intrmod_cfg *intrmod_cfg;
  3576. int i, retval = 0;
  3577. int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
  3578. dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
  3579. /* only default iq and oq were initialized
  3580. * initialize the rest as well
  3581. */
  3582. /* run port_config command for each port */
  3583. oct->ifcount = num_nic_ports;
  3584. memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
  3585. for (i = 0; i < MAX_OCTEON_LINKS; i++)
  3586. oct->props[i].gmxport = -1;
  3587. retval = setup_nic_devices(oct);
  3588. if (retval) {
  3589. dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
  3590. goto octnet_init_failure;
  3591. }
  3592. liquidio_ptp_init(oct);
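/* The defaults below seed the interrupt-moderation state in
 * oct->intrmod: packet-rate thresholds and per-queue count/timer
 * triggers for rx and tx, which can later be adjusted at runtime
 * through the ethtool coalesce settings.
 */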
  3593. /* Initialize interrupt moderation params */
  3594. intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
  3595. intrmod_cfg->rx_enable = 1;
  3596. intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
  3597. intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
  3598. intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
  3599. intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
  3600. intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
  3601. intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
  3602. intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
  3603. intrmod_cfg->tx_enable = 1;
  3604. intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
  3605. intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
  3606. intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
  3607. intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
  3608. intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
  3609. dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
  3610. return retval;
  3611. octnet_init_failure:
  3612. oct->ifcount = 0;
  3613. return retval;
  3614. }
  3615. /**
  3616. * \brief starter callback that invokes the remaining initialization work after
  3617. * the NIC is up and running.
  3618. * @param work pointer to the work_struct embedded in a struct cavium_wk
  3619. */
  3620. static void nic_starter(struct work_struct *work)
  3621. {
  3622. struct octeon_device *oct;
  3623. struct cavium_wk *wk = (struct cavium_wk *)work;
  3624. oct = (struct octeon_device *)wk->ctxptr;
  3625. if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
  3626. return;
  3627. /* If the status of the device is CORE_OK, the core
  3628. * application has reported its application type. Call
  3629. * any registered handlers now and move to the RUNNING
  3630. * state.
  3631. */
  3632. if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
  3633. schedule_delayed_work(&oct->nic_poll_work.work,
  3634. LIQUIDIO_STARTER_POLL_INTERVAL_MS);
  3635. return;
  3636. }
  3637. atomic_set(&oct->status, OCT_DEV_RUNNING);
  3638. if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
  3639. dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
  3640. if (liquidio_init_nic_module(oct))
  3641. dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
  3642. else
  3643. handshake[oct->octeon_id].started_ok = 1;
  3644. } else {
  3645. dev_err(&oct->pci_dev->dev,
  3646. "Unexpected application running on NIC (%d). Check firmware.\n",
  3647. oct->app_mode);
  3648. }
  3649. complete(&handshake[oct->octeon_id].started);
  3650. }
  3651. static int
  3652. octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
  3653. {
  3654. struct octeon_device *oct = (struct octeon_device *)buf;
  3655. struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
  3656. int i, notice, vf_idx;
  3657. u64 *data, vf_num;
  3658. notice = recv_pkt->rh.r.ossp;
  3659. data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);
  3660. /* the first 64-bit word of data is the vf_num */
  3661. vf_num = data[0];
  3662. octeon_swap_8B_data(&vf_num, 1);
  3663. vf_idx = (int)vf_num - 1;
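/* A VF_DRV_LOADED notice pins this PF module via try_module_get()
 * so it cannot be unloaded while a VF driver is active; the
 * reference is dropped on VF_DRV_REMOVED, and VF_DRV_MACADDR_CHANGED
 * just refreshes the cached per-VF MAC address.
 */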
  3664. if (notice == VF_DRV_LOADED) {
  3665. if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
  3666. oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
  3667. dev_info(&oct->pci_dev->dev,
  3668. "driver for VF%d was loaded\n", vf_idx);
  3669. try_module_get(THIS_MODULE);
  3670. }
  3671. } else if (notice == VF_DRV_REMOVED) {
  3672. if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
  3673. oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
  3674. dev_info(&oct->pci_dev->dev,
  3675. "driver for VF%d was removed\n", vf_idx);
  3676. module_put(THIS_MODULE);
  3677. }
  3678. } else if (notice == VF_DRV_MACADDR_CHANGED) {
  3679. u8 *b = (u8 *)&data[1];
  3680. oct->sriov_info.vf_macaddr[vf_idx] = data[1];
  3681. dev_info(&oct->pci_dev->dev,
  3682. "VF driver changed VF%d's MAC address to %pM\n",
  3683. vf_idx, b + 2);
  3684. }
  3685. for (i = 0; i < recv_pkt->buffer_count; i++)
  3686. recv_buffer_free(recv_pkt->buffer_ptr[i]);
  3687. octeon_free_recv_info(recv_info);
  3688. return 0;
  3689. }
  3690. /**
  3691. * \brief Device initialization for each Octeon device that is probed
  3692. * @param octeon_dev octeon device
  3693. */
  3694. static int octeon_device_init(struct octeon_device *octeon_dev)
  3695. {
  3696. int j, ret;
  3697. int fw_loaded = 0;
  3698. char bootcmd[] = "\n";
  3699. struct octeon_device_priv *oct_priv =
  3700. (struct octeon_device_priv *)octeon_dev->priv;
  3701. atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
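/* Each successful step below advances octeon_dev->status (PCI
 * enable/map, dispatch init, sc buffer pool, instruction queues,
 * response list, droqs, interrupts, I/O queues, consoles) up to
 * OCT_DEV_HOST_OK, so the teardown path can tell how far init got.
 */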
  3702. /* Enable access to the octeon device and make its DMA capability
  3703. * known to the OS.
  3704. */
  3705. if (octeon_pci_os_setup(octeon_dev))
  3706. return 1;
  3707. atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
  3708. /* Identify the Octeon type and map the BAR address space. */
  3709. if (octeon_chip_specific_setup(octeon_dev)) {
  3710. dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
  3711. return 1;
  3712. }
  3713. atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
  3714. octeon_dev->app_mode = CVM_DRV_INVALID_APP;
  3715. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3716. if (!cn23xx_fw_loaded(octeon_dev)) {
  3717. fw_loaded = 0;
  3718. /* Do a soft reset of the Octeon device. */
  3719. if (octeon_dev->fn_list.soft_reset(octeon_dev))
  3720. return 1;
  3721. /* things might have changed */
  3722. if (!cn23xx_fw_loaded(octeon_dev))
  3723. fw_loaded = 0;
  3724. else
  3725. fw_loaded = 1;
  3726. } else {
  3727. fw_loaded = 1;
  3728. }
  3729. } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
  3730. return 1;
  3731. }
  3732. /* Initialize the dispatch mechanism used to push packets arriving on
  3733. * Octeon Output queues.
  3734. */
  3735. if (octeon_init_dispatch_list(octeon_dev))
  3736. return 1;
  3737. octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
  3738. OPCODE_NIC_CORE_DRV_ACTIVE,
  3739. octeon_core_drv_init,
  3740. octeon_dev);
  3741. octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
  3742. OPCODE_NIC_VF_DRV_NOTICE,
  3743. octeon_recv_vf_drv_notice, octeon_dev);
  3744. INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
  3745. octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
  3746. schedule_delayed_work(&octeon_dev->nic_poll_work.work,
  3747. LIQUIDIO_STARTER_POLL_INTERVAL_MS);
  3748. atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
  3749. if (octeon_set_io_queues_off(octeon_dev)) {
  3750. dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
  3751. return 1;
  3752. }
  3753. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3754. ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
  3755. if (ret) {
  3756. dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
  3757. return ret;
  3758. }
  3759. }
  3760. /* Initialize soft command buffer pool
  3761. */
  3762. if (octeon_setup_sc_buffer_pool(octeon_dev)) {
  3763. dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
  3764. return 1;
  3765. }
  3766. atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
  3767. /* Setup the data structures that manage this Octeon's Input queues. */
  3768. if (octeon_setup_instr_queues(octeon_dev)) {
  3769. dev_err(&octeon_dev->pci_dev->dev,
  3770. "instruction queue initialization failed\n");
  3771. return 1;
  3772. }
  3773. atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
  3774. /* Initialize lists to manage the requests of different types that
  3775. * arrive from user & kernel applications for this octeon device.
  3776. */
  3777. if (octeon_setup_response_list(octeon_dev)) {
  3778. dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
  3779. return 1;
  3780. }
  3781. atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
  3782. if (octeon_setup_output_queues(octeon_dev)) {
  3783. dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
  3784. return 1;
  3785. }
  3786. atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
  3787. if (OCTEON_CN23XX_PF(octeon_dev)) {
  3788. if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
  3789. dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
  3790. return 1;
  3791. }
  3792. atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
  3793. if (octeon_allocate_ioq_vector(octeon_dev)) {
  3794. dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
  3795. return 1;
  3796. }
  3797. atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
  3798. } else {
  3799. /* The input and output queue registers were setup earlier (the
  3800. * queues were not enabled). Any additional registers
  3801. * that need to be programmed should be done now.
  3802. */
  3803. ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
  3804. if (ret) {
  3805. dev_err(&octeon_dev->pci_dev->dev,
  3806. "Failed to configure device registers\n");
  3807. return ret;
  3808. }
  3809. }
  3810. /* Initialize the tasklet that handles output queue packet processing.*/
  3811. dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
  3812. tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
  3813. (unsigned long)octeon_dev);
  3814. /* Setup the interrupt handler and record the INT SUM register address
  3815. */
  3816. if (octeon_setup_interrupt(octeon_dev))
  3817. return 1;
  3818. /* Enable Octeon device interrupts */
  3819. octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
  3820. atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
  3821. /* Enable the input and output queues for this Octeon device */
  3822. ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
  3823. if (ret) {
  3824. dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
  3825. return ret;
  3826. }
  3827. atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
  3828. if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
  3829. dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
  3830. if (!ddr_timeout) {
  3831. dev_info(&octeon_dev->pci_dev->dev,
  3832. "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
  3833. }
  3834. schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
  3835. /* Wait for the octeon to initialize DDR after the soft-reset.*/
  3836. while (!ddr_timeout) {
  3837. set_current_state(TASK_INTERRUPTIBLE);
  3838. if (schedule_timeout(HZ / 10)) {
  3839. /* user probably pressed Control-C */
  3840. return 1;
  3841. }
  3842. }
  3843. ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
  3844. if (ret) {
  3845. dev_err(&octeon_dev->pci_dev->dev,
  3846. "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
  3847. ret);
  3848. return 1;
  3849. }
  3850. if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
  3851. dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
  3852. return 1;
  3853. }
  3854. /* Divert uboot to take commands from host instead. */
  3855. ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
  3856. dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
  3857. ret = octeon_init_consoles(octeon_dev);
  3858. if (ret) {
  3859. dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
  3860. return 1;
  3861. }
  3862. ret = octeon_add_console(octeon_dev, 0);
  3863. if (ret) {
  3864. dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
  3865. return 1;
  3866. }
  3867. atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
  3868. dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
  3869. ret = load_firmware(octeon_dev);
  3870. if (ret) {
  3871. dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
  3872. return 1;
  3873. }
  3874. /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
  3875. * loaded
  3876. */
  3877. if (OCTEON_CN23XX_PF(octeon_dev))
  3878. octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
  3879. 2ULL);
  3880. }
  3881. handshake[octeon_dev->octeon_id].init_ok = 1;
  3882. complete(&handshake[octeon_dev->octeon_id].init);
  3883. atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
  3884. /* Send Credit for Octeon Output queues. Credits are always sent after
  3885. * the output queue is enabled.
  3886. */
  3887. for (j = 0; j < octeon_dev->num_oqs; j++)
  3888. writel(octeon_dev->droq[j]->max_count,
  3889. octeon_dev->droq[j]->pkts_credit_reg);
  3890. /* Packets can start arriving on the output queues from this point. */
  3891. return 0;
  3892. }
  3893. /**
  3894. * \brief Exits the module
  3895. */
  3896. static void __exit liquidio_exit(void)
  3897. {
  3898. liquidio_deinit_pci();
  3899. pr_info("LiquidIO network module is now unloaded\n");
  3900. }
  3901. module_init(liquidio_init);
  3902. module_exit(liquidio_exit);