/* lio_ethtool.c */
  1. /**********************************************************************
  2. * Author: Cavium, Inc.
  3. *
  4. * Contact: support@cavium.com
  5. * Please include "LiquidIO" in the subject.
  6. *
  7. * Copyright (c) 2003-2016 Cavium, Inc.
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more details.
  17. ***********************************************************************/
  18. #include <linux/netdevice.h>
  19. #include <linux/net_tstamp.h>
  20. #include <linux/pci.h>
  21. #include "liquidio_common.h"
  22. #include "octeon_droq.h"
  23. #include "octeon_iq.h"
  24. #include "response_manager.h"
  25. #include "octeon_device.h"
  26. #include "octeon_nic.h"
  27. #include "octeon_main.h"
  28. #include "octeon_network.h"
  29. #include "cn66xx_regs.h"
  30. #include "cn66xx_device.h"
  31. #include "cn23xx_pf_device.h"
  32. #include "cn23xx_vf_device.h"
  33. static int octnet_get_link_stats(struct net_device *netdev);
/* Per-request context for an MDIO soft command.  The completion callback
 * uses it to find the octeon device and to wake the sleeping requester.
 */
struct oct_mdio_cmd_context {
	int octeon_id;		/* device index for lio_get_device() */
	wait_queue_head_t wc;	/* requester sleeps here until completion */
	int cond;		/* 0 = pending, 1 = success, -1 = failure */
};
/* Firmware response layout for an MDIO45 soft command. */
struct oct_mdio_cmd_resp {
	u64 rh;				/* receive header */
	struct oct_mdio_cmd resp;	/* echoed command; holds read value */
	u64 status;			/* firmware status, 0 on success */
};

#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};

/* Register-dump buffer sizes (per chip family) and dump format version */
#define OCT_ETHTOOL_REGDUMP_LEN		4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX	(4096 * 11)
#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF	(4096 * 2)
#define OCT_ETHTOOL_REGSVER		1
/* statistics of PF
 *
 * NOTE(review): these names are reported to userspace (ethtool -S) and
 * the order must match the data[] layout in lio_get_ethtool_stats();
 * keep spellings stable (including "multi_collison"/"max_deferal") so
 * existing monitoring scripts don't break.
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",	/*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
			 *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};
/* statistics of VF
 *
 * Userspace-visible (ethtool -S) stat names for the VF driver; order
 * must match the VF stats fill routine.
 */
static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/* jabber_err + l2_err+frame_err */
	"tx_errors",	/* fw_err_pko + fw_err_link+fw_err_drop */
	"rx_dropped",	/* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
	"tx_dropped",
	"link_state_changes",
};
/* statistics of host tx queue
 *
 * Per-instruction-queue (tx) stat names; reported once per queue via
 * ethtool -S.
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",		/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"vxlan",
	"txq_restart",
};
/* statistics of host rx queue
 *
 * Per-DROQ (rx) stat names; reported once per queue via ethtool -S.
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",		/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",		/*oct->droq[oq_no]->stats.rx_dropped+
				 *oct->droq[oq_no]->stats.dropped_nodispatch+
				 *oct->droq[oq_no]->stats.dropped_toomany+
				 *oct->droq[oq_no]->stats.dropped_nomem
				 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"vxlan",
	"buffer_alloc_failure",
};
/* LiquidIO driver private flags (none currently defined) */
static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
};

/* Bit flags used with OCTNET autoneg/PHY control commands */
#define OCTNIC_NCMD_AUTONEG_ON	0x1
#define OCTNIC_NCMD_PHY_ON	0x2
  185. static int lio_get_link_ksettings(struct net_device *netdev,
  186. struct ethtool_link_ksettings *ecmd)
  187. {
  188. struct lio *lio = GET_LIO(netdev);
  189. struct octeon_device *oct = lio->oct_dev;
  190. struct oct_link_info *linfo;
  191. u32 supported, advertising;
  192. linfo = &lio->linfo;
  193. if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
  194. linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
  195. linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
  196. ecmd->base.port = PORT_FIBRE;
  197. supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
  198. SUPPORTED_Pause);
  199. advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
  200. ethtool_convert_legacy_u32_to_link_mode(
  201. ecmd->link_modes.supported, supported);
  202. ethtool_convert_legacy_u32_to_link_mode(
  203. ecmd->link_modes.advertising, advertising);
  204. ecmd->base.autoneg = AUTONEG_DISABLE;
  205. } else {
  206. dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
  207. linfo->link.s.if_mode);
  208. }
  209. if (linfo->link.s.link_up) {
  210. ecmd->base.speed = linfo->link.s.speed;
  211. ecmd->base.duplex = linfo->link.s.duplex;
  212. } else {
  213. ecmd->base.speed = SPEED_UNKNOWN;
  214. ecmd->base.duplex = DUPLEX_UNKNOWN;
  215. }
  216. return 0;
  217. }
  218. static void
  219. lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
  220. {
  221. struct lio *lio;
  222. struct octeon_device *oct;
  223. lio = GET_LIO(netdev);
  224. oct = lio->oct_dev;
  225. memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
  226. strcpy(drvinfo->driver, "liquidio");
  227. strcpy(drvinfo->version, LIQUIDIO_VERSION);
  228. strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
  229. ETHTOOL_FWVERS_LEN);
  230. strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
  231. }
  232. static void
  233. lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
  234. {
  235. struct octeon_device *oct;
  236. struct lio *lio;
  237. lio = GET_LIO(netdev);
  238. oct = lio->oct_dev;
  239. memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
  240. strcpy(drvinfo->driver, "liquidio_vf");
  241. strcpy(drvinfo->version, LIQUIDIO_VERSION);
  242. strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
  243. ETHTOOL_FWVERS_LEN);
  244. strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
  245. }
  246. static void
  247. lio_ethtool_get_channels(struct net_device *dev,
  248. struct ethtool_channels *channel)
  249. {
  250. struct lio *lio = GET_LIO(dev);
  251. struct octeon_device *oct = lio->oct_dev;
  252. u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
  253. if (OCTEON_CN6XXX(oct)) {
  254. struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
  255. max_rx = CFG_GET_OQ_MAX_Q(conf6x);
  256. max_tx = CFG_GET_IQ_MAX_Q(conf6x);
  257. rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
  258. tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
  259. } else if (OCTEON_CN23XX_PF(oct)) {
  260. max_rx = oct->sriov_info.num_pf_rings;
  261. max_tx = oct->sriov_info.num_pf_rings;
  262. rx_count = lio->linfo.num_rxpciq;
  263. tx_count = lio->linfo.num_txpciq;
  264. } else if (OCTEON_CN23XX_VF(oct)) {
  265. max_tx = oct->sriov_info.rings_per_vf;
  266. max_rx = oct->sriov_info.rings_per_vf;
  267. rx_count = lio->linfo.num_rxpciq;
  268. tx_count = lio->linfo.num_txpciq;
  269. }
  270. channel->max_rx = max_rx;
  271. channel->max_tx = max_tx;
  272. channel->rx_count = rx_count;
  273. channel->tx_count = tx_count;
  274. }
  275. static int lio_get_eeprom_len(struct net_device *netdev)
  276. {
  277. u8 buf[128];
  278. struct lio *lio = GET_LIO(netdev);
  279. struct octeon_device *oct_dev = lio->oct_dev;
  280. struct octeon_board_info *board_info;
  281. int len;
  282. board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
  283. len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
  284. board_info->name, board_info->serial_number,
  285. board_info->major, board_info->minor);
  286. return len;
  287. }
  288. static int
  289. lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
  290. u8 *bytes)
  291. {
  292. struct lio *lio = GET_LIO(netdev);
  293. struct octeon_device *oct_dev = lio->oct_dev;
  294. struct octeon_board_info *board_info;
  295. if (eeprom->offset)
  296. return -EINVAL;
  297. eeprom->magic = oct_dev->pci_dev->vendor;
  298. board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
  299. sprintf((char *)bytes,
  300. "boardname:%s serialnum:%s maj:%lld min:%lld\n",
  301. board_info->name, board_info->serial_number,
  302. board_info->major, board_info->minor);
  303. return 0;
  304. }
  305. static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
  306. {
  307. struct lio *lio = GET_LIO(netdev);
  308. struct octeon_device *oct = lio->oct_dev;
  309. struct octnic_ctrl_pkt nctrl;
  310. int ret = 0;
  311. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  312. nctrl.ncmd.u64 = 0;
  313. nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
  314. nctrl.ncmd.s.param1 = addr;
  315. nctrl.ncmd.s.param2 = val;
  316. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  317. nctrl.wait_time = 100;
  318. nctrl.netpndev = (u64)netdev;
  319. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  320. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  321. if (ret < 0) {
  322. dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
  323. return -EINVAL;
  324. }
  325. return 0;
  326. }
  327. static int octnet_id_active(struct net_device *netdev, int val)
  328. {
  329. struct lio *lio = GET_LIO(netdev);
  330. struct octeon_device *oct = lio->oct_dev;
  331. struct octnic_ctrl_pkt nctrl;
  332. int ret = 0;
  333. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  334. nctrl.ncmd.u64 = 0;
  335. nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
  336. nctrl.ncmd.s.param1 = val;
  337. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  338. nctrl.wait_time = 100;
  339. nctrl.netpndev = (u64)netdev;
  340. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  341. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  342. if (ret < 0) {
  343. dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
  344. return -EINVAL;
  345. }
  346. return 0;
  347. }
  348. /* Callback for when mdio command response arrives
  349. */
  350. static void octnet_mdio_resp_callback(struct octeon_device *oct,
  351. u32 status,
  352. void *buf)
  353. {
  354. struct oct_mdio_cmd_context *mdio_cmd_ctx;
  355. struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
  356. mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
  357. oct = lio_get_device(mdio_cmd_ctx->octeon_id);
  358. if (status) {
  359. dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
  360. CVM_CAST64(status));
  361. WRITE_ONCE(mdio_cmd_ctx->cond, -1);
  362. } else {
  363. WRITE_ONCE(mdio_cmd_ctx->cond, 1);
  364. }
  365. wake_up_interruptible(&mdio_cmd_ctx->wc);
  366. }
/* This routine provides PHY access routines for
 * mdio clause45 .
 *
 * op != 0: write *value to MDIO location 'loc'.
 * op == 0: read MDIO location 'loc' into *value.
 *
 * Sends the request as a soft command and sleeps until the completion
 * callback (octnet_mdio_resp_callback) signals via mdio_cmd_ctx->cond.
 * Returns 0 on success, or -ENOMEM/-EBUSY/-EINVAL on failure.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	/* Allocate the command, response, and context areas in one go */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);	/* 0 = response pending */
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	/* Byte-swap the command payload for the device */
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			/* Swap the response back to host byte order before
			 * extracting the read value
			 */
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
/* ethtool -p (identify) handler.  Behavior is chip specific:
 *  - CN66XX: blink via PHY GPIO accesses; returns 2 from ACTIVE so the
 *    ethtool core drives the 2 Hz on/off cycle itself.
 *  - CN68XX: program LED beacon mode over MDIO; the PHY blinks on its
 *    own, so ON/OFF report -EINVAL.
 *  - CN23XX PF: firmware handles the LED via OCTNET_CMD_ID_ACTIVE.
 * Returns 0 on success or a negative errno.
 */
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;	/* ethtool core toggles at 2 Hz */

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);

			/* returns 0 since updates are asynchronous */
			return 0;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);

		} else if (oct->chip_id == OCTEON_CN68XX) {
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);

			return 0;
		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
  524. static void
  525. lio_ethtool_get_ringparam(struct net_device *netdev,
  526. struct ethtool_ringparam *ering)
  527. {
  528. struct lio *lio = GET_LIO(netdev);
  529. struct octeon_device *oct = lio->oct_dev;
  530. u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
  531. rx_pending = 0;
  532. if (OCTEON_CN6XXX(oct)) {
  533. struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
  534. tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
  535. rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
  536. rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
  537. tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
  538. } else if (OCTEON_CN23XX_PF(oct)) {
  539. struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
  540. tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
  541. rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
  542. rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
  543. tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
  544. }
  545. if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
  546. ering->rx_pending = 0;
  547. ering->rx_max_pending = 0;
  548. ering->rx_mini_pending = 0;
  549. ering->rx_jumbo_pending = rx_pending;
  550. ering->rx_mini_max_pending = 0;
  551. ering->rx_jumbo_max_pending = rx_max_pending;
  552. } else {
  553. ering->rx_pending = rx_pending;
  554. ering->rx_max_pending = rx_max_pending;
  555. ering->rx_mini_pending = 0;
  556. ering->rx_jumbo_pending = 0;
  557. ering->rx_mini_max_pending = 0;
  558. ering->rx_jumbo_max_pending = 0;
  559. }
  560. ering->tx_pending = tx_pending;
  561. ering->tx_max_pending = tx_max_pending;
  562. }
  563. static u32 lio_get_msglevel(struct net_device *netdev)
  564. {
  565. struct lio *lio = GET_LIO(netdev);
  566. return lio->msg_enable;
  567. }
  568. static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
  569. {
  570. struct lio *lio = GET_LIO(netdev);
  571. if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
  572. if (msglvl & NETIF_MSG_HW)
  573. liquidio_set_feature(netdev,
  574. OCTNET_CMD_VERBOSE_ENABLE, 0);
  575. else
  576. liquidio_set_feature(netdev,
  577. OCTNET_CMD_VERBOSE_DISABLE, 0);
  578. }
  579. lio->msg_enable = msglvl;
  580. }
  581. static void
  582. lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
  583. {
  584. /* Notes: Not supporting any auto negotiation in these
  585. * drivers. Just report pause frame support.
  586. */
  587. struct lio *lio = GET_LIO(netdev);
  588. struct octeon_device *oct = lio->oct_dev;
  589. pause->autoneg = 0;
  590. pause->tx_pause = oct->tx_pause;
  591. pause->rx_pause = oct->rx_pause;
  592. }
  593. static int
  594. lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
  595. {
  596. /* Notes: Not supporting any auto negotiation in these
  597. * drivers.
  598. */
  599. struct lio *lio = GET_LIO(netdev);
  600. struct octeon_device *oct = lio->oct_dev;
  601. struct octnic_ctrl_pkt nctrl;
  602. struct oct_link_info *linfo = &lio->linfo;
  603. int ret = 0;
  604. if (oct->chip_id != OCTEON_CN23XX_PF_VID)
  605. return -EINVAL;
  606. if (linfo->link.s.duplex == 0) {
  607. /*no flow control for half duplex*/
  608. if (pause->rx_pause || pause->tx_pause)
  609. return -EINVAL;
  610. }
  611. /*do not support autoneg of link flow control*/
  612. if (pause->autoneg == AUTONEG_ENABLE)
  613. return -EINVAL;
  614. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  615. nctrl.ncmd.u64 = 0;
  616. nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
  617. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  618. nctrl.wait_time = 100;
  619. nctrl.netpndev = (u64)netdev;
  620. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  621. if (pause->rx_pause) {
  622. /*enable rx pause*/
  623. nctrl.ncmd.s.param1 = 1;
  624. } else {
  625. /*disable rx pause*/
  626. nctrl.ncmd.s.param1 = 0;
  627. }
  628. if (pause->tx_pause) {
  629. /*enable tx pause*/
  630. nctrl.ncmd.s.param2 = 1;
  631. } else {
  632. /*disable tx pause*/
  633. nctrl.ncmd.s.param2 = 0;
  634. }
  635. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  636. if (ret < 0) {
  637. dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
  638. return -EINVAL;
  639. }
  640. oct->rx_pause = pause->rx_pause;
  641. oct->tx_pause = pause->tx_pause;
  642. return 0;
  643. }
/* lio_get_ethtool_stats - populate the 'ethtool -S' counter array (PF).
 * @netdev:  device being queried
 * @stats:   unused
 * @data:    output array of u64 counters
 *
 * The order of the data[i++] stores below is an ABI shared with
 * lio_get_strings() and lio_get_sset_count(): host netdev stats first,
 * then firmware/MAC TX stats, firmware/MAC RX stats, LRO stats, the
 * link-change count, and finally per-IQ and per-OQ queue counters.
 * Do NOT reorder entries.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	/* Refresh host counters and pull fresh link stats from firmware
	 * before dumping anything.
	 */
	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/* Host network-stack stats (aggregated over all queues). */
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum over all DROQs of rx_dropped + dropped_nodispatch +
	 * dropped_toomany + dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/* Firmware TX stats (per-interface, from firmware's link_stats). */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* MAC TX stats (BGX CMR TX_STAT registers). */
	/* CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/* CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/* CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/* CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/* CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/* CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* Firmware RX stats. */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO stats. */
	/* number of packets that were LROed */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/* number of octets that were LROed */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/* number of LRO packets formed */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/* LRO aborts, total and by cause (port/seq/tsval/timer) */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/* intrmod: packet forward rate */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* MAC RX (link-level) stats. */
	/* CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/* CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/* CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);

	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	/* Per-IQ counters; only queues present in io_qmask.iq are dumped,
	 * matching the names emitted by lio_get_strings().
	 */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
		/* # of instructions (data and control) posted to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restarts */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* Per-OQ (RX) counters; only queues present in io_qmask.oq. */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;
		/* # of packets sent to the network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes sent to the network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/* # of packets dropped (all causes combined) */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
		/* control and data path */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
/* lio_vf_get_ethtool_stats - populate the 'ethtool -S' counter array (VF).
 * @netdev:  device being queried
 * @stats:   unused
 * @data:    output array of u64 counters
 *
 * Entry order is an ABI shared with lio_vf_get_strings() and
 * lio_vf_get_sset_count(): host netdev stats, link-change count, then
 * per-IQ and per-OQ queue counters.  Unlike the PF path, queues are
 * walked via the linfo txpciq/rxpciq maps rather than io_qmask, and no
 * firmware link stats are fetched.  Do NOT reorder entries.
 */
static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct net_device_stats *netstats = &netdev->stats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	/* Refresh host-side counters before dumping. */
	netdev->netdev_ops->ndo_get_stats(netdev);

	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum over all DROQs of rx_dropped + dropped_nodispatch +
	 * dropped_toomany + dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	/* Per-IQ counters; vj indexes the VF's queue map, j is the
	 * device-wide queue number it maps to.
	 */
	for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* gather entries sent */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.sgentry_sent);
		/* # of instructions (data and control) posted to the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restarts */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* Per-OQ (RX) counters, via the VF's rxpciq map. */
	for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;
		/* # of packets sent to the network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes sent to the network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_bytes_received);
		/* # of packets dropped (all causes combined) */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
  983. static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
  984. {
  985. struct octeon_device *oct_dev = lio->oct_dev;
  986. int i;
  987. switch (oct_dev->chip_id) {
  988. case OCTEON_CN23XX_PF_VID:
  989. case OCTEON_CN23XX_VF_VID:
  990. for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
  991. sprintf(data, "%s", oct_priv_flags_strings[i]);
  992. data += ETH_GSTRING_LEN;
  993. }
  994. break;
  995. case OCTEON_CN68XX:
  996. case OCTEON_CN66XX:
  997. break;
  998. default:
  999. netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
  1000. break;
  1001. }
  1002. }
  1003. static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
  1004. {
  1005. struct lio *lio = GET_LIO(netdev);
  1006. struct octeon_device *oct_dev = lio->oct_dev;
  1007. int num_iq_stats, num_oq_stats, i, j;
  1008. int num_stats;
  1009. switch (stringset) {
  1010. case ETH_SS_STATS:
  1011. num_stats = ARRAY_SIZE(oct_stats_strings);
  1012. for (j = 0; j < num_stats; j++) {
  1013. sprintf(data, "%s", oct_stats_strings[j]);
  1014. data += ETH_GSTRING_LEN;
  1015. }
  1016. num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
  1017. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
  1018. if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
  1019. continue;
  1020. for (j = 0; j < num_iq_stats; j++) {
  1021. sprintf(data, "tx-%d-%s", i,
  1022. oct_iq_stats_strings[j]);
  1023. data += ETH_GSTRING_LEN;
  1024. }
  1025. }
  1026. num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
  1027. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
  1028. if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
  1029. continue;
  1030. for (j = 0; j < num_oq_stats; j++) {
  1031. sprintf(data, "rx-%d-%s", i,
  1032. oct_droq_stats_strings[j]);
  1033. data += ETH_GSTRING_LEN;
  1034. }
  1035. }
  1036. break;
  1037. case ETH_SS_PRIV_FLAGS:
  1038. lio_get_priv_flags_strings(lio, data);
  1039. break;
  1040. default:
  1041. netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
  1042. break;
  1043. }
  1044. }
  1045. static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
  1046. u8 *data)
  1047. {
  1048. int num_iq_stats, num_oq_stats, i, j;
  1049. struct lio *lio = GET_LIO(netdev);
  1050. struct octeon_device *oct_dev = lio->oct_dev;
  1051. int num_stats;
  1052. switch (stringset) {
  1053. case ETH_SS_STATS:
  1054. num_stats = ARRAY_SIZE(oct_vf_stats_strings);
  1055. for (j = 0; j < num_stats; j++) {
  1056. sprintf(data, "%s", oct_vf_stats_strings[j]);
  1057. data += ETH_GSTRING_LEN;
  1058. }
  1059. num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
  1060. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
  1061. if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
  1062. continue;
  1063. for (j = 0; j < num_iq_stats; j++) {
  1064. sprintf(data, "tx-%d-%s", i,
  1065. oct_iq_stats_strings[j]);
  1066. data += ETH_GSTRING_LEN;
  1067. }
  1068. }
  1069. num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
  1070. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
  1071. if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
  1072. continue;
  1073. for (j = 0; j < num_oq_stats; j++) {
  1074. sprintf(data, "rx-%d-%s", i,
  1075. oct_droq_stats_strings[j]);
  1076. data += ETH_GSTRING_LEN;
  1077. }
  1078. }
  1079. break;
  1080. case ETH_SS_PRIV_FLAGS:
  1081. lio_get_priv_flags_strings(lio, data);
  1082. break;
  1083. default:
  1084. netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
  1085. break;
  1086. }
  1087. }
  1088. static int lio_get_priv_flags_ss_count(struct lio *lio)
  1089. {
  1090. struct octeon_device *oct_dev = lio->oct_dev;
  1091. switch (oct_dev->chip_id) {
  1092. case OCTEON_CN23XX_PF_VID:
  1093. case OCTEON_CN23XX_VF_VID:
  1094. return ARRAY_SIZE(oct_priv_flags_strings);
  1095. case OCTEON_CN68XX:
  1096. case OCTEON_CN66XX:
  1097. return -EOPNOTSUPP;
  1098. default:
  1099. netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
  1100. return -EOPNOTSUPP;
  1101. }
  1102. }
  1103. static int lio_get_sset_count(struct net_device *netdev, int sset)
  1104. {
  1105. struct lio *lio = GET_LIO(netdev);
  1106. struct octeon_device *oct_dev = lio->oct_dev;
  1107. switch (sset) {
  1108. case ETH_SS_STATS:
  1109. return (ARRAY_SIZE(oct_stats_strings) +
  1110. ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
  1111. ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
  1112. case ETH_SS_PRIV_FLAGS:
  1113. return lio_get_priv_flags_ss_count(lio);
  1114. default:
  1115. return -EOPNOTSUPP;
  1116. }
  1117. }
  1118. static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
  1119. {
  1120. struct lio *lio = GET_LIO(netdev);
  1121. struct octeon_device *oct_dev = lio->oct_dev;
  1122. switch (sset) {
  1123. case ETH_SS_STATS:
  1124. return (ARRAY_SIZE(oct_vf_stats_strings) +
  1125. ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
  1126. ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
  1127. case ETH_SS_PRIV_FLAGS:
  1128. return lio_get_priv_flags_ss_count(lio);
  1129. default:
  1130. return -EOPNOTSUPP;
  1131. }
  1132. }
/* lio_get_intr_coalesce - report interrupt-coalescing settings to ethtool.
 * @netdev:    device being queried
 * @intr_coal: ethtool structure to fill in
 *
 * Static (non-adaptive) settings are reported per chip family, but only
 * for a direction whose adaptive moderation is disabled; when adaptive
 * moderation is enabled, the adaptive trigger/threshold fields are
 * reported instead.
 *
 * Return: 0 on success, -EINVAL for an unrecognized chip.
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		/* Static settings only apply when adaptive moderation is
		 * off for that direction.
		 */
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
			intr_coal->rx_max_coalesced_frames =
				intrmod_cfg->rx_frames;
		}
		if (!intrmod_cfg->tx_enable)
			intr_coal->tx_max_coalesced_frames =
				intrmod_cfg->tx_frames;
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		/* On 6xxx, TX "coalescing" is the fill threshold of the
		 * first instruction queue.
		 */
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	/* Adaptive RX moderation parameters (all chips). */
	if (intrmod_cfg->rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	/* Adaptive TX moderation is only meaningful on CN23XX. */
	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
	    (intrmod_cfg->tx_enable)) {
		intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
		intr_coal->tx_max_coalesced_frames_high =
			intrmod_cfg->tx_maxcnt_trigger;
		intr_coal->tx_max_coalesced_frames_low =
			intrmod_cfg->tx_mincnt_trigger;
	}
	return 0;
}
  1199. /* Callback function for intrmod */
  1200. static void octnet_intrmod_callback(struct octeon_device *oct_dev,
  1201. u32 status,
  1202. void *ptr)
  1203. {
  1204. struct oct_intrmod_cmd *cmd = ptr;
  1205. struct octeon_soft_command *sc = cmd->sc;
  1206. oct_dev = cmd->oct_dev;
  1207. if (status)
  1208. dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
  1209. CVM_CAST64(status));
  1210. else
  1211. dev_info(&oct_dev->pci_dev->dev,
  1212. "Rx-Adaptive Interrupt moderation enabled:%llx\n",
  1213. oct_dev->intrmod.rx_enable);
  1214. octeon_free_soft_command(oct_dev, sc);
  1215. }
  1216. /* Configure interrupt moderation parameters */
  1217. static int octnet_set_intrmod_cfg(struct lio *lio,
  1218. struct oct_intrmod_cfg *intr_cfg)
  1219. {
  1220. struct octeon_soft_command *sc;
  1221. struct oct_intrmod_cmd *cmd;
  1222. struct oct_intrmod_cfg *cfg;
  1223. int retval;
  1224. struct octeon_device *oct_dev = lio->oct_dev;
  1225. /* Alloc soft command */
  1226. sc = (struct octeon_soft_command *)
  1227. octeon_alloc_soft_command(oct_dev,
  1228. sizeof(struct oct_intrmod_cfg),
  1229. 0,
  1230. sizeof(struct oct_intrmod_cmd));
  1231. if (!sc)
  1232. return -ENOMEM;
  1233. cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
  1234. cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
  1235. memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
  1236. octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
  1237. cmd->sc = sc;
  1238. cmd->cfg = cfg;
  1239. cmd->oct_dev = oct_dev;
  1240. sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  1241. octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
  1242. OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
  1243. sc->callback = octnet_intrmod_callback;
  1244. sc->callback_arg = cmd;
  1245. sc->wait_time = 1000;
  1246. retval = octeon_send_soft_command(oct_dev, sc);
  1247. if (retval == IQ_SEND_FAILED) {
  1248. octeon_free_soft_command(oct_dev, sc);
  1249. return -EINVAL;
  1250. }
  1251. return 0;
  1252. }
/* octnet_nic_stats_callback - completion handler for OPCODE_NIC_PORT_STATS.
 * @oct_dev: device the command was sent on
 * @status:  transport status of the soft command
 * @ptr:     the soft command itself
 *
 * On success, byte-swaps the firmware response and copies every counter
 * into oct_dev->link_stats, then sets resp->status to 1; on timeout or
 * firmware error it sets resp->status to -1.  Either way the waiter in
 * octnet_get_link_stats() is woken via ctrl->complete.  The soft command
 * is NOT freed here; the requester owns and frees it.
 */
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
		(struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
		(struct oct_nic_stats_ctrl *)sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		/* Response arrives in firmware byte order; swap in place
		 * before copying out.
		 */
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that were LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that were LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of a packet was aborted, by cause */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* TX firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}
/* octnet_get_link_stats - request fresh port statistics from firmware.
 * @netdev: device whose stats are requested
 *
 * Sends an OPCODE_NIC_PORT_STATS soft command and waits up to one
 * second for octnet_nic_stats_callback() to complete it; the callback
 * copies the returned counters into oct_dev->link_stats and sets
 * resp->status to 1 on success.
 *
 * Return: 0 on success; -ENOMEM if the soft command cannot be
 * allocated; -EINVAL on send failure, timeout, or a bad response.
 */
static int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;
	int retval;

	/* Alloc soft command: no data payload, response + ctrl context. */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500;	/*in milli seconds*/

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* NOTE(review): the timeout return value is ignored here.  If the
	 * wait times out while the command is still in flight, resp->status
	 * is still 0 (from the memset) and sc is freed below even though
	 * the firmware completion may later reference it -- a potential
	 * use-after-free.  Verify the soft-command teardown semantics.
	 */
	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));

	if (resp->status != 1) {
		octeon_free_soft_command(oct_dev, sc);

		return -EINVAL;
	}

	octeon_free_soft_command(oct_dev, sc);

	return 0;
}
  1382. /* Enable/Disable auto interrupt Moderation */
  1383. static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
  1384. *intr_coal)
  1385. {
  1386. int ret = 0;
  1387. struct octeon_device *oct = lio->oct_dev;
  1388. struct oct_intrmod_cfg *intrmod_cfg;
  1389. intrmod_cfg = &oct->intrmod;
  1390. if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
  1391. if (intr_coal->rate_sample_interval)
  1392. intrmod_cfg->check_intrvl =
  1393. intr_coal->rate_sample_interval;
  1394. else
  1395. intrmod_cfg->check_intrvl =
  1396. LIO_INTRMOD_CHECK_INTERVAL;
  1397. if (intr_coal->pkt_rate_high)
  1398. intrmod_cfg->maxpkt_ratethr =
  1399. intr_coal->pkt_rate_high;
  1400. else
  1401. intrmod_cfg->maxpkt_ratethr =
  1402. LIO_INTRMOD_MAXPKT_RATETHR;
  1403. if (intr_coal->pkt_rate_low)
  1404. intrmod_cfg->minpkt_ratethr =
  1405. intr_coal->pkt_rate_low;
  1406. else
  1407. intrmod_cfg->minpkt_ratethr =
  1408. LIO_INTRMOD_MINPKT_RATETHR;
  1409. }
  1410. if (oct->intrmod.rx_enable) {
  1411. if (intr_coal->rx_max_coalesced_frames_high)
  1412. intrmod_cfg->rx_maxcnt_trigger =
  1413. intr_coal->rx_max_coalesced_frames_high;
  1414. else
  1415. intrmod_cfg->rx_maxcnt_trigger =
  1416. LIO_INTRMOD_RXMAXCNT_TRIGGER;
  1417. if (intr_coal->rx_coalesce_usecs_high)
  1418. intrmod_cfg->rx_maxtmr_trigger =
  1419. intr_coal->rx_coalesce_usecs_high;
  1420. else
  1421. intrmod_cfg->rx_maxtmr_trigger =
  1422. LIO_INTRMOD_RXMAXTMR_TRIGGER;
  1423. if (intr_coal->rx_coalesce_usecs_low)
  1424. intrmod_cfg->rx_mintmr_trigger =
  1425. intr_coal->rx_coalesce_usecs_low;
  1426. else
  1427. intrmod_cfg->rx_mintmr_trigger =
  1428. LIO_INTRMOD_RXMINTMR_TRIGGER;
  1429. if (intr_coal->rx_max_coalesced_frames_low)
  1430. intrmod_cfg->rx_mincnt_trigger =
  1431. intr_coal->rx_max_coalesced_frames_low;
  1432. else
  1433. intrmod_cfg->rx_mincnt_trigger =
  1434. LIO_INTRMOD_RXMINCNT_TRIGGER;
  1435. }
  1436. if (oct->intrmod.tx_enable) {
  1437. if (intr_coal->tx_max_coalesced_frames_high)
  1438. intrmod_cfg->tx_maxcnt_trigger =
  1439. intr_coal->tx_max_coalesced_frames_high;
  1440. else
  1441. intrmod_cfg->tx_maxcnt_trigger =
  1442. LIO_INTRMOD_TXMAXCNT_TRIGGER;
  1443. if (intr_coal->tx_max_coalesced_frames_low)
  1444. intrmod_cfg->tx_mincnt_trigger =
  1445. intr_coal->tx_max_coalesced_frames_low;
  1446. else
  1447. intrmod_cfg->tx_mincnt_trigger =
  1448. LIO_INTRMOD_TXMINCNT_TRIGGER;
  1449. }
  1450. ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
  1451. return ret;
  1452. }
  1453. static int
  1454. oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
  1455. {
  1456. struct octeon_device *oct = lio->oct_dev;
  1457. u32 rx_max_coalesced_frames;
  1458. /* Config Cnt based interrupt values */
  1459. switch (oct->chip_id) {
  1460. case OCTEON_CN68XX:
  1461. case OCTEON_CN66XX: {
  1462. struct octeon_cn6xxx *cn6xxx =
  1463. (struct octeon_cn6xxx *)oct->chip;
  1464. if (!intr_coal->rx_max_coalesced_frames)
  1465. rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
  1466. else
  1467. rx_max_coalesced_frames =
  1468. intr_coal->rx_max_coalesced_frames;
  1469. octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
  1470. rx_max_coalesced_frames);
  1471. CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
  1472. break;
  1473. }
  1474. case OCTEON_CN23XX_PF_VID: {
  1475. int q_no;
  1476. if (!intr_coal->rx_max_coalesced_frames)
  1477. rx_max_coalesced_frames = oct->intrmod.rx_frames;
  1478. else
  1479. rx_max_coalesced_frames =
  1480. intr_coal->rx_max_coalesced_frames;
  1481. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1482. q_no += oct->sriov_info.pf_srn;
  1483. octeon_write_csr64(
  1484. oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
  1485. (octeon_read_csr64(
  1486. oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
  1487. (0x3fffff00000000UL)) |
  1488. rx_max_coalesced_frames);
  1489. /*consider setting resend bit*/
  1490. }
  1491. oct->intrmod.rx_frames = rx_max_coalesced_frames;
  1492. break;
  1493. }
  1494. case OCTEON_CN23XX_VF_VID: {
  1495. int q_no;
  1496. if (!intr_coal->rx_max_coalesced_frames)
  1497. rx_max_coalesced_frames = oct->intrmod.rx_frames;
  1498. else
  1499. rx_max_coalesced_frames =
  1500. intr_coal->rx_max_coalesced_frames;
  1501. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1502. octeon_write_csr64(
  1503. oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
  1504. (octeon_read_csr64(
  1505. oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
  1506. (0x3fffff00000000UL)) |
  1507. rx_max_coalesced_frames);
  1508. /* consider writing to resend bit here */
  1509. }
  1510. oct->intrmod.rx_frames = rx_max_coalesced_frames;
  1511. break;
  1512. }
  1513. default:
  1514. return -EINVAL;
  1515. }
  1516. return 0;
  1517. }
  1518. static int oct_cfg_rx_intrtime(struct lio *lio,
  1519. struct ethtool_coalesce *intr_coal)
  1520. {
  1521. struct octeon_device *oct = lio->oct_dev;
  1522. u32 time_threshold, rx_coalesce_usecs;
  1523. /* Config Time based interrupt values */
  1524. switch (oct->chip_id) {
  1525. case OCTEON_CN68XX:
  1526. case OCTEON_CN66XX: {
  1527. struct octeon_cn6xxx *cn6xxx =
  1528. (struct octeon_cn6xxx *)oct->chip;
  1529. if (!intr_coal->rx_coalesce_usecs)
  1530. rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
  1531. else
  1532. rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
  1533. time_threshold = lio_cn6xxx_get_oq_ticks(oct,
  1534. rx_coalesce_usecs);
  1535. octeon_write_csr(oct,
  1536. CN6XXX_SLI_OQ_INT_LEVEL_TIME,
  1537. time_threshold);
  1538. CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
  1539. break;
  1540. }
  1541. case OCTEON_CN23XX_PF_VID: {
  1542. u64 time_threshold;
  1543. int q_no;
  1544. if (!intr_coal->rx_coalesce_usecs)
  1545. rx_coalesce_usecs = oct->intrmod.rx_usecs;
  1546. else
  1547. rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
  1548. time_threshold =
  1549. cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
  1550. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1551. q_no += oct->sriov_info.pf_srn;
  1552. octeon_write_csr64(oct,
  1553. CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
  1554. (oct->intrmod.rx_frames |
  1555. (time_threshold << 32)));
  1556. /*consider writing to resend bit here*/
  1557. }
  1558. oct->intrmod.rx_usecs = rx_coalesce_usecs;
  1559. break;
  1560. }
  1561. case OCTEON_CN23XX_VF_VID: {
  1562. u64 time_threshold;
  1563. int q_no;
  1564. if (!intr_coal->rx_coalesce_usecs)
  1565. rx_coalesce_usecs = oct->intrmod.rx_usecs;
  1566. else
  1567. rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
  1568. time_threshold =
  1569. cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
  1570. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1571. octeon_write_csr64(
  1572. oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
  1573. (oct->intrmod.rx_frames |
  1574. (time_threshold << 32)));
  1575. /* consider setting resend bit */
  1576. }
  1577. oct->intrmod.rx_usecs = rx_coalesce_usecs;
  1578. break;
  1579. }
  1580. default:
  1581. return -EINVAL;
  1582. }
  1583. return 0;
  1584. }
  1585. static int
  1586. oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
  1587. __attribute__((unused)))
  1588. {
  1589. struct octeon_device *oct = lio->oct_dev;
  1590. u32 iq_intr_pkt;
  1591. void __iomem *inst_cnt_reg;
  1592. u64 val;
  1593. /* Config Cnt based interrupt values */
  1594. switch (oct->chip_id) {
  1595. case OCTEON_CN68XX:
  1596. case OCTEON_CN66XX:
  1597. break;
  1598. case OCTEON_CN23XX_VF_VID:
  1599. case OCTEON_CN23XX_PF_VID: {
  1600. int q_no;
  1601. if (!intr_coal->tx_max_coalesced_frames)
  1602. iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
  1603. CN23XX_PKT_IN_DONE_WMARK_MASK;
  1604. else
  1605. iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
  1606. CN23XX_PKT_IN_DONE_WMARK_MASK;
  1607. for (q_no = 0; q_no < oct->num_iqs; q_no++) {
  1608. inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
  1609. val = readq(inst_cnt_reg);
  1610. /*clear wmark and count.dont want to write count back*/
  1611. val = (val & 0xFFFF000000000000ULL) |
  1612. ((u64)iq_intr_pkt
  1613. << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
  1614. writeq(val, inst_cnt_reg);
  1615. /*consider setting resend bit*/
  1616. }
  1617. oct->intrmod.tx_frames = iq_intr_pkt;
  1618. break;
  1619. }
  1620. default:
  1621. return -EINVAL;
  1622. }
  1623. return 0;
  1624. }
  1625. static int lio_set_intr_coalesce(struct net_device *netdev,
  1626. struct ethtool_coalesce *intr_coal)
  1627. {
  1628. struct lio *lio = GET_LIO(netdev);
  1629. int ret;
  1630. struct octeon_device *oct = lio->oct_dev;
  1631. u32 j, q_no;
  1632. int db_max, db_min;
  1633. switch (oct->chip_id) {
  1634. case OCTEON_CN68XX:
  1635. case OCTEON_CN66XX:
  1636. db_min = CN6XXX_DB_MIN;
  1637. db_max = CN6XXX_DB_MAX;
  1638. if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
  1639. (intr_coal->tx_max_coalesced_frames <= db_max)) {
  1640. for (j = 0; j < lio->linfo.num_txpciq; j++) {
  1641. q_no = lio->linfo.txpciq[j].s.q_no;
  1642. oct->instr_queue[q_no]->fill_threshold =
  1643. intr_coal->tx_max_coalesced_frames;
  1644. }
  1645. } else {
  1646. dev_err(&oct->pci_dev->dev,
  1647. "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
  1648. intr_coal->tx_max_coalesced_frames, db_min,
  1649. db_max);
  1650. return -EINVAL;
  1651. }
  1652. break;
  1653. case OCTEON_CN23XX_PF_VID:
  1654. case OCTEON_CN23XX_VF_VID:
  1655. break;
  1656. default:
  1657. return -EINVAL;
  1658. }
  1659. oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
  1660. oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
  1661. ret = oct_cfg_adaptive_intr(lio, intr_coal);
  1662. if (!intr_coal->use_adaptive_rx_coalesce) {
  1663. ret = oct_cfg_rx_intrtime(lio, intr_coal);
  1664. if (ret)
  1665. goto ret_intrmod;
  1666. ret = oct_cfg_rx_intrcnt(lio, intr_coal);
  1667. if (ret)
  1668. goto ret_intrmod;
  1669. }
  1670. if (!intr_coal->use_adaptive_tx_coalesce) {
  1671. ret = oct_cfg_tx_intrcnt(lio, intr_coal);
  1672. if (ret)
  1673. goto ret_intrmod;
  1674. }
  1675. return 0;
  1676. ret_intrmod:
  1677. return ret;
  1678. }
  1679. static int lio_get_ts_info(struct net_device *netdev,
  1680. struct ethtool_ts_info *info)
  1681. {
  1682. struct lio *lio = GET_LIO(netdev);
  1683. info->so_timestamping =
  1684. #ifdef PTP_HARDWARE_TIMESTAMPING
  1685. SOF_TIMESTAMPING_TX_HARDWARE |
  1686. SOF_TIMESTAMPING_RX_HARDWARE |
  1687. SOF_TIMESTAMPING_RAW_HARDWARE |
  1688. SOF_TIMESTAMPING_TX_SOFTWARE |
  1689. #endif
  1690. SOF_TIMESTAMPING_RX_SOFTWARE |
  1691. SOF_TIMESTAMPING_SOFTWARE;
  1692. if (lio->ptp_clock)
  1693. info->phc_index = ptp_clock_index(lio->ptp_clock);
  1694. else
  1695. info->phc_index = -1;
  1696. #ifdef PTP_HARDWARE_TIMESTAMPING
  1697. info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
  1698. info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
  1699. (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
  1700. (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
  1701. (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
  1702. #endif
  1703. return 0;
  1704. }
  1705. /* Return register dump len. */
  1706. static int lio_get_regs_len(struct net_device *dev)
  1707. {
  1708. struct lio *lio = GET_LIO(dev);
  1709. struct octeon_device *oct = lio->oct_dev;
  1710. switch (oct->chip_id) {
  1711. case OCTEON_CN23XX_PF_VID:
  1712. return OCT_ETHTOOL_REGDUMP_LEN_23XX;
  1713. case OCTEON_CN23XX_VF_VID:
  1714. return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
  1715. default:
  1716. return OCT_ETHTOOL_REGDUMP_LEN;
  1717. }
  1718. }
  1719. static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
  1720. {
  1721. u32 reg;
  1722. u8 pf_num = oct->pf_num;
  1723. int len = 0;
  1724. int i;
  1725. /* PCI Window Registers */
  1726. len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
  1727. /*0x29030 or 0x29040*/
  1728. reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
  1729. len += sprintf(s + len,
  1730. "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
  1731. reg, oct->pcie_port, oct->pf_num,
  1732. (u64)octeon_read_csr64(oct, reg));
  1733. /*0x27080 or 0x27090*/
  1734. reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
  1735. len +=
  1736. sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
  1737. reg, oct->pcie_port, oct->pf_num,
  1738. (u64)octeon_read_csr64(oct, reg));
  1739. /*0x27000 or 0x27010*/
  1740. reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
  1741. len +=
  1742. sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
  1743. reg, oct->pcie_port, oct->pf_num,
  1744. (u64)octeon_read_csr64(oct, reg));
  1745. /*0x29120*/
  1746. reg = 0x29120;
  1747. len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
  1748. (u64)octeon_read_csr64(oct, reg));
  1749. /*0x27300*/
  1750. reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
  1751. (oct->pf_num) * CN23XX_PF_INT_OFFSET;
  1752. len += sprintf(
  1753. s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
  1754. oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
  1755. /*0x27200*/
  1756. reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
  1757. (oct->pf_num) * CN23XX_PF_INT_OFFSET;
  1758. len += sprintf(s + len,
  1759. "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
  1760. reg, oct->pcie_port, oct->pf_num,
  1761. (u64)octeon_read_csr64(oct, reg));
  1762. /*29130*/
  1763. reg = CN23XX_SLI_PKT_CNT_INT;
  1764. len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
  1765. (u64)octeon_read_csr64(oct, reg));
  1766. /*0x29140*/
  1767. reg = CN23XX_SLI_PKT_TIME_INT;
  1768. len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
  1769. (u64)octeon_read_csr64(oct, reg));
  1770. /*0x29160*/
  1771. reg = 0x29160;
  1772. len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
  1773. (u64)octeon_read_csr64(oct, reg));
  1774. /*0x29180*/
  1775. reg = CN23XX_SLI_OQ_WMARK;
  1776. len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
  1777. reg, (u64)octeon_read_csr64(oct, reg));
  1778. /*0x291E0*/
  1779. reg = CN23XX_SLI_PKT_IOQ_RING_RST;
  1780. len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
  1781. (u64)octeon_read_csr64(oct, reg));
  1782. /*0x29210*/
  1783. reg = CN23XX_SLI_GBL_CONTROL;
  1784. len += sprintf(s + len,
  1785. "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
  1786. (u64)octeon_read_csr64(oct, reg));
  1787. /*0x29220*/
  1788. reg = 0x29220;
  1789. len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
  1790. reg, (u64)octeon_read_csr64(oct, reg));
  1791. /*PF only*/
  1792. if (pf_num == 0) {
  1793. /*0x29260*/
  1794. reg = CN23XX_SLI_OUT_BP_EN_W1S;
  1795. len += sprintf(s + len,
  1796. "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
  1797. reg, (u64)octeon_read_csr64(oct, reg));
  1798. } else if (pf_num == 1) {
  1799. /*0x29270*/
  1800. reg = CN23XX_SLI_OUT_BP_EN2_W1S;
  1801. len += sprintf(s + len,
  1802. "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
  1803. reg, (u64)octeon_read_csr64(oct, reg));
  1804. }
  1805. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1806. reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
  1807. len +=
  1808. sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
  1809. reg, i, (u64)octeon_read_csr64(oct, reg));
  1810. }
  1811. /*0x10040*/
  1812. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1813. reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
  1814. len += sprintf(s + len,
  1815. "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
  1816. reg, i, (u64)octeon_read_csr64(oct, reg));
  1817. }
  1818. /*0x10080*/
  1819. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1820. reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
  1821. len += sprintf(s + len,
  1822. "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
  1823. reg, i, (u64)octeon_read_csr64(oct, reg));
  1824. }
  1825. /*0x10090*/
  1826. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1827. reg = CN23XX_SLI_OQ_SIZE(i);
  1828. len += sprintf(
  1829. s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
  1830. reg, i, (u64)octeon_read_csr64(oct, reg));
  1831. }
  1832. /*0x10050*/
  1833. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1834. reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
  1835. len += sprintf(
  1836. s + len,
  1837. "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
  1838. reg, i, (u64)octeon_read_csr64(oct, reg));
  1839. }
  1840. /*0x10070*/
  1841. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1842. reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
  1843. len += sprintf(s + len,
  1844. "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
  1845. reg, i, (u64)octeon_read_csr64(oct, reg));
  1846. }
  1847. /*0x100a0*/
  1848. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1849. reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
  1850. len += sprintf(s + len,
  1851. "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
  1852. reg, i, (u64)octeon_read_csr64(oct, reg));
  1853. }
  1854. /*0x100b0*/
  1855. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1856. reg = CN23XX_SLI_OQ_PKTS_SENT(i);
  1857. len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
  1858. reg, i, (u64)octeon_read_csr64(oct, reg));
  1859. }
  1860. /*0x100c0*/
  1861. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1862. reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
  1863. len += sprintf(s + len,
  1864. "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
  1865. reg, i, (u64)octeon_read_csr64(oct, reg));
  1866. /*0x10000*/
  1867. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1868. reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
  1869. len += sprintf(
  1870. s + len,
  1871. "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
  1872. reg, i, (u64)octeon_read_csr64(oct, reg));
  1873. }
  1874. /*0x10010*/
  1875. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1876. reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
  1877. len += sprintf(
  1878. s + len,
  1879. "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
  1880. i, (u64)octeon_read_csr64(oct, reg));
  1881. }
  1882. /*0x10020*/
  1883. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1884. reg = CN23XX_SLI_IQ_DOORBELL(i);
  1885. len += sprintf(
  1886. s + len,
  1887. "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
  1888. reg, i, (u64)octeon_read_csr64(oct, reg));
  1889. }
  1890. /*0x10030*/
  1891. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1892. reg = CN23XX_SLI_IQ_SIZE(i);
  1893. len += sprintf(
  1894. s + len,
  1895. "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
  1896. reg, i, (u64)octeon_read_csr64(oct, reg));
  1897. }
  1898. /*0x10040*/
  1899. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
  1900. reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
  1901. len += sprintf(s + len,
  1902. "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
  1903. reg, i, (u64)octeon_read_csr64(oct, reg));
  1904. }
  1905. return len;
  1906. }
/* Dump the CN23XX VF CSRs for each of this VF's rings into 's'.
 * Returns the number of bytes written; the caller supplies a buffer of at
 * least OCT_ETHTOOL_REGDUMP_LEN_23XX_VF bytes (zeroed by lio_get_regs).
 */
static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
{
	int len = 0;
	u32 reg;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	/* Output-queue buffer size, one register per ring */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Input-queue completed-instruction counters */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Output-queue credit doorbells */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Output-queue descriptor-ring sizes */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Output-queue control registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Output-queue descriptor-list base addresses */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Output-queue interrupt coalescing levels (count | time) */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Output-queue packets-sent counters */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Output-queue error info (raw offset 0x100c0 + ring stride) */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Per-ring VF interrupt summary (raw offset 0x100d0 + ring stride) */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Input-queue control registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Input-queue instruction-ring base addresses */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Input-queue doorbells */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Input-queue instruction-ring sizes */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Input-queue completed-instruction counters (again, final group) */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}
  2005. static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
  2006. {
  2007. u32 reg;
  2008. int i, len = 0;
  2009. /* PCI Window Registers */
  2010. len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
  2011. reg = CN6XXX_WIN_WR_ADDR_LO;
  2012. len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
  2013. CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
  2014. reg = CN6XXX_WIN_WR_ADDR_HI;
  2015. len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
  2016. CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
  2017. reg = CN6XXX_WIN_RD_ADDR_LO;
  2018. len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
  2019. CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
  2020. reg = CN6XXX_WIN_RD_ADDR_HI;
  2021. len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
  2022. CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
  2023. reg = CN6XXX_WIN_WR_DATA_LO;
  2024. len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
  2025. CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
  2026. reg = CN6XXX_WIN_WR_DATA_HI;
  2027. len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
  2028. CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
  2029. len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
  2030. CN6XXX_WIN_WR_MASK_REG,
  2031. octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
  2032. /* PCI Interrupt Register */
  2033. len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
  2034. CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
  2035. CN6XXX_SLI_INT_ENB64_PORT0));
  2036. len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
  2037. CN6XXX_SLI_INT_ENB64_PORT1,
  2038. octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
  2039. len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
  2040. octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
  2041. /* PCI Output queue registers */
  2042. for (i = 0; i < oct->num_oqs; i++) {
  2043. reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
  2044. len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
  2045. reg, i, octeon_read_csr(oct, reg));
  2046. reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
  2047. len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
  2048. reg, i, octeon_read_csr(oct, reg));
  2049. }
  2050. reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
  2051. len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
  2052. reg, octeon_read_csr(oct, reg));
  2053. reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
  2054. len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
  2055. reg, octeon_read_csr(oct, reg));
  2056. /* PCI Input queue registers */
  2057. for (i = 0; i <= 3; i++) {
  2058. u32 reg;
  2059. reg = CN6XXX_SLI_IQ_DOORBELL(i);
  2060. len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
  2061. reg, i, octeon_read_csr(oct, reg));
  2062. reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
  2063. len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
  2064. reg, i, octeon_read_csr(oct, reg));
  2065. }
  2066. /* PCI DMA registers */
  2067. len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
  2068. CN6XXX_DMA_CNT(0),
  2069. octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
  2070. reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
  2071. len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
  2072. CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
  2073. reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
  2074. len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
  2075. CN6XXX_DMA_TIME_INT_LEVEL(0),
  2076. octeon_read_csr(oct, reg));
  2077. len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
  2078. CN6XXX_DMA_CNT(1),
  2079. octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
  2080. reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
  2081. len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
  2082. CN6XXX_DMA_PKT_INT_LEVEL(1),
  2083. octeon_read_csr(oct, reg));
  2084. reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
  2085. len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
  2086. CN6XXX_DMA_TIME_INT_LEVEL(1),
  2087. octeon_read_csr(oct, reg));
  2088. /* PCI Index registers */
  2089. len += sprintf(s + len, "\n");
  2090. for (i = 0; i < 16; i++) {
  2091. reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
  2092. len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
  2093. CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
  2094. }
  2095. return len;
  2096. }
  2097. static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
  2098. {
  2099. u32 val;
  2100. int i, len = 0;
  2101. /* PCI CONFIG Registers */
  2102. len += sprintf(s + len,
  2103. "\n\t Octeon Config space Registers\n\n");
  2104. for (i = 0; i <= 13; i++) {
  2105. pci_read_config_dword(oct->pci_dev, (i * 4), &val);
  2106. len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
  2107. (i * 4), i, val);
  2108. }
  2109. for (i = 30; i <= 34; i++) {
  2110. pci_read_config_dword(oct->pci_dev, (i * 4), &val);
  2111. len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
  2112. (i * 4), i, val);
  2113. }
  2114. return len;
  2115. }
  2116. /* Return register dump user app. */
  2117. static void lio_get_regs(struct net_device *dev,
  2118. struct ethtool_regs *regs, void *regbuf)
  2119. {
  2120. struct lio *lio = GET_LIO(dev);
  2121. int len = 0;
  2122. struct octeon_device *oct = lio->oct_dev;
  2123. regs->version = OCT_ETHTOOL_REGSVER;
  2124. switch (oct->chip_id) {
  2125. case OCTEON_CN23XX_PF_VID:
  2126. memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
  2127. len += cn23xx_read_csr_reg(regbuf + len, oct);
  2128. break;
  2129. case OCTEON_CN23XX_VF_VID:
  2130. memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
  2131. len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
  2132. break;
  2133. case OCTEON_CN68XX:
  2134. case OCTEON_CN66XX:
  2135. memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
  2136. len += cn6xxx_read_csr_reg(regbuf + len, oct);
  2137. len += cn6xxx_read_config_reg(regbuf + len, oct);
  2138. break;
  2139. default:
  2140. dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
  2141. __func__, oct->chip_id);
  2142. }
  2143. }
  2144. static u32 lio_get_priv_flags(struct net_device *netdev)
  2145. {
  2146. struct lio *lio = GET_LIO(netdev);
  2147. return lio->oct_dev->priv_flags;
  2148. }
  2149. static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
  2150. {
  2151. struct lio *lio = GET_LIO(netdev);
  2152. bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
  2153. lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
  2154. intr_by_tx_bytes);
  2155. return 0;
  2156. }
/* ethtool callbacks used for PF netdevs (see liquidio_set_ethtool_ops). */
static const struct ethtool_ops lio_ethtool_ops = {
	.get_link_ksettings = lio_get_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = lio_get_drvinfo,
	.get_ringparam = lio_ethtool_get_ringparam,
	.get_channels = lio_ethtool_get_channels,
	.set_phys_id = lio_set_phys_id,
	.get_eeprom_len = lio_get_eeprom_len,
	.get_eeprom = lio_get_eeprom,
	.get_strings = lio_get_strings,
	.get_ethtool_stats = lio_get_ethtool_stats,
	.get_pauseparam = lio_get_pauseparam,
	.set_pauseparam = lio_set_pauseparam,
	.get_regs_len = lio_get_regs_len,
	.get_regs = lio_get_regs,
	.get_msglevel = lio_get_msglevel,
	.set_msglevel = lio_set_msglevel,
	.get_sset_count = lio_get_sset_count,
	.get_coalesce = lio_get_intr_coalesce,
	.set_coalesce = lio_set_intr_coalesce,
	.get_priv_flags = lio_get_priv_flags,
	.set_priv_flags = lio_set_priv_flags,
	.get_ts_info = lio_get_ts_info,
};
/* ethtool callbacks used for VF netdevs: a reduced set of lio_ethtool_ops
 * (no phys-id, eeprom, or pause-parameter control) with VF-specific
 * drvinfo/strings/stats handlers.
 */
static const struct ethtool_ops lio_vf_ethtool_ops = {
	.get_link_ksettings = lio_get_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = lio_get_vf_drvinfo,
	.get_ringparam = lio_ethtool_get_ringparam,
	.get_channels = lio_ethtool_get_channels,
	.get_strings = lio_vf_get_strings,
	.get_ethtool_stats = lio_vf_get_ethtool_stats,
	.get_regs_len = lio_get_regs_len,
	.get_regs = lio_get_regs,
	.get_msglevel = lio_get_msglevel,
	.set_msglevel = lio_set_msglevel,
	.get_sset_count = lio_vf_get_sset_count,
	.get_coalesce = lio_get_intr_coalesce,
	.set_coalesce = lio_set_intr_coalesce,
	.get_priv_flags = lio_get_priv_flags,
	.set_priv_flags = lio_set_priv_flags,
	.get_ts_info = lio_get_ts_info,
};
  2200. void liquidio_set_ethtool_ops(struct net_device *netdev)
  2201. {
  2202. struct lio *lio = GET_LIO(netdev);
  2203. struct octeon_device *oct = lio->oct_dev;
  2204. if (OCTEON_CN23XX_VF(oct))
  2205. netdev->ethtool_ops = &lio_vf_ethtool_ops;
  2206. else
  2207. netdev->ethtool_ops = &lio_ethtool_ops;
  2208. }