lio_ethtool.c 73 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531
  1. /**********************************************************************
  2. * Author: Cavium, Inc.
  3. *
  4. * Contact: support@cavium.com
  5. * Please include "LiquidIO" in the subject.
  6. *
  7. * Copyright (c) 2003-2016 Cavium, Inc.
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more details.
  17. ***********************************************************************/
  18. #include <linux/netdevice.h>
  19. #include <linux/net_tstamp.h>
  20. #include <linux/pci.h>
  21. #include "liquidio_common.h"
  22. #include "octeon_droq.h"
  23. #include "octeon_iq.h"
  24. #include "response_manager.h"
  25. #include "octeon_device.h"
  26. #include "octeon_nic.h"
  27. #include "octeon_main.h"
  28. #include "octeon_network.h"
  29. #include "cn66xx_regs.h"
  30. #include "cn66xx_device.h"
  31. #include "cn23xx_pf_device.h"
  32. #include "cn23xx_vf_device.h"
  33. static int octnet_get_link_stats(struct net_device *netdev);
/* Context handed to the MDIO soft command via sc->ctxptr; the issuing
 * thread sleeps on @wc until the response callback sets @cond.
 */
struct oct_mdio_cmd_context {
	int octeon_id;		/* device index used to re-find the octeon_device */
	wait_queue_head_t wc;	/* wait queue the issuer sleeps on */
	int cond;		/* 0 = pending, 1 = success, -1 = failure */
};
/* Response buffer layout for an MDIO soft command. */
struct oct_mdio_cmd_resp {
	u64 rh;				/* response header */
	struct oct_mdio_cmd resp;	/* echoed command; value1 holds a read result */
	u64 status;			/* 0 on success (checked by the issuer) */
};
  44. #define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
/* Octeon's interface mode of operation; these values are compared
 * against linfo->link.s.if_mode (see lio_get_link_ksettings()).
 */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};
  68. #define OCT_ETHTOOL_REGDUMP_LEN 4096
  69. #define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
  70. #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2)
  71. #define OCT_ETHTOOL_REGSVER 1
/* statistics of PF
 *
 * ethtool -S stat names for the PF netdev.  The order here must match
 * the order in which lio_get_ethtool_stats() fills the data array.
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",	/*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
			 *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",
	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",
	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",
	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",	/* NOTE(review): misspelled, but it is ABI-visible; renaming would break scripts */
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",
	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",
	"rx_vxlan",
	"rx_vxlan_err",
	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",
	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",
	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",
	"link_state_changes",
};
/* statistics of VF
 *
 * ethtool -S stat names for the VF netdev; order must match the VF
 * stats fill routine.
 */
static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/* jabber_err + l2_err+frame_err */
	"tx_errors",	/* fw_err_pko + fw_err_link+fw_err_drop */
	"rx_dropped",	/* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
	"tx_dropped",
	"link_state_changes",
};
/* statistics of host tx queue
 *
 * Per-instruction-queue (TX) stat names, repeated once per IQ in the
 * ethtool string table.
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",	/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",	/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",
	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",
	"tso",
	"vxlan",
	"txq_restart",
};
/* statistics of host rx queue
 *
 * Per-DROQ (RX) stat names, repeated once per OQ in the ethtool
 * string table.
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",	/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",	/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",	/*oct->droq[oq_no]->stats.rx_dropped+
			 *oct->droq[oq_no]->stats.dropped_nodispatch+
			 *oct->droq[oq_no]->stats.dropped_toomany+
			 *oct->droq[oq_no]->stats.dropped_nomem
			 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",
	"vxlan",
	"buffer_alloc_failure",
};
/* LiquidIO driver private flags */
static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
	/* intentionally empty: no private flags are exposed yet */
};
  183. #define OCTNIC_NCMD_AUTONEG_ON 0x1
  184. #define OCTNIC_NCMD_PHY_ON 0x2
  185. static int lio_get_link_ksettings(struct net_device *netdev,
  186. struct ethtool_link_ksettings *ecmd)
  187. {
  188. struct lio *lio = GET_LIO(netdev);
  189. struct octeon_device *oct = lio->oct_dev;
  190. struct oct_link_info *linfo;
  191. u32 supported, advertising;
  192. linfo = &lio->linfo;
  193. if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
  194. linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
  195. linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
  196. ecmd->base.port = PORT_FIBRE;
  197. supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
  198. SUPPORTED_Pause);
  199. advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
  200. ethtool_convert_legacy_u32_to_link_mode(
  201. ecmd->link_modes.supported, supported);
  202. ethtool_convert_legacy_u32_to_link_mode(
  203. ecmd->link_modes.advertising, advertising);
  204. ecmd->base.autoneg = AUTONEG_DISABLE;
  205. } else {
  206. dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
  207. linfo->link.s.if_mode);
  208. }
  209. if (linfo->link.s.link_up) {
  210. ecmd->base.speed = linfo->link.s.speed;
  211. ecmd->base.duplex = linfo->link.s.duplex;
  212. } else {
  213. ecmd->base.speed = SPEED_UNKNOWN;
  214. ecmd->base.duplex = DUPLEX_UNKNOWN;
  215. }
  216. return 0;
  217. }
  218. static void
  219. lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
  220. {
  221. struct lio *lio;
  222. struct octeon_device *oct;
  223. lio = GET_LIO(netdev);
  224. oct = lio->oct_dev;
  225. memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
  226. strcpy(drvinfo->driver, "liquidio");
  227. strcpy(drvinfo->version, LIQUIDIO_VERSION);
  228. strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
  229. ETHTOOL_FWVERS_LEN);
  230. strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
  231. }
  232. static void
  233. lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
  234. {
  235. struct octeon_device *oct;
  236. struct lio *lio;
  237. lio = GET_LIO(netdev);
  238. oct = lio->oct_dev;
  239. memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
  240. strcpy(drvinfo->driver, "liquidio_vf");
  241. strcpy(drvinfo->version, LIQUIDIO_VERSION);
  242. strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
  243. ETHTOOL_FWVERS_LEN);
  244. strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
  245. }
  246. static void
  247. lio_ethtool_get_channels(struct net_device *dev,
  248. struct ethtool_channels *channel)
  249. {
  250. struct lio *lio = GET_LIO(dev);
  251. struct octeon_device *oct = lio->oct_dev;
  252. u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
  253. if (OCTEON_CN6XXX(oct)) {
  254. struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
  255. max_rx = CFG_GET_OQ_MAX_Q(conf6x);
  256. max_tx = CFG_GET_IQ_MAX_Q(conf6x);
  257. rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
  258. tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
  259. } else if (OCTEON_CN23XX_PF(oct)) {
  260. struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
  261. max_rx = CFG_GET_OQ_MAX_Q(conf23);
  262. max_tx = CFG_GET_IQ_MAX_Q(conf23);
  263. rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
  264. tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
  265. }
  266. channel->max_rx = max_rx;
  267. channel->max_tx = max_tx;
  268. channel->rx_count = rx_count;
  269. channel->tx_count = tx_count;
  270. }
  271. static int lio_get_eeprom_len(struct net_device *netdev)
  272. {
  273. u8 buf[128];
  274. struct lio *lio = GET_LIO(netdev);
  275. struct octeon_device *oct_dev = lio->oct_dev;
  276. struct octeon_board_info *board_info;
  277. int len;
  278. board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
  279. len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
  280. board_info->name, board_info->serial_number,
  281. board_info->major, board_info->minor);
  282. return len;
  283. }
  284. static int
  285. lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
  286. u8 *bytes)
  287. {
  288. struct lio *lio = GET_LIO(netdev);
  289. struct octeon_device *oct_dev = lio->oct_dev;
  290. struct octeon_board_info *board_info;
  291. if (eeprom->offset)
  292. return -EINVAL;
  293. eeprom->magic = oct_dev->pci_dev->vendor;
  294. board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
  295. sprintf((char *)bytes,
  296. "boardname:%s serialnum:%s maj:%lld min:%lld\n",
  297. board_info->name, board_info->serial_number,
  298. board_info->major, board_info->minor);
  299. return 0;
  300. }
  301. static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
  302. {
  303. struct lio *lio = GET_LIO(netdev);
  304. struct octeon_device *oct = lio->oct_dev;
  305. struct octnic_ctrl_pkt nctrl;
  306. int ret = 0;
  307. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  308. nctrl.ncmd.u64 = 0;
  309. nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
  310. nctrl.ncmd.s.param1 = addr;
  311. nctrl.ncmd.s.param2 = val;
  312. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  313. nctrl.wait_time = 100;
  314. nctrl.netpndev = (u64)netdev;
  315. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  316. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  317. if (ret < 0) {
  318. dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
  319. return -EINVAL;
  320. }
  321. return 0;
  322. }
  323. static int octnet_id_active(struct net_device *netdev, int val)
  324. {
  325. struct lio *lio = GET_LIO(netdev);
  326. struct octeon_device *oct = lio->oct_dev;
  327. struct octnic_ctrl_pkt nctrl;
  328. int ret = 0;
  329. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  330. nctrl.ncmd.u64 = 0;
  331. nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
  332. nctrl.ncmd.s.param1 = val;
  333. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  334. nctrl.wait_time = 100;
  335. nctrl.netpndev = (u64)netdev;
  336. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  337. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  338. if (ret < 0) {
  339. dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
  340. return -EINVAL;
  341. }
  342. return 0;
  343. }
  344. /* Callback for when mdio command response arrives
  345. */
  346. static void octnet_mdio_resp_callback(struct octeon_device *oct,
  347. u32 status,
  348. void *buf)
  349. {
  350. struct oct_mdio_cmd_context *mdio_cmd_ctx;
  351. struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
  352. mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
  353. oct = lio_get_device(mdio_cmd_ctx->octeon_id);
  354. if (status) {
  355. dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
  356. CVM_CAST64(status));
  357. WRITE_ONCE(mdio_cmd_ctx->cond, -1);
  358. } else {
  359. WRITE_ONCE(mdio_cmd_ctx->cond, 1);
  360. }
  361. wake_up_interruptible(&mdio_cmd_ctx->wc);
  362. }
/* This routine provides PHY access routines for
 * mdio clause45 .
 *
 * @lio:   per-interface private data
 * @op:    0 = read, non-zero = write
 * @loc:   MDIO register address
 * @value: in: value to write (write op); out: value read (read op)
 *
 * Sends an MDIO soft command to the firmware and sleeps until the
 * response callback fires.  Returns 0 on success, -ENOMEM on allocation
 * failure, -EBUSY on send/response failure, -EINVAL if the callback
 * reported an error.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	/* One allocation carries the command (DMA data), the response
	 * buffer, and the wait context.
	 */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));
	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);	/* not completed yet */
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	/* Firmware expects the command words byte-swapped. */
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			/* Response payload is byte-swapped as well. */
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);
			/* cond == 1 means the callback saw success. */
			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
/* ethtool set_phys_id handler (ethtool -p): drive the port
 * identification LED.
 *
 * CN66XX toggles the Vitesse PHY GPIO directly; CN68XX saves, overrides
 * and later restores the LED beacon/control registers over MDIO;
 * CN23XX PF delegates to firmware via octnet_id_active().  Other chips
 * return -EINVAL.
 */
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			/* per the phys_id contract a positive return asks
			 * ethtool to cycle ON/OFF this many times a second
			 */
			return 2;
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);

			/* returns 0 since updates are asynchronous */
			return 0;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* CN68XX beacon hardware blinks on its own; manual
			 * ON/OFF is not supported.
			 */
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);

			return 0;
		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
  520. static void
  521. lio_ethtool_get_ringparam(struct net_device *netdev,
  522. struct ethtool_ringparam *ering)
  523. {
  524. struct lio *lio = GET_LIO(netdev);
  525. struct octeon_device *oct = lio->oct_dev;
  526. u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
  527. rx_pending = 0;
  528. if (OCTEON_CN6XXX(oct)) {
  529. struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
  530. tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
  531. rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
  532. rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
  533. tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
  534. } else if (OCTEON_CN23XX_PF(oct)) {
  535. struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
  536. tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
  537. rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
  538. rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
  539. tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
  540. }
  541. if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
  542. ering->rx_pending = 0;
  543. ering->rx_max_pending = 0;
  544. ering->rx_mini_pending = 0;
  545. ering->rx_jumbo_pending = rx_pending;
  546. ering->rx_mini_max_pending = 0;
  547. ering->rx_jumbo_max_pending = rx_max_pending;
  548. } else {
  549. ering->rx_pending = rx_pending;
  550. ering->rx_max_pending = rx_max_pending;
  551. ering->rx_mini_pending = 0;
  552. ering->rx_jumbo_pending = 0;
  553. ering->rx_mini_max_pending = 0;
  554. ering->rx_jumbo_max_pending = 0;
  555. }
  556. ering->tx_pending = tx_pending;
  557. ering->tx_max_pending = tx_max_pending;
  558. }
  559. static u32 lio_get_msglevel(struct net_device *netdev)
  560. {
  561. struct lio *lio = GET_LIO(netdev);
  562. return lio->msg_enable;
  563. }
  564. static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
  565. {
  566. struct lio *lio = GET_LIO(netdev);
  567. if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
  568. if (msglvl & NETIF_MSG_HW)
  569. liquidio_set_feature(netdev,
  570. OCTNET_CMD_VERBOSE_ENABLE, 0);
  571. else
  572. liquidio_set_feature(netdev,
  573. OCTNET_CMD_VERBOSE_DISABLE, 0);
  574. }
  575. lio->msg_enable = msglvl;
  576. }
  577. static void
  578. lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
  579. {
  580. /* Notes: Not supporting any auto negotiation in these
  581. * drivers. Just report pause frame support.
  582. */
  583. struct lio *lio = GET_LIO(netdev);
  584. struct octeon_device *oct = lio->oct_dev;
  585. pause->autoneg = 0;
  586. pause->tx_pause = oct->tx_pause;
  587. pause->rx_pause = oct->rx_pause;
  588. }
  589. static int
  590. lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
  591. {
  592. /* Notes: Not supporting any auto negotiation in these
  593. * drivers.
  594. */
  595. struct lio *lio = GET_LIO(netdev);
  596. struct octeon_device *oct = lio->oct_dev;
  597. struct octnic_ctrl_pkt nctrl;
  598. struct oct_link_info *linfo = &lio->linfo;
  599. int ret = 0;
  600. if (oct->chip_id != OCTEON_CN23XX_PF_VID)
  601. return -EINVAL;
  602. if (linfo->link.s.duplex == 0) {
  603. /*no flow control for half duplex*/
  604. if (pause->rx_pause || pause->tx_pause)
  605. return -EINVAL;
  606. }
  607. /*do not support autoneg of link flow control*/
  608. if (pause->autoneg == AUTONEG_ENABLE)
  609. return -EINVAL;
  610. memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
  611. nctrl.ncmd.u64 = 0;
  612. nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
  613. nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
  614. nctrl.wait_time = 100;
  615. nctrl.netpndev = (u64)netdev;
  616. nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
  617. if (pause->rx_pause) {
  618. /*enable rx pause*/
  619. nctrl.ncmd.s.param1 = 1;
  620. } else {
  621. /*disable rx pause*/
  622. nctrl.ncmd.s.param1 = 0;
  623. }
  624. if (pause->tx_pause) {
  625. /*enable tx pause*/
  626. nctrl.ncmd.s.param2 = 1;
  627. } else {
  628. /*disable tx pause*/
  629. nctrl.ncmd.s.param2 = 0;
  630. }
  631. ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
  632. if (ret < 0) {
  633. dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
  634. return -EINVAL;
  635. }
  636. oct->rx_pause = pause->rx_pause;
  637. oct->tx_pause = pause->tx_pause;
  638. return 0;
  639. }
/* lio_get_ethtool_stats - dump PF statistics for ethtool -S.
 *
 * Fills @data in the exact order of the oct_stats_strings[] table:
 * netdev aggregate counters, firmware/MAC tx counters, firmware/MAC rx
 * counters (including LRO), the link-change count, and finally the
 * per-IQ and per-DROQ counters of every active queue.  Do not reorder
 * any data[i++] line without updating oct_stats_strings[] to match.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	/* Refresh netdev counters and pull fresh firmware/MAC counters
	 * from the card into oct_dev->link_stats before dumping.
	 */
	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * dropped_nodispatch + dropped_toomany + dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/* firmware tx stats (link_stats.fromhost.*) */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* mac tx statistics (BGX CMR tx hardware counters) */
	/* CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/* CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/* CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/* CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/* CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/* CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats (link_stats.fromwire.*) */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO counters */
	/* number of packets that went through LRO */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/* number of octets that went through LRO */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/* number of LRO super-packets formed */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/* LRO aborts, total and by cause */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/* intrmod: packet forward rate */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level rx stats (BGX CMR / PKI hardware counters) */
	/* CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/* CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/* CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);

	/* number of link state transitions seen by the driver */
	data[i++] = CVM_CAST64(lio->link_changes);

	/* per-IQ counters for every instruction queue owned by this PF */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/* packets tx'd to the network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* bytes tx'd to the network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/* tx fails because the queue was full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* scatter-gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
		/* instructions (data and control) posted to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/* instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/* instructions that could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
		/* TSO requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* VXLAN requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restarts */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* per-DROQ counters for every output (rx) queue owned by this PF */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;
		/* packets handed to the network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/* bytes handed to the network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/* total rx drops, then the individual causes */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
		/* control and data path counters */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
/* lio_vf_get_ethtool_stats - dump VF statistics for ethtool -S.
 *
 * Fills @data in the exact order of the oct_vf_stats_strings[] table:
 * netdev aggregate counters, the link-change count, then per-IQ and
 * per-DROQ counters for each queue this VF owns (indexed through
 * linfo.txpciq/rxpciq rather than the PF-wide queue mask).  Do not
 * reorder any data[i++] line without updating oct_vf_stats_strings[].
 */
static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct net_device_stats *netstats = &netdev->stats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	/* refresh the netdev aggregate counters first */
	netdev->netdev_ops->ndo_get_stats(netdev);

	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * dropped_nodispatch + dropped_toomany + dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);
	/* number of link state transitions seen by the driver */
	data[i++] = CVM_CAST64(lio->link_changes);

	/* per-IQ counters; vj indexes the VF's queue list, j is the
	 * device-wide queue number it maps to
	 */
	for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets tx'd to the network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* bytes tx'd to the network */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* packets dropped */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_dropped);
		/* tx fails because the queue was full */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* scatter-gather entries sent */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.sgentry_sent);
		/* instructions (data and control) posted to the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_posted);
		/* instructions processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* instructions that could not be processed */
		data[i++] =
		    CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.bytes_sent);
		/* TSO requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* VXLAN requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restarts */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* per-DROQ counters for the VF's rx queues */
	for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets handed to the network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_pkts_received);
		/* bytes handed to the network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_bytes_received);
		/* total rx drops, then the individual causes */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
		/* control and data path counters */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
  979. static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
  980. {
  981. struct octeon_device *oct_dev = lio->oct_dev;
  982. int i;
  983. switch (oct_dev->chip_id) {
  984. case OCTEON_CN23XX_PF_VID:
  985. case OCTEON_CN23XX_VF_VID:
  986. for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
  987. sprintf(data, "%s", oct_priv_flags_strings[i]);
  988. data += ETH_GSTRING_LEN;
  989. }
  990. break;
  991. case OCTEON_CN68XX:
  992. case OCTEON_CN66XX:
  993. break;
  994. default:
  995. netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
  996. break;
  997. }
  998. }
  999. static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
  1000. {
  1001. struct lio *lio = GET_LIO(netdev);
  1002. struct octeon_device *oct_dev = lio->oct_dev;
  1003. int num_iq_stats, num_oq_stats, i, j;
  1004. int num_stats;
  1005. switch (stringset) {
  1006. case ETH_SS_STATS:
  1007. num_stats = ARRAY_SIZE(oct_stats_strings);
  1008. for (j = 0; j < num_stats; j++) {
  1009. sprintf(data, "%s", oct_stats_strings[j]);
  1010. data += ETH_GSTRING_LEN;
  1011. }
  1012. num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
  1013. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
  1014. if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
  1015. continue;
  1016. for (j = 0; j < num_iq_stats; j++) {
  1017. sprintf(data, "tx-%d-%s", i,
  1018. oct_iq_stats_strings[j]);
  1019. data += ETH_GSTRING_LEN;
  1020. }
  1021. }
  1022. num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
  1023. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
  1024. if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
  1025. continue;
  1026. for (j = 0; j < num_oq_stats; j++) {
  1027. sprintf(data, "rx-%d-%s", i,
  1028. oct_droq_stats_strings[j]);
  1029. data += ETH_GSTRING_LEN;
  1030. }
  1031. }
  1032. break;
  1033. case ETH_SS_PRIV_FLAGS:
  1034. lio_get_priv_flags_strings(lio, data);
  1035. break;
  1036. default:
  1037. netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
  1038. break;
  1039. }
  1040. }
  1041. static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
  1042. u8 *data)
  1043. {
  1044. int num_iq_stats, num_oq_stats, i, j;
  1045. struct lio *lio = GET_LIO(netdev);
  1046. struct octeon_device *oct_dev = lio->oct_dev;
  1047. int num_stats;
  1048. switch (stringset) {
  1049. case ETH_SS_STATS:
  1050. num_stats = ARRAY_SIZE(oct_vf_stats_strings);
  1051. for (j = 0; j < num_stats; j++) {
  1052. sprintf(data, "%s", oct_vf_stats_strings[j]);
  1053. data += ETH_GSTRING_LEN;
  1054. }
  1055. num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
  1056. for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
  1057. if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
  1058. continue;
  1059. for (j = 0; j < num_iq_stats; j++) {
  1060. sprintf(data, "tx-%d-%s", i,
  1061. oct_iq_stats_strings[j]);
  1062. data += ETH_GSTRING_LEN;
  1063. }
  1064. }
  1065. num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
  1066. for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
  1067. if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
  1068. continue;
  1069. for (j = 0; j < num_oq_stats; j++) {
  1070. sprintf(data, "rx-%d-%s", i,
  1071. oct_droq_stats_strings[j]);
  1072. data += ETH_GSTRING_LEN;
  1073. }
  1074. }
  1075. break;
  1076. case ETH_SS_PRIV_FLAGS:
  1077. lio_get_priv_flags_strings(lio, data);
  1078. break;
  1079. default:
  1080. netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
  1081. break;
  1082. }
  1083. }
  1084. static int lio_get_priv_flags_ss_count(struct lio *lio)
  1085. {
  1086. struct octeon_device *oct_dev = lio->oct_dev;
  1087. switch (oct_dev->chip_id) {
  1088. case OCTEON_CN23XX_PF_VID:
  1089. case OCTEON_CN23XX_VF_VID:
  1090. return ARRAY_SIZE(oct_priv_flags_strings);
  1091. case OCTEON_CN68XX:
  1092. case OCTEON_CN66XX:
  1093. return -EOPNOTSUPP;
  1094. default:
  1095. netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
  1096. return -EOPNOTSUPP;
  1097. }
  1098. }
  1099. static int lio_get_sset_count(struct net_device *netdev, int sset)
  1100. {
  1101. struct lio *lio = GET_LIO(netdev);
  1102. struct octeon_device *oct_dev = lio->oct_dev;
  1103. switch (sset) {
  1104. case ETH_SS_STATS:
  1105. return (ARRAY_SIZE(oct_stats_strings) +
  1106. ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
  1107. ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
  1108. case ETH_SS_PRIV_FLAGS:
  1109. return lio_get_priv_flags_ss_count(lio);
  1110. default:
  1111. return -EOPNOTSUPP;
  1112. }
  1113. }
  1114. static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
  1115. {
  1116. struct lio *lio = GET_LIO(netdev);
  1117. struct octeon_device *oct_dev = lio->oct_dev;
  1118. switch (sset) {
  1119. case ETH_SS_STATS:
  1120. return (ARRAY_SIZE(oct_vf_stats_strings) +
  1121. ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
  1122. ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
  1123. case ETH_SS_PRIV_FLAGS:
  1124. return lio_get_priv_flags_ss_count(lio);
  1125. default:
  1126. return -EOPNOTSUPP;
  1127. }
  1128. }
/* lio_get_intr_coalesce - ethtool get_coalesce handler.
 *
 * Reports the static (non-adaptive) rx/tx coalescing values only when
 * the corresponding adaptive (intrmod) mode is OFF; when adaptive mode
 * is on, the adaptive trigger/threshold fields are reported instead.
 * The static values come from different places per chip family:
 * software intrmod state for CN23XX, the 6xxx config registers and the
 * IQ fill threshold for CN66XX/CN68XX.
 *
 * Returns 0, or -EINVAL for an unrecognized chip.
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		/* static values are meaningful only when adaptive
		 * moderation is disabled for that direction
		 */
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs = intrmod_cfg->rx_usecs;
			intr_coal->rx_max_coalesced_frames =
				intrmod_cfg->rx_frames;
		}
		if (!intrmod_cfg->tx_enable)
			intr_coal->tx_max_coalesced_frames =
				intrmod_cfg->tx_frames;
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		/* 6xxx chips keep the static rx values in the device
		 * configuration rather than in intrmod state
		 */
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}
		/* tx coalescing on 6xxx is the first IQ's fill threshold */
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	/* adaptive rx: report the intrmod thresholds and triggers */
	if (intrmod_cfg->rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	/* adaptive tx exists only on CN23XX */
	if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
	    (intrmod_cfg->tx_enable)) {
		intr_coal->use_adaptive_tx_coalesce = intrmod_cfg->tx_enable;
		intr_coal->tx_max_coalesced_frames_high =
			intrmod_cfg->tx_maxcnt_trigger;
		intr_coal->tx_max_coalesced_frames_low =
			intrmod_cfg->tx_mincnt_trigger;
	}
	return 0;
}
  1195. /* Callback function for intrmod */
  1196. static void octnet_intrmod_callback(struct octeon_device *oct_dev,
  1197. u32 status,
  1198. void *ptr)
  1199. {
  1200. struct oct_intrmod_cmd *cmd = ptr;
  1201. struct octeon_soft_command *sc = cmd->sc;
  1202. oct_dev = cmd->oct_dev;
  1203. if (status)
  1204. dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
  1205. CVM_CAST64(status));
  1206. else
  1207. dev_info(&oct_dev->pci_dev->dev,
  1208. "Rx-Adaptive Interrupt moderation enabled:%llx\n",
  1209. oct_dev->intrmod.rx_enable);
  1210. octeon_free_soft_command(oct_dev, sc);
  1211. }
  1212. /* Configure interrupt moderation parameters */
  1213. static int octnet_set_intrmod_cfg(struct lio *lio,
  1214. struct oct_intrmod_cfg *intr_cfg)
  1215. {
  1216. struct octeon_soft_command *sc;
  1217. struct oct_intrmod_cmd *cmd;
  1218. struct oct_intrmod_cfg *cfg;
  1219. int retval;
  1220. struct octeon_device *oct_dev = lio->oct_dev;
  1221. /* Alloc soft command */
  1222. sc = (struct octeon_soft_command *)
  1223. octeon_alloc_soft_command(oct_dev,
  1224. sizeof(struct oct_intrmod_cfg),
  1225. 0,
  1226. sizeof(struct oct_intrmod_cmd));
  1227. if (!sc)
  1228. return -ENOMEM;
  1229. cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
  1230. cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
  1231. memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
  1232. octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
  1233. cmd->sc = sc;
  1234. cmd->cfg = cfg;
  1235. cmd->oct_dev = oct_dev;
  1236. sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  1237. octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
  1238. OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
  1239. sc->callback = octnet_intrmod_callback;
  1240. sc->callback_arg = cmd;
  1241. sc->wait_time = 1000;
  1242. retval = octeon_send_soft_command(oct_dev, sc);
  1243. if (retval == IQ_SEND_FAILED) {
  1244. octeon_free_soft_command(oct_dev, sc);
  1245. return -EINVAL;
  1246. }
  1247. return 0;
  1248. }
/* octnet_nic_stats_callback - completion callback for
 * OPCODE_NIC_PORT_STATS (issued by octnet_get_link_stats()).
 *
 * On success, byte-swaps the firmware response in place and copies each
 * counter into the cached oct_dev->link_stats.  Sets resp->status to 1
 * on success or -1 on timeout/error — octnet_get_link_stats() polls
 * that field after waiting — and completes ctrl->complete to wake the
 * waiter either way.
 */
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
	    (struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
	    (struct oct_nic_stats_ctrl *)sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		/* response arrives big-endian; swap 8-byte words */
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level (MAC) stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* number of packets that were LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* number of octets that were LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* number of LRO super-packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* LRO aborts, total and by cause */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level (MAC) stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* TX firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}
  1336. /* Configure interrupt moderation parameters */
  1337. static int octnet_get_link_stats(struct net_device *netdev)
  1338. {
  1339. struct lio *lio = GET_LIO(netdev);
  1340. struct octeon_device *oct_dev = lio->oct_dev;
  1341. struct octeon_soft_command *sc;
  1342. struct oct_nic_stats_ctrl *ctrl;
  1343. struct oct_nic_stats_resp *resp;
  1344. int retval;
  1345. /* Alloc soft command */
  1346. sc = (struct octeon_soft_command *)
  1347. octeon_alloc_soft_command(oct_dev,
  1348. 0,
  1349. sizeof(struct oct_nic_stats_resp),
  1350. sizeof(struct octnic_ctrl_pkt));
  1351. if (!sc)
  1352. return -ENOMEM;
  1353. resp = (struct oct_nic_stats_resp *)sc->virtrptr;
  1354. memset(resp, 0, sizeof(struct oct_nic_stats_resp));
  1355. ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
  1356. memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
  1357. ctrl->netdev = netdev;
  1358. init_completion(&ctrl->complete);
  1359. sc->iq_no = lio->linfo.txpciq[0].s.q_no;
  1360. octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
  1361. OPCODE_NIC_PORT_STATS, 0, 0, 0);
  1362. sc->callback = octnet_nic_stats_callback;
  1363. sc->callback_arg = sc;
  1364. sc->wait_time = 500; /*in milli seconds*/
  1365. retval = octeon_send_soft_command(oct_dev, sc);
  1366. if (retval == IQ_SEND_FAILED) {
  1367. octeon_free_soft_command(oct_dev, sc);
  1368. return -EINVAL;
  1369. }
  1370. wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
  1371. if (resp->status != 1) {
  1372. octeon_free_soft_command(oct_dev, sc);
  1373. return -EINVAL;
  1374. }
  1375. octeon_free_soft_command(oct_dev, sc);
  1376. return 0;
  1377. }
  1378. /* Enable/Disable auto interrupt Moderation */
  1379. static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
  1380. *intr_coal)
  1381. {
  1382. int ret = 0;
  1383. struct octeon_device *oct = lio->oct_dev;
  1384. struct oct_intrmod_cfg *intrmod_cfg;
  1385. intrmod_cfg = &oct->intrmod;
  1386. if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
  1387. if (intr_coal->rate_sample_interval)
  1388. intrmod_cfg->check_intrvl =
  1389. intr_coal->rate_sample_interval;
  1390. else
  1391. intrmod_cfg->check_intrvl =
  1392. LIO_INTRMOD_CHECK_INTERVAL;
  1393. if (intr_coal->pkt_rate_high)
  1394. intrmod_cfg->maxpkt_ratethr =
  1395. intr_coal->pkt_rate_high;
  1396. else
  1397. intrmod_cfg->maxpkt_ratethr =
  1398. LIO_INTRMOD_MAXPKT_RATETHR;
  1399. if (intr_coal->pkt_rate_low)
  1400. intrmod_cfg->minpkt_ratethr =
  1401. intr_coal->pkt_rate_low;
  1402. else
  1403. intrmod_cfg->minpkt_ratethr =
  1404. LIO_INTRMOD_MINPKT_RATETHR;
  1405. }
  1406. if (oct->intrmod.rx_enable) {
  1407. if (intr_coal->rx_max_coalesced_frames_high)
  1408. intrmod_cfg->rx_maxcnt_trigger =
  1409. intr_coal->rx_max_coalesced_frames_high;
  1410. else
  1411. intrmod_cfg->rx_maxcnt_trigger =
  1412. LIO_INTRMOD_RXMAXCNT_TRIGGER;
  1413. if (intr_coal->rx_coalesce_usecs_high)
  1414. intrmod_cfg->rx_maxtmr_trigger =
  1415. intr_coal->rx_coalesce_usecs_high;
  1416. else
  1417. intrmod_cfg->rx_maxtmr_trigger =
  1418. LIO_INTRMOD_RXMAXTMR_TRIGGER;
  1419. if (intr_coal->rx_coalesce_usecs_low)
  1420. intrmod_cfg->rx_mintmr_trigger =
  1421. intr_coal->rx_coalesce_usecs_low;
  1422. else
  1423. intrmod_cfg->rx_mintmr_trigger =
  1424. LIO_INTRMOD_RXMINTMR_TRIGGER;
  1425. if (intr_coal->rx_max_coalesced_frames_low)
  1426. intrmod_cfg->rx_mincnt_trigger =
  1427. intr_coal->rx_max_coalesced_frames_low;
  1428. else
  1429. intrmod_cfg->rx_mincnt_trigger =
  1430. LIO_INTRMOD_RXMINCNT_TRIGGER;
  1431. }
  1432. if (oct->intrmod.tx_enable) {
  1433. if (intr_coal->tx_max_coalesced_frames_high)
  1434. intrmod_cfg->tx_maxcnt_trigger =
  1435. intr_coal->tx_max_coalesced_frames_high;
  1436. else
  1437. intrmod_cfg->tx_maxcnt_trigger =
  1438. LIO_INTRMOD_TXMAXCNT_TRIGGER;
  1439. if (intr_coal->tx_max_coalesced_frames_low)
  1440. intrmod_cfg->tx_mincnt_trigger =
  1441. intr_coal->tx_max_coalesced_frames_low;
  1442. else
  1443. intrmod_cfg->tx_mincnt_trigger =
  1444. LIO_INTRMOD_TXMINCNT_TRIGGER;
  1445. }
  1446. ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
  1447. return ret;
  1448. }
  1449. static int
  1450. oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
  1451. {
  1452. struct octeon_device *oct = lio->oct_dev;
  1453. u32 rx_max_coalesced_frames;
  1454. /* Config Cnt based interrupt values */
  1455. switch (oct->chip_id) {
  1456. case OCTEON_CN68XX:
  1457. case OCTEON_CN66XX: {
  1458. struct octeon_cn6xxx *cn6xxx =
  1459. (struct octeon_cn6xxx *)oct->chip;
  1460. if (!intr_coal->rx_max_coalesced_frames)
  1461. rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
  1462. else
  1463. rx_max_coalesced_frames =
  1464. intr_coal->rx_max_coalesced_frames;
  1465. octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
  1466. rx_max_coalesced_frames);
  1467. CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
  1468. break;
  1469. }
  1470. case OCTEON_CN23XX_PF_VID: {
  1471. int q_no;
  1472. if (!intr_coal->rx_max_coalesced_frames)
  1473. rx_max_coalesced_frames = oct->intrmod.rx_frames;
  1474. else
  1475. rx_max_coalesced_frames =
  1476. intr_coal->rx_max_coalesced_frames;
  1477. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1478. q_no += oct->sriov_info.pf_srn;
  1479. octeon_write_csr64(
  1480. oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
  1481. (octeon_read_csr64(
  1482. oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
  1483. (0x3fffff00000000UL)) |
  1484. rx_max_coalesced_frames);
  1485. /*consider setting resend bit*/
  1486. }
  1487. oct->intrmod.rx_frames = rx_max_coalesced_frames;
  1488. break;
  1489. }
  1490. case OCTEON_CN23XX_VF_VID: {
  1491. int q_no;
  1492. if (!intr_coal->rx_max_coalesced_frames)
  1493. rx_max_coalesced_frames = oct->intrmod.rx_frames;
  1494. else
  1495. rx_max_coalesced_frames =
  1496. intr_coal->rx_max_coalesced_frames;
  1497. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1498. octeon_write_csr64(
  1499. oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
  1500. (octeon_read_csr64(
  1501. oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
  1502. (0x3fffff00000000UL)) |
  1503. rx_max_coalesced_frames);
  1504. /* consider writing to resend bit here */
  1505. }
  1506. oct->intrmod.rx_frames = rx_max_coalesced_frames;
  1507. break;
  1508. }
  1509. default:
  1510. return -EINVAL;
  1511. }
  1512. return 0;
  1513. }
  1514. static int oct_cfg_rx_intrtime(struct lio *lio,
  1515. struct ethtool_coalesce *intr_coal)
  1516. {
  1517. struct octeon_device *oct = lio->oct_dev;
  1518. u32 time_threshold, rx_coalesce_usecs;
  1519. /* Config Time based interrupt values */
  1520. switch (oct->chip_id) {
  1521. case OCTEON_CN68XX:
  1522. case OCTEON_CN66XX: {
  1523. struct octeon_cn6xxx *cn6xxx =
  1524. (struct octeon_cn6xxx *)oct->chip;
  1525. if (!intr_coal->rx_coalesce_usecs)
  1526. rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
  1527. else
  1528. rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
  1529. time_threshold = lio_cn6xxx_get_oq_ticks(oct,
  1530. rx_coalesce_usecs);
  1531. octeon_write_csr(oct,
  1532. CN6XXX_SLI_OQ_INT_LEVEL_TIME,
  1533. time_threshold);
  1534. CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
  1535. break;
  1536. }
  1537. case OCTEON_CN23XX_PF_VID: {
  1538. u64 time_threshold;
  1539. int q_no;
  1540. if (!intr_coal->rx_coalesce_usecs)
  1541. rx_coalesce_usecs = oct->intrmod.rx_usecs;
  1542. else
  1543. rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
  1544. time_threshold =
  1545. cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
  1546. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1547. q_no += oct->sriov_info.pf_srn;
  1548. octeon_write_csr64(oct,
  1549. CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
  1550. (oct->intrmod.rx_frames |
  1551. (time_threshold << 32)));
  1552. /*consider writing to resend bit here*/
  1553. }
  1554. oct->intrmod.rx_usecs = rx_coalesce_usecs;
  1555. break;
  1556. }
  1557. case OCTEON_CN23XX_VF_VID: {
  1558. u64 time_threshold;
  1559. int q_no;
  1560. if (!intr_coal->rx_coalesce_usecs)
  1561. rx_coalesce_usecs = oct->intrmod.rx_usecs;
  1562. else
  1563. rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
  1564. time_threshold =
  1565. cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
  1566. for (q_no = 0; q_no < oct->num_oqs; q_no++) {
  1567. octeon_write_csr64(
  1568. oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
  1569. (oct->intrmod.rx_frames |
  1570. (time_threshold << 32)));
  1571. /* consider setting resend bit */
  1572. }
  1573. oct->intrmod.rx_usecs = rx_coalesce_usecs;
  1574. break;
  1575. }
  1576. default:
  1577. return -EINVAL;
  1578. }
  1579. return 0;
  1580. }
  1581. static int
  1582. oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
  1583. __attribute__((unused)))
  1584. {
  1585. struct octeon_device *oct = lio->oct_dev;
  1586. u32 iq_intr_pkt;
  1587. void __iomem *inst_cnt_reg;
  1588. u64 val;
  1589. /* Config Cnt based interrupt values */
  1590. switch (oct->chip_id) {
  1591. case OCTEON_CN68XX:
  1592. case OCTEON_CN66XX:
  1593. break;
  1594. case OCTEON_CN23XX_VF_VID:
  1595. case OCTEON_CN23XX_PF_VID: {
  1596. int q_no;
  1597. if (!intr_coal->tx_max_coalesced_frames)
  1598. iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
  1599. CN23XX_PKT_IN_DONE_WMARK_MASK;
  1600. else
  1601. iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
  1602. CN23XX_PKT_IN_DONE_WMARK_MASK;
  1603. for (q_no = 0; q_no < oct->num_iqs; q_no++) {
  1604. inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
  1605. val = readq(inst_cnt_reg);
  1606. /*clear wmark and count.dont want to write count back*/
  1607. val = (val & 0xFFFF000000000000ULL) |
  1608. ((u64)iq_intr_pkt
  1609. << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
  1610. writeq(val, inst_cnt_reg);
  1611. /*consider setting resend bit*/
  1612. }
  1613. oct->intrmod.tx_frames = iq_intr_pkt;
  1614. break;
  1615. }
  1616. default:
  1617. return -EINVAL;
  1618. }
  1619. return 0;
  1620. }
  1621. static int lio_set_intr_coalesce(struct net_device *netdev,
  1622. struct ethtool_coalesce *intr_coal)
  1623. {
  1624. struct lio *lio = GET_LIO(netdev);
  1625. int ret;
  1626. struct octeon_device *oct = lio->oct_dev;
  1627. u32 j, q_no;
  1628. int db_max, db_min;
  1629. switch (oct->chip_id) {
  1630. case OCTEON_CN68XX:
  1631. case OCTEON_CN66XX:
  1632. db_min = CN6XXX_DB_MIN;
  1633. db_max = CN6XXX_DB_MAX;
  1634. if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
  1635. (intr_coal->tx_max_coalesced_frames <= db_max)) {
  1636. for (j = 0; j < lio->linfo.num_txpciq; j++) {
  1637. q_no = lio->linfo.txpciq[j].s.q_no;
  1638. oct->instr_queue[q_no]->fill_threshold =
  1639. intr_coal->tx_max_coalesced_frames;
  1640. }
  1641. } else {
  1642. dev_err(&oct->pci_dev->dev,
  1643. "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
  1644. intr_coal->tx_max_coalesced_frames, db_min,
  1645. db_max);
  1646. return -EINVAL;
  1647. }
  1648. break;
  1649. case OCTEON_CN23XX_PF_VID:
  1650. case OCTEON_CN23XX_VF_VID:
  1651. break;
  1652. default:
  1653. return -EINVAL;
  1654. }
  1655. oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
  1656. oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
  1657. ret = oct_cfg_adaptive_intr(lio, intr_coal);
  1658. if (!intr_coal->use_adaptive_rx_coalesce) {
  1659. ret = oct_cfg_rx_intrtime(lio, intr_coal);
  1660. if (ret)
  1661. goto ret_intrmod;
  1662. ret = oct_cfg_rx_intrcnt(lio, intr_coal);
  1663. if (ret)
  1664. goto ret_intrmod;
  1665. }
  1666. if (!intr_coal->use_adaptive_tx_coalesce) {
  1667. ret = oct_cfg_tx_intrcnt(lio, intr_coal);
  1668. if (ret)
  1669. goto ret_intrmod;
  1670. }
  1671. return 0;
  1672. ret_intrmod:
  1673. return ret;
  1674. }
  1675. static int lio_get_ts_info(struct net_device *netdev,
  1676. struct ethtool_ts_info *info)
  1677. {
  1678. struct lio *lio = GET_LIO(netdev);
  1679. info->so_timestamping =
  1680. #ifdef PTP_HARDWARE_TIMESTAMPING
  1681. SOF_TIMESTAMPING_TX_HARDWARE |
  1682. SOF_TIMESTAMPING_RX_HARDWARE |
  1683. SOF_TIMESTAMPING_RAW_HARDWARE |
  1684. SOF_TIMESTAMPING_TX_SOFTWARE |
  1685. #endif
  1686. SOF_TIMESTAMPING_RX_SOFTWARE |
  1687. SOF_TIMESTAMPING_SOFTWARE;
  1688. if (lio->ptp_clock)
  1689. info->phc_index = ptp_clock_index(lio->ptp_clock);
  1690. else
  1691. info->phc_index = -1;
  1692. #ifdef PTP_HARDWARE_TIMESTAMPING
  1693. info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
  1694. info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
  1695. (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
  1696. (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
  1697. (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
  1698. #endif
  1699. return 0;
  1700. }
  1701. /* Return register dump len. */
  1702. static int lio_get_regs_len(struct net_device *dev)
  1703. {
  1704. struct lio *lio = GET_LIO(dev);
  1705. struct octeon_device *oct = lio->oct_dev;
  1706. switch (oct->chip_id) {
  1707. case OCTEON_CN23XX_PF_VID:
  1708. return OCT_ETHTOOL_REGDUMP_LEN_23XX;
  1709. case OCTEON_CN23XX_VF_VID:
  1710. return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
  1711. default:
  1712. return OCT_ETHTOOL_REGDUMP_LEN;
  1713. }
  1714. }
  1715. static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
  1716. {
  1717. u32 reg;
  1718. u8 pf_num = oct->pf_num;
  1719. int len = 0;
  1720. int i;
  1721. /* PCI Window Registers */
  1722. len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
  1723. /*0x29030 or 0x29040*/
  1724. reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
  1725. len += sprintf(s + len,
  1726. "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
  1727. reg, oct->pcie_port, oct->pf_num,
  1728. (u64)octeon_read_csr64(oct, reg));
  1729. /*0x27080 or 0x27090*/
  1730. reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
  1731. len +=
  1732. sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
  1733. reg, oct->pcie_port, oct->pf_num,
  1734. (u64)octeon_read_csr64(oct, reg));
  1735. /*0x27000 or 0x27010*/
  1736. reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
  1737. len +=
  1738. sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
  1739. reg, oct->pcie_port, oct->pf_num,
  1740. (u64)octeon_read_csr64(oct, reg));
  1741. /*0x29120*/
  1742. reg = 0x29120;
  1743. len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
  1744. (u64)octeon_read_csr64(oct, reg));
  1745. /*0x27300*/
  1746. reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
  1747. (oct->pf_num) * CN23XX_PF_INT_OFFSET;
  1748. len += sprintf(
  1749. s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
  1750. oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
  1751. /*0x27200*/
  1752. reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
  1753. (oct->pf_num) * CN23XX_PF_INT_OFFSET;
  1754. len += sprintf(s + len,
  1755. "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
  1756. reg, oct->pcie_port, oct->pf_num,
  1757. (u64)octeon_read_csr64(oct, reg));
  1758. /*29130*/
  1759. reg = CN23XX_SLI_PKT_CNT_INT;
  1760. len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
  1761. (u64)octeon_read_csr64(oct, reg));
  1762. /*0x29140*/
  1763. reg = CN23XX_SLI_PKT_TIME_INT;
  1764. len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
  1765. (u64)octeon_read_csr64(oct, reg));
  1766. /*0x29160*/
  1767. reg = 0x29160;
  1768. len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
  1769. (u64)octeon_read_csr64(oct, reg));
  1770. /*0x29180*/
  1771. reg = CN23XX_SLI_OQ_WMARK;
  1772. len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
  1773. reg, (u64)octeon_read_csr64(oct, reg));
  1774. /*0x291E0*/
  1775. reg = CN23XX_SLI_PKT_IOQ_RING_RST;
  1776. len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
  1777. (u64)octeon_read_csr64(oct, reg));
  1778. /*0x29210*/
  1779. reg = CN23XX_SLI_GBL_CONTROL;
  1780. len += sprintf(s + len,
  1781. "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
  1782. (u64)octeon_read_csr64(oct, reg));
  1783. /*0x29220*/
  1784. reg = 0x29220;
  1785. len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
  1786. reg, (u64)octeon_read_csr64(oct, reg));
  1787. /*PF only*/
  1788. if (pf_num == 0) {
  1789. /*0x29260*/
  1790. reg = CN23XX_SLI_OUT_BP_EN_W1S;
  1791. len += sprintf(s + len,
  1792. "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
  1793. reg, (u64)octeon_read_csr64(oct, reg));
  1794. } else if (pf_num == 1) {
  1795. /*0x29270*/
  1796. reg = CN23XX_SLI_OUT_BP_EN2_W1S;
  1797. len += sprintf(s + len,
  1798. "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
  1799. reg, (u64)octeon_read_csr64(oct, reg));
  1800. }
  1801. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1802. reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
  1803. len +=
  1804. sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
  1805. reg, i, (u64)octeon_read_csr64(oct, reg));
  1806. }
  1807. /*0x10040*/
  1808. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1809. reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
  1810. len += sprintf(s + len,
  1811. "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
  1812. reg, i, (u64)octeon_read_csr64(oct, reg));
  1813. }
  1814. /*0x10080*/
  1815. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1816. reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
  1817. len += sprintf(s + len,
  1818. "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
  1819. reg, i, (u64)octeon_read_csr64(oct, reg));
  1820. }
  1821. /*0x10090*/
  1822. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1823. reg = CN23XX_SLI_OQ_SIZE(i);
  1824. len += sprintf(
  1825. s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
  1826. reg, i, (u64)octeon_read_csr64(oct, reg));
  1827. }
  1828. /*0x10050*/
  1829. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1830. reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
  1831. len += sprintf(
  1832. s + len,
  1833. "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
  1834. reg, i, (u64)octeon_read_csr64(oct, reg));
  1835. }
  1836. /*0x10070*/
  1837. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1838. reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
  1839. len += sprintf(s + len,
  1840. "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
  1841. reg, i, (u64)octeon_read_csr64(oct, reg));
  1842. }
  1843. /*0x100a0*/
  1844. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1845. reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
  1846. len += sprintf(s + len,
  1847. "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
  1848. reg, i, (u64)octeon_read_csr64(oct, reg));
  1849. }
  1850. /*0x100b0*/
  1851. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1852. reg = CN23XX_SLI_OQ_PKTS_SENT(i);
  1853. len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
  1854. reg, i, (u64)octeon_read_csr64(oct, reg));
  1855. }
  1856. /*0x100c0*/
  1857. for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
  1858. reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
  1859. len += sprintf(s + len,
  1860. "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
  1861. reg, i, (u64)octeon_read_csr64(oct, reg));
  1862. /*0x10000*/
  1863. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1864. reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
  1865. len += sprintf(
  1866. s + len,
  1867. "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
  1868. reg, i, (u64)octeon_read_csr64(oct, reg));
  1869. }
  1870. /*0x10010*/
  1871. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1872. reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
  1873. len += sprintf(
  1874. s + len,
  1875. "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
  1876. i, (u64)octeon_read_csr64(oct, reg));
  1877. }
  1878. /*0x10020*/
  1879. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1880. reg = CN23XX_SLI_IQ_DOORBELL(i);
  1881. len += sprintf(
  1882. s + len,
  1883. "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
  1884. reg, i, (u64)octeon_read_csr64(oct, reg));
  1885. }
  1886. /*0x10030*/
  1887. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
  1888. reg = CN23XX_SLI_IQ_SIZE(i);
  1889. len += sprintf(
  1890. s + len,
  1891. "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
  1892. reg, i, (u64)octeon_read_csr64(oct, reg));
  1893. }
  1894. /*0x10040*/
  1895. for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
  1896. reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
  1897. len += sprintf(s + len,
  1898. "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
  1899. reg, i, (u64)octeon_read_csr64(oct, reg));
  1900. }
  1901. return len;
  1902. }
/* Dump the CN23XX VF CSR register set into @s and return the number of
 * bytes written.  One line per register per ring; only the rings assigned
 * to this VF (sriov_info.rings_per_vf) are dumped.  @s must be at least
 * OCT_ETHTOOL_REGDUMP_LEN_23XX_VF bytes (lio_get_regs() zeroes it first).
 */
static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
{
	int len = 0;
	u32 reg;
	int i;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");

	/* Output-queue (OQ) buffer size registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Input-queue (IQ) completed-instruction counters */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* OQ credit (doorbell) registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* OQ ring-size registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* OQ control registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* OQ descriptor-list base addresses */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* OQ interrupt-moderation (packet count / time) levels */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* OQ packets-sent counters */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* OQ error info (raw offset 0x100c0 + ring stride) */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* Per-ring VF interrupt summary (raw offset 0x100d0 + ring stride) */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* IQ control registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* IQ instruction-ring base addresses */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* IQ doorbell registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* IQ ring-size registers */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_SIZE(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	/* IQ completed-instruction counters (again, final snapshot) */
	for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
		reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
		len += sprintf(s + len,
			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
			       reg, i, (u64)octeon_read_csr64(oct, reg));
	}

	return len;
}
  2001. static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
  2002. {
  2003. u32 reg;
  2004. int i, len = 0;
  2005. /* PCI Window Registers */
  2006. len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
  2007. reg = CN6XXX_WIN_WR_ADDR_LO;
  2008. len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
  2009. CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
  2010. reg = CN6XXX_WIN_WR_ADDR_HI;
  2011. len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
  2012. CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
  2013. reg = CN6XXX_WIN_RD_ADDR_LO;
  2014. len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
  2015. CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
  2016. reg = CN6XXX_WIN_RD_ADDR_HI;
  2017. len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
  2018. CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
  2019. reg = CN6XXX_WIN_WR_DATA_LO;
  2020. len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
  2021. CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
  2022. reg = CN6XXX_WIN_WR_DATA_HI;
  2023. len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
  2024. CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
  2025. len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
  2026. CN6XXX_WIN_WR_MASK_REG,
  2027. octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
  2028. /* PCI Interrupt Register */
  2029. len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
  2030. CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
  2031. CN6XXX_SLI_INT_ENB64_PORT0));
  2032. len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
  2033. CN6XXX_SLI_INT_ENB64_PORT1,
  2034. octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
  2035. len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
  2036. octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
  2037. /* PCI Output queue registers */
  2038. for (i = 0; i < oct->num_oqs; i++) {
  2039. reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
  2040. len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
  2041. reg, i, octeon_read_csr(oct, reg));
  2042. reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
  2043. len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
  2044. reg, i, octeon_read_csr(oct, reg));
  2045. }
  2046. reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
  2047. len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
  2048. reg, octeon_read_csr(oct, reg));
  2049. reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
  2050. len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
  2051. reg, octeon_read_csr(oct, reg));
  2052. /* PCI Input queue registers */
  2053. for (i = 0; i <= 3; i++) {
  2054. u32 reg;
  2055. reg = CN6XXX_SLI_IQ_DOORBELL(i);
  2056. len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
  2057. reg, i, octeon_read_csr(oct, reg));
  2058. reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
  2059. len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
  2060. reg, i, octeon_read_csr(oct, reg));
  2061. }
  2062. /* PCI DMA registers */
  2063. len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
  2064. CN6XXX_DMA_CNT(0),
  2065. octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
  2066. reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
  2067. len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
  2068. CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
  2069. reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
  2070. len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
  2071. CN6XXX_DMA_TIME_INT_LEVEL(0),
  2072. octeon_read_csr(oct, reg));
  2073. len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
  2074. CN6XXX_DMA_CNT(1),
  2075. octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
  2076. reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
  2077. len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
  2078. CN6XXX_DMA_PKT_INT_LEVEL(1),
  2079. octeon_read_csr(oct, reg));
  2080. reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
  2081. len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
  2082. CN6XXX_DMA_TIME_INT_LEVEL(1),
  2083. octeon_read_csr(oct, reg));
  2084. /* PCI Index registers */
  2085. len += sprintf(s + len, "\n");
  2086. for (i = 0; i < 16; i++) {
  2087. reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
  2088. len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
  2089. CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
  2090. }
  2091. return len;
  2092. }
  2093. static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
  2094. {
  2095. u32 val;
  2096. int i, len = 0;
  2097. /* PCI CONFIG Registers */
  2098. len += sprintf(s + len,
  2099. "\n\t Octeon Config space Registers\n\n");
  2100. for (i = 0; i <= 13; i++) {
  2101. pci_read_config_dword(oct->pci_dev, (i * 4), &val);
  2102. len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
  2103. (i * 4), i, val);
  2104. }
  2105. for (i = 30; i <= 34; i++) {
  2106. pci_read_config_dword(oct->pci_dev, (i * 4), &val);
  2107. len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
  2108. (i * 4), i, val);
  2109. }
  2110. return len;
  2111. }
  2112. /* Return register dump user app. */
  2113. static void lio_get_regs(struct net_device *dev,
  2114. struct ethtool_regs *regs, void *regbuf)
  2115. {
  2116. struct lio *lio = GET_LIO(dev);
  2117. int len = 0;
  2118. struct octeon_device *oct = lio->oct_dev;
  2119. regs->version = OCT_ETHTOOL_REGSVER;
  2120. switch (oct->chip_id) {
  2121. case OCTEON_CN23XX_PF_VID:
  2122. memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
  2123. len += cn23xx_read_csr_reg(regbuf + len, oct);
  2124. break;
  2125. case OCTEON_CN23XX_VF_VID:
  2126. memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
  2127. len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
  2128. break;
  2129. case OCTEON_CN68XX:
  2130. case OCTEON_CN66XX:
  2131. memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
  2132. len += cn6xxx_read_csr_reg(regbuf + len, oct);
  2133. len += cn6xxx_read_config_reg(regbuf + len, oct);
  2134. break;
  2135. default:
  2136. dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
  2137. __func__, oct->chip_id);
  2138. }
  2139. }
  2140. static u32 lio_get_priv_flags(struct net_device *netdev)
  2141. {
  2142. struct lio *lio = GET_LIO(netdev);
  2143. return lio->oct_dev->priv_flags;
  2144. }
  2145. static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
  2146. {
  2147. struct lio *lio = GET_LIO(netdev);
  2148. bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
  2149. lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
  2150. intr_by_tx_bytes);
  2151. return 0;
  2152. }
/* ethtool callbacks for PF (and CN6XXX) netdevs; the VF variant below
 * omits the PF-only operations (phys id, eeprom, pause params, coalesce
 * thresholds come from the same handlers).
 */
static const struct ethtool_ops lio_ethtool_ops = {
	.get_link_ksettings = lio_get_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = lio_get_drvinfo,
	.get_ringparam = lio_ethtool_get_ringparam,
	.get_channels = lio_ethtool_get_channels,
	.set_phys_id = lio_set_phys_id,
	.get_eeprom_len = lio_get_eeprom_len,
	.get_eeprom = lio_get_eeprom,
	.get_strings = lio_get_strings,
	.get_ethtool_stats = lio_get_ethtool_stats,
	.get_pauseparam = lio_get_pauseparam,
	.set_pauseparam = lio_set_pauseparam,
	.get_regs_len = lio_get_regs_len,
	.get_regs = lio_get_regs,
	.get_msglevel = lio_get_msglevel,
	.set_msglevel = lio_set_msglevel,
	.get_sset_count = lio_get_sset_count,
	.get_coalesce = lio_get_intr_coalesce,
	.set_coalesce = lio_set_intr_coalesce,
	.get_priv_flags = lio_get_priv_flags,
	.set_priv_flags = lio_set_priv_flags,
	.get_ts_info = lio_get_ts_info,
};
/* ethtool callbacks for CN23XX VF netdevs: VF-specific drvinfo/strings/
 * stats handlers, and no phys-id, eeprom, or pause-param support.
 */
static const struct ethtool_ops lio_vf_ethtool_ops = {
	.get_link_ksettings = lio_get_link_ksettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = lio_get_vf_drvinfo,
	.get_ringparam = lio_ethtool_get_ringparam,
	.get_channels = lio_ethtool_get_channels,
	.get_strings = lio_vf_get_strings,
	.get_ethtool_stats = lio_vf_get_ethtool_stats,
	.get_regs_len = lio_get_regs_len,
	.get_regs = lio_get_regs,
	.get_msglevel = lio_get_msglevel,
	.set_msglevel = lio_set_msglevel,
	.get_sset_count = lio_vf_get_sset_count,
	.get_coalesce = lio_get_intr_coalesce,
	.set_coalesce = lio_set_intr_coalesce,
	.get_priv_flags = lio_get_priv_flags,
	.set_priv_flags = lio_set_priv_flags,
	.get_ts_info = lio_get_ts_info,
};
  2196. void liquidio_set_ethtool_ops(struct net_device *netdev)
  2197. {
  2198. struct lio *lio = GET_LIO(netdev);
  2199. struct octeon_device *oct = lio->oct_dev;
  2200. if (OCTEON_CN23XX_VF(oct))
  2201. netdev->ethtool_ops = &lio_vf_ethtool_ops;
  2202. else
  2203. netdev->ethtool_ops = &lio_ethtool_ops;
  2204. }