/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"
struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	u8			lmac_type;
	u8			lane_to_sds;
	bool			use_training;
	bool			autoneg;
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8			bgx_id;
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	u8			lmac_count;
	u8			max_lmac;
	u8			acpi_lmac_idx;
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	bool			is_dlm;
	bool			is_rgx;
};
static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * an explicit ordering operation which in this case is redundant, and only
 * adds overhead.
 */

/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}
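
/* Poll a per-LMAC register until the bits in @mask are cleared (@zero == true)
 * or set (@zero == false). Sleeps 1-2 ms between reads for up to 100
 * iterations; returns 0 on success, 1 on timeout.
 */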
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}
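
/* Derive the number of BGX blocks per node from the PCI subsystem device ID
 * (CN81xx, CN83xx or CN88xx); computed once and cached in max_bgx_per_node.
 */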
static int max_bgx_per_node;

static void set_max_bgx_per_node(struct pci_dev *pdev)
{
	u16 sdevid;

	if (max_bgx_per_node)
		return;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	switch (sdevid) {
	case PCI_SUBSYS_DEVID_81XX_BGX:
	case PCI_SUBSYS_DEVID_81XX_RGX:
		max_bgx_per_node = MAX_BGX_PER_CN81XX;
		break;
	case PCI_SUBSYS_DEVID_83XX_BGX:
		max_bgx_per_node = MAX_BGX_PER_CN83XX;
		break;
	case PCI_SUBSYS_DEVID_88XX_BGX:
	default:
		max_bgx_per_node = MAX_BGX_PER_CN88XX;
		break;
	}
}
static struct bgx *get_bgx(int node, int bgx_idx)
{
	int idx = (node * max_bgx_per_node) + bgx_idx;

	return bgx_vnic[idx];
}

/* Return a bitmap of the BGX blocks present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < max_bgx_per_node; i++) {
		if (bgx_vnic[(node * max_bgx_per_node) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);
/* Return the number of LMACs configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->mac_type = lmac->lmac_type;
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	pfc->fc_rx = cfg & RX_EN;
	pfc->fc_tx = cfg & TX_EN;
	pfc->autoneg = 0;
}
EXPORT_SYMBOL(bgx_lmac_get_pfc);

void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	cfg &= ~(RX_EN | TX_EN);
	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_lmac_set_pfc);
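
/* Program GMI port config and PCS misc control to match the last reported
 * SGMII link state (up/down, speed and duplex), then re-enable the LMAC.
 */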
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
		xcv_setup_link(lmac->link_up, lmac->last_speed);
}
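
/* Link adjustment callback, invoked by the PHY library whenever the attached
 * PHY reports a change in link, speed or duplex.
 */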
static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);
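
/* Clear every DMAC CAM filter entry that was programmed for this LMAC */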
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}
/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
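
/* Bring up the GMP GMI/PCS blocks for an LMAC running in SGMII, QSGMII or
 * RGMII mode: set TX threshold and jabber limit, reset the PCS, and enable
 * autonegotiation when a PHY is attached (or honor the firmware's AN_EN
 * setting otherwise).
 */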
static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
	int lmacid = lmac->lmacid;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= PCS_MRX_CTL_RST_AN;
	if (lmac->phydev) {
		cfg |= PCS_MRX_CTL_AN_EN;
	} else {
		/* In scenarios where PHY driver is not present or it's a
		 * non-standard PHY, FW sets AN_EN to inform Linux driver
		 * to do auto-neg and link polling or not.
		 */
		if (cfg & PCS_MRX_CTL_AN_EN)
			lmac->autoneg = true;
	}
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (lmac->lmac_type == BGX_MODE_QSGMII) {
		/* Disable disparity check for QSGMII */
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}
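
/* Bring up the SPU/SMU blocks for an LMAC running in XAUI, RXAUI, XFI, XLAUI
 * or 10G/40G KR mode: reset the SPU, optionally enable link training, disable
 * FEC and autonegotiation, and configure pause frame handling.
 */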
static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type == BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_INTLV_RDISP);

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Enable receive and transmission of pause frames */
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
		      BCK_EN | DRP_EN | TX_EN | RX_EN));
	/* Configure pause time and interval */
	bgx_reg_write(bgx, lmacid,
		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
	cfg &= ~0xFFFFull;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}
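
/* Verify that a non-SGMII link has actually come up: kick link training if it
 * has not completed, wait for SPU reset, block lock or lane alignment, clear
 * latched receive faults, and confirm SMU RX/TX are idle with no MAC RX
 * faults. Returns 0 when the link is healthy, -1 otherwise (re-initializing
 * the LMAC when a local/remote fault is seen).
 */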
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmac);
	return -1;
}
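
/* Poll PCS status for SGMII links that have no attached PHY driver: read the
 * (sticky) link bit and the autonegotiation results to derive link, speed and
 * duplex, then reschedule the poll.
 */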
static void bgx_poll_for_sgmii_link(struct lmac *lmac)
{
	u64 pcs_link, an_result;
	u8 speed;

	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
				BGX_GMP_PCS_MRX_STATUS);

	/* Link state bit is sticky, read it again */
	if (!(pcs_link & PCS_MRX_STATUS_LINK))
		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
					BGX_GMP_PCS_MRX_STATUS);

	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		goto next_poll;
	}

	lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
				 BGX_GMP_PCS_ANX_AN_RESULTS);

	speed = (an_result >> 3) & 0x3;
	lmac->last_duplex = (an_result >> 1) & 0x1;
	switch (speed) {
	case 0:
		lmac->last_speed = 10;
		break;
	case 1:
		lmac->last_speed = 100;
		break;
	case 2:
		lmac->last_speed = 1000;
		break;
	default:
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		break;
	}

next_poll:
	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up)
			bgx_sgmii_change_link_state(lmac);
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
}
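
/* Delayed-work handler that polls link status for LMACs without a PHY driver.
 * SGMII links are handled by bgx_poll_for_sgmii_link(); other modes check
 * SPU/SMU status and verify the link with bgx_xaui_check_link().
 */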
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);
	if (lmac->is_sgmii) {
		bgx_poll_for_sgmii_link(lmac);
		return;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int phy_interface_mode(u8 lmac_type)
{
	if (lmac_type == BGX_MODE_QSGMII)
		return PHY_INTERFACE_MODE_QSGMII;
	if (lmac_type == BGX_MODE_RGMII)
		return PHY_INTERFACE_MODE_RGMII;

	return PHY_INTERFACE_MODE_SGMII;
}
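
/* Initialize an LMAC according to its configured mode and enable it.
 * SGMII-family LMACs with a PHY are connected through the PHY library;
 * otherwise link state is tracked either via PCS autoneg polling or assumed
 * to be a fixed 1G full-duplex link. XFI/XLAUI/KR modes always use the
 * link-polling workqueue.
 */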
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
	    (lmac->lmac_type == BGX_MODE_RGMII)) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmac))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev) {
			if (lmac->autoneg) {
				bgx_reg_write(bgx, lmacid,
					      BGX_GMP_PCS_LINKX_TIMER,
					      PCS_LINKX_TIMER_COUNT);
				goto poll;
			} else {
				/* Default to below link speed and duplex */
				lmac->link_up = true;
				lmac->last_speed = 1000;
				lmac->last_duplex = 1;
				bgx_sgmii_change_link_state(lmac);
				return 0;
			}
		}
		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       phy_interface_mode(lmac->lmac_type)))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
		return 0;
	}

poll:
	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
					   WQ_MEM_RECLAIM, 1);
	if (!lmac->check_link)
		return -ENOMEM;
	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);

	return 0;
}
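
/* Tear down an LMAC: stop link polling, disable packet RX/TX, let the FIFOs
 * drain, power down the serdes lanes, flush DMAC filters and disconnect any
 * attached PHY.
 */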
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}
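
/* One-time BGX block initialization: enable FCS stripping, report BIST
 * failures, program each LMAC's type and lane mapping, set the backpressure
 * masks, and clear all DMAC filtering and NCSI steering rules.
 */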
static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
{
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
}
static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
	struct device *dev = &bgx->pdev->dev;
	struct lmac *lmac;
	char str[20];

	if (!bgx->is_dlm && lmacid)
		return;

	lmac = &bgx->lmac[lmacid];
	if (!bgx->is_dlm)
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);

	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
		dev_info(dev, "%s: SGMII\n", (char *)str);
		break;
	case BGX_MODE_XAUI:
		dev_info(dev, "%s: XAUI\n", (char *)str);
		break;
	case BGX_MODE_RXAUI:
		dev_info(dev, "%s: RXAUI\n", (char *)str);
		break;
	case BGX_MODE_XFI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XFI\n", (char *)str);
		else
			dev_info(dev, "%s: 10G_KR\n", (char *)str);
		break;
	case BGX_MODE_XLAUI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XLAUI\n", (char *)str);
		else
			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
		break;
	case BGX_MODE_QSGMII:
		dev_info(dev, "%s: QSGMII\n", (char *)str);
		break;
	case BGX_MODE_RGMII:
		dev_info(dev, "%s: RGMII\n", (char *)str);
		break;
	case BGX_MODE_INVALID:
		/* Nothing to do */
		break;
	}
}
static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine if DLM0/2 is QSGMII or
		 * DLM1/3 is configured to QSGMII as bootloader will
		 * configure all LMACs, so take whatever is configured
		 * by low level firmware.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}

static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
		lmac->use_training = 0;
		return;
	}

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
}
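
/* Record this LMAC's type and lane-to-serdes mapping. For QLM and RGX
 * configurations the type is taken from LMAC0's CMR config; for DLMs it is
 * read per LMAC, exactly as programmed by low level firmware.
 */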
static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_dlm || bgx->is_rgx) {
		/* Read LMAC0 type to figure out QLM mode
		 * This is configured by low level firmware
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
		return;
	}

	/* For DLMs or SLMs on 80/81/83xx many lane configurations are
	 * possible and vary across boards. Also the kernel doesn't have
	 * any way to identify board type/info and since firmware does,
	 * just take lmac type and serdes lane config as is.
	 */
	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
	lane_to_sds = (u8)(cmr_cfg & 0xFF);

	/* Check if config is reset value */
	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
		lmac->lmac_type = BGX_MODE_INVALID;
	else
		lmac->lmac_type = lmac_type;
	lmac->lane_to_sds = lane_to_sds;
	lmac_set_training(bgx, lmac, lmac->lmacid);
}

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	u8 idx;

	/* Init all LMAC's type to invalid */
	for (idx = 0; idx < bgx->max_lmac; idx++) {
		lmac = &bgx->lmac[idx];
		lmac->lmacid = idx;
		lmac->lmac_type = BGX_MODE_INVALID;
		lmac->use_training = false;
	}

	/* It is assumed that low level firmware sets this value */
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (bgx->lmac_count > bgx->max_lmac)
		bgx->lmac_count = bgx->max_lmac;

	for (idx = 0; idx < bgx->lmac_count; idx++) {
		bgx_set_lmac_config(bgx, idx);
		bgx_print_qlm_mode(bgx, idx);
	}
}
#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		ret = -EINVAL;
		goto out;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);

	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
	bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */
#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;
		const char *mac;

		/* Should always be an OF node. But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		mac = of_get_mac_address(node);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no phy or defective firmware presents
		 * this cortina phy, for which there is no driver
		 * support, ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == bgx->max_lmac) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out, try not to leak device reference counts
	 * for phy devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}
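
/* PCI probe: map the BGX register space, work out this block's ID and
 * QLM/DLM configuration as set up by firmware, then initialize the hardware
 * and bring up every configured LMAC.
 */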
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	set_max_bgx_per_node(pdev);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
		bgx->bgx_id = (pci_resource_start(pdev,
			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
		bgx->max_lmac = MAX_LMAC_PER_BGX;
		bgx_vnic[bgx->bgx_id] = bgx;
	} else {
		bgx->is_rgx = true;
		bgx->max_lmac = 1;
		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
		bgx_vnic[bgx->bgx_id] = bgx;
		xcv_init_hw();
	}

	/* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one
	 * BGX i.e BGX2 can be split across 2 DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
		bgx->is_dlm = true;

	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);