thunder_bgx.c

/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	u8			lmac_type;
	u8			lane_to_sds;
	bool			use_training;
	bool			autoneg;
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8			bgx_id;
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	u8			lmac_count;
	u8			max_lmac;
	u8			acpi_lmac_idx;
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	bool			is_dlm;
	bool			is_rgx;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}

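/* Poll @reg until the bits in @mask are cleared (@zero == true) or set
 * (@zero == false). Returns 0 on success, 1 if the condition is not met
 * within roughly 100-200 ms.
 */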
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}

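/* Maximum number of BGX blocks per node, derived once from the PCI
 * subsystem device ID of the first BGX device that probes.
 */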
static int max_bgx_per_node;
static void set_max_bgx_per_node(struct pci_dev *pdev)
{
	u16 sdevid;

	if (max_bgx_per_node)
		return;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	switch (sdevid) {
	case PCI_SUBSYS_DEVID_81XX_BGX:
	case PCI_SUBSYS_DEVID_81XX_RGX:
		max_bgx_per_node = MAX_BGX_PER_CN81XX;
		break;
	case PCI_SUBSYS_DEVID_83XX_BGX:
		max_bgx_per_node = MAX_BGX_PER_CN83XX;
		break;
	case PCI_SUBSYS_DEVID_88XX_BGX:
	default:
		max_bgx_per_node = MAX_BGX_PER_CN88XX;
		break;
	}
}

static struct bgx *get_bgx(int node, int bgx_idx)
{
	int idx = (node * max_bgx_per_node) + bgx_idx;

	return bgx_vnic[idx];
}

/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < max_bgx_per_node; i++) {
		if (bgx_vnic[(node * max_bgx_per_node) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);

/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->mac_type = lmac->lmac_type;
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);

void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	pfc->fc_rx = cfg & RX_EN;
	pfc->fc_tx = cfg & TX_EN;
	pfc->autoneg = 0;
}
EXPORT_SYMBOL(bgx_lmac_get_pfc);

void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	cfg &= ~(RX_EN | TX_EN);
	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_lmac_set_pfc);

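/* Program the last negotiated speed/duplex of an SGMII/RGMII LMAC into the
 * GMI port and PCS registers. Packet RX/TX is paused while the port is
 * reconfigured and restored afterwards.
 */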
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;
	bool tx_en, rx_en;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	tx_en = cmr_cfg & CMR_PKT_TX_EN;
	rx_en = cmr_cfg & CMR_PKT_RX_EN;
	cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}

	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	/* Restore CMR config settings */
	cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
		xcv_setup_link(lmac->link_up, lmac->last_speed);
}

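/* Link adjust callback invoked by the PHY framework; registered for each
 * PHY-managed LMAC via phy_connect_direct() in bgx_lmac_enable().
 */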
static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);

static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);

static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
	int lmacid = lmac->lmacid;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= PCS_MRX_CTL_RST_AN;
	if (lmac->phydev) {
		cfg |= PCS_MRX_CTL_AN_EN;
	} else {
		/* In scenarios where PHY driver is not present or it's a
		 * non-standard PHY, FW sets AN_EN to inform Linux driver
		 * to do auto-neg and link polling or not.
		 */
		if (cfg & PCS_MRX_CTL_AN_EN)
			lmac->autoneg = true;
	}
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (lmac->lmac_type == BGX_MODE_QSGMII) {
		/* Disable disparity check for QSGMII */
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}

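/* Bring up the SPU/SMU blocks for the non-SGMII LMAC modes
 * (XAUI/RXAUI/XFI/XLAUI and the 10G/40G KR variants): reset the SPU,
 * optionally enable link training, disable FEC and autoneg, and configure
 * pause frame handling.
 */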
static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type == BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_INTLV_RDISP);

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Enable receive and transmission of pause frames */
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
		      BCK_EN | DRP_EN | TX_EN | RX_EN));
	/* Configure pause time and interval */
	bgx_reg_write(bgx, lmacid,
		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
	cfg &= ~0xFFFFull;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}

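/* Verify that a XAUI-class link is actually usable: check link training,
 * PCS reset completion, block lock / lane alignment, latched receive faults
 * and SMU RX/TX idle. Returns 0 when the link is good, -1 otherwise
 * (the LMAC is re-initialized on a local/remote fault).
 */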
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmac);

	return -1;
}

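/* Read the SGMII link state from the PCS, update the cached speed/duplex
 * from the autoneg results and re-arm the link poll work (every 3 seconds).
 */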
static void bgx_poll_for_sgmii_link(struct lmac *lmac)
{
	u64 pcs_link, an_result;
	u8 speed;

	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
				BGX_GMP_PCS_MRX_STATUS);

	/* Link state bit is sticky, read it again */
	if (!(pcs_link & PCS_MRX_STATUS_LINK))
		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
					BGX_GMP_PCS_MRX_STATUS);

	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		goto next_poll;
	}

	lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
				 BGX_GMP_PCS_ANX_AN_RESULTS);

	speed = (an_result >> 3) & 0x3;
	lmac->last_duplex = (an_result >> 1) & 0x1;
	switch (speed) {
	case 0:
		lmac->last_speed = 10;
		break;
	case 1:
		lmac->last_speed = 100;
		break;
	case 2:
		lmac->last_speed = 1000;
		break;
	default:
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		break;
	}

next_poll:
	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up)
			bgx_sgmii_change_link_state(lmac);
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
}

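/* Periodic link poll for LMACs whose link is monitored by the driver:
 * checks SPU/SMU (or SGMII PCS) link status, updates the cached link state
 * and re-queues itself on the lmac->check_link workqueue.
 */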
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);
	if (lmac->is_sgmii) {
		bgx_poll_for_sgmii_link(lmac);
		return;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int phy_interface_mode(u8 lmac_type)
{
	if (lmac_type == BGX_MODE_QSGMII)
		return PHY_INTERFACE_MODE_QSGMII;
	if (lmac_type == BGX_MODE_RGMII)
		return PHY_INTERFACE_MODE_RGMII;

	return PHY_INTERFACE_MODE_SGMII;
}

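/* Bring up one LMAC: run the SGMII or XAUI-class init, then either connect
 * its PHY through the PHY framework, assume a fixed 1G full-duplex link, or
 * start the link-polling workqueue, depending on mode and autoneg config.
 */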
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
	    (lmac->lmac_type == BGX_MODE_RGMII)) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmac))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev) {
			if (lmac->autoneg) {
				bgx_reg_write(bgx, lmacid,
					      BGX_GMP_PCS_LINKX_TIMER,
					      PCS_LINKX_TIMER_COUNT);
				goto poll;
			} else {
				/* Default to below link speed and duplex */
				lmac->link_up = true;
				lmac->last_speed = 1000;
				lmac->last_duplex = 1;
				bgx_sgmii_change_link_state(lmac);
				return 0;
			}
		}
		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       phy_interface_mode(lmac->lmac_type)))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
		return 0;
	}

poll:
	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
					   WQ_MEM_RECLAIM, 1);
	if (!lmac->check_link)
		return -ENOMEM;
	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);

	return 0;
}

static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}

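/* One-time per-BGX hardware setup: program each LMAC's type and lane
 * mapping, set the backpressure channel masks and clear all DMAC filters
 * and NCSI steering rules.
 */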
static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
{
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
}

static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
	struct device *dev = &bgx->pdev->dev;
	struct lmac *lmac;
	char str[27];

	if (!bgx->is_dlm && lmacid)
		return;

	lmac = &bgx->lmac[lmacid];
	if (!bgx->is_dlm)
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);

	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
		dev_info(dev, "%s: SGMII\n", (char *)str);
		break;
	case BGX_MODE_XAUI:
		dev_info(dev, "%s: XAUI\n", (char *)str);
		break;
	case BGX_MODE_RXAUI:
		dev_info(dev, "%s: RXAUI\n", (char *)str);
		break;
	case BGX_MODE_XFI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XFI\n", (char *)str);
		else
			dev_info(dev, "%s: 10G_KR\n", (char *)str);
		break;
	case BGX_MODE_XLAUI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XLAUI\n", (char *)str);
		else
			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
		break;
	case BGX_MODE_QSGMII:
		dev_info(dev, "%s: QSGMII\n", (char *)str);
		break;
	case BGX_MODE_RGMII:
		dev_info(dev, "%s: RGMII\n", (char *)str);
		break;
	case BGX_MODE_INVALID:
		/* Nothing to do */
		break;
	}
}

static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine if DLM0/2 is QSGMII or
		 * DLM1/3 is configured to QSGMII as bootloader will
		 * configure all LMACs, so take whatever is configured
		 * by low level firmware.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}

static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
		lmac->use_training = 0;
		return;
	}

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
}

static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_dlm || bgx->is_rgx) {
		/* Read LMAC0 type to figure out QLM mode
		 * This is configured by low level firmware
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
		return;
	}

	/* For DLMs or SLMs on 80/81/83xx so many lane configurations
	 * are possible and vary across boards. Also Kernel doesn't have
	 * any way to identify board type/info and since firmware does,
	 * just take lmac type and serdes lane config as is.
	 */
	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
	lane_to_sds = (u8)(cmr_cfg & 0xFF);

	/* Check if config is reset value */
	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
		lmac->lmac_type = BGX_MODE_INVALID;
	else
		lmac->lmac_type = lmac_type;

	lmac->lane_to_sds = lane_to_sds;
	lmac_set_training(bgx, lmac, lmac->lmacid);
}

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	u8 idx;

	/* Init all LMAC's type to invalid */
	for (idx = 0; idx < bgx->max_lmac; idx++) {
		lmac = &bgx->lmac[idx];
		lmac->lmacid = idx;
		lmac->lmac_type = BGX_MODE_INVALID;
		lmac->use_training = false;
	}

	/* It is assumed that low level firmware sets this value */
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (bgx->lmac_count > bgx->max_lmac)
		bgx->lmac_count = bgx->max_lmac;

	for (idx = 0; idx < bgx->lmac_count; idx++) {
		bgx_set_lmac_config(bgx, idx);
		bgx_print_qlm_mode(bgx, idx);
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		ret = -EINVAL;
		goto out;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);

	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
	bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

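/* Walk the DT child nodes of the BGX device: record each LMAC's MAC address
 * and, where a usable phy-handle exists, look up the PHY device (deferring
 * the probe if the PHY driver is not bound yet).
 */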
static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;
		const char *mac;

		/* Should always be an OF node. But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		mac = of_get_mac_address(node);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no phy or defective firmware presents
		 * this cortina phy, for which there is no driver
		 * support, ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == bgx->max_lmac) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out, try not to leak device reference counts
	 * for phy devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}

static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	set_max_bgx_per_node(pdev);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
		bgx->bgx_id = (pci_resource_start(pdev,
			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
		bgx->max_lmac = MAX_LMAC_PER_BGX;
		bgx_vnic[bgx->bgx_id] = bgx;
	} else {
		bgx->is_rgx = true;
		bgx->max_lmac = 1;
		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
		bgx_vnic[bgx->bgx_id] = bgx;
		xcv_init_hw();
	}

	/* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one
	 * BGX i.e BGX2 can be split across 2 DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
		bgx->is_dlm = true;

	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);