xgene_enet_sgmac.c

/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
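
/* The write helpers below target the port's memory-mapped CSR regions;
 * each region (ethernet CSR, clock/reset CSR, ring interface, diagnostic
 * CSR, MCX MAC CSR) has its own base address in struct xgene_enet_pdata.
 */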
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
        iowrite32(val, p->eth_csr_addr + offset);
}

static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
                                     u32 val)
{
        iowrite32(val, p->base_addr + offset);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
                                  u32 offset, u32 val)
{
        iowrite32(val, p->eth_ring_if_addr + offset);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
                                   u32 offset, u32 val)
{
        iowrite32(val, p->eth_diag_csr_addr + offset);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
                                  u32 offset, u32 val)
{
        void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

        iowrite32(val, addr);
}
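
/* MAC registers are not mapped directly; they are reached through an
 * indirect interface: latch the target address and data, issue a write
 * command, then poll the command-done register (up to 10 iterations,
 * 1 us apart) before clearing the command.
 */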
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
                                   u32 wr_addr, u32 wr_data)
{
        int i;

        iowrite32(wr_addr, ctl->addr);
        iowrite32(wr_data, ctl->ctl);
        iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);

        /* wait for write command to complete */
        for (i = 0; i < 10; i++) {
                if (ioread32(ctl->cmd_done)) {
                        iowrite32(0, ctl->cmd);
                        return true;
                }
                udelay(1);
        }

        return false;
}
static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
                              u32 wr_addr, u32 wr_data)
{
        struct xgene_indirect_ctl ctl = {
                .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
                .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
                .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
                .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
        };

        if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
                netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
}

static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
        return ioread32(p->eth_csr_addr + offset);
}

static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
{
        return ioread32(p->eth_diag_csr_addr + offset);
}

static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
{
        return ioread32(p->mcx_mac_csr_addr + offset);
}
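
/* The indirect read mirrors the indirect write: latch the address, issue
 * a read command, poll command-done, then fetch the data register.  On
 * timeout the failure is logged and 0 is returned, which callers cannot
 * tell apart from a register that genuinely reads as zero.
 */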
static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
{
        u32 rd_data;
        int i;

        iowrite32(rd_addr, ctl->addr);
        iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);

        /* wait for read command to complete */
        for (i = 0; i < 10; i++) {
                if (ioread32(ctl->cmd_done)) {
                        rd_data = ioread32(ctl->ctl);
                        iowrite32(0, ctl->cmd);
                        return rd_data;
                }
                udelay(1);
        }

        pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);

        return 0;
}

static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
{
        struct xgene_indirect_ctl ctl = {
                .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
                .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
                .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
                .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
        };

        return xgene_enet_rd_indirect(&ctl, rd_addr);
}
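
/* Release the ethernet block RAMs from shutdown.  If shutdown is already
 * deasserted and every memory reports ready (all ones), there is nothing
 * to do; otherwise clear the shutdown register and poll readiness for
 * roughly 1 ms before reporting failure.
 */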
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
        struct net_device *ndev = p->ndev;
        u32 data, shutdown;
        int i = 0;

        shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
        data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);

        if (!shutdown && data == ~0U) {
                netdev_dbg(ndev, "+ ecc_init done, skipping\n");
                return 0;
        }

        xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
        do {
                usleep_range(100, 110);
                data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
                if (data == ~0U)
                        return 0;
        } while (++i < 10);

        netdev_err(ndev, "Failed to release memory from shutdown\n");
        return -ENODEV;
}
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
        u32 val;

        val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
        xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
        xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
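
/* MII management accesses to the internal SGMII PHY go through the MAC's
 * MII_MGMT registers: program the PHY/register address, start the cycle,
 * then poll the BUSY indicator (up to 10 iterations, 10-20 us apart).
 */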
static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
                                u32 reg, u16 data)
{
        u32 addr, wr_data, done;
        int i;

        addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
        xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);

        wr_data = PHY_CONTROL(data);
        xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);

        for (i = 0; i < 10; i++) {
                done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
                if (!(done & BUSY_MASK))
                        return;
                usleep_range(10, 20);
        }

        netdev_err(p->ndev, "MII_MGMT write failed\n");
}

static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
{
        u32 addr, data, done;
        int i;

        addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
        xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
        xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);

        for (i = 0; i < 10; i++) {
                done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
                if (!(done & BUSY_MASK)) {
                        data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
                        xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
                        return data;
                }
                usleep_range(10, 20);
        }

        netdev_err(p->ndev, "MII_MGMT read failed\n");

        return 0;
}
static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
        xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
        xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}
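
/* The station address is split across two MAC registers: bytes 0-3 go in
 * STATION_ADDR0, bytes 4-5 in the upper half of STATION_ADDR1; the lower
 * half of STATION_ADDR1 is preserved by a read-modify-write.
 */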
static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
        u32 addr0, addr1;
        u8 *dev_addr = p->ndev->dev_addr;

        addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
                (dev_addr[1] << 8) | dev_addr[0];
        xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);

        addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
        addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
        xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
}
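
/* Read link state from the internal PHY's SGMII base page ability
 * register; the negotiated speed is cached in p->phy_speed as a side
 * effect, and the return value is nonzero when the link is up.
 */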
static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
{
        u32 data;

        data = xgene_mii_phy_read(p, INT_PHY_ADDR,
                                  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);

        if (LINK_SPEED(data) == PHY_SPEED_1000)
                p->phy_speed = SPEED_1000;
        else if (LINK_SPEED(data) == PHY_SPEED_100)
                p->phy_speed = SPEED_100;
        else
                p->phy_speed = SPEED_10;

        return data & LINK_UP;
}
static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
{
        xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
                            0x8000);
        xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
        xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
{
        xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
                            0x8000);
        xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
}

static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
{
        u32 value;

        if (p->phy_speed == SPEED_UNKNOWN)
                return;

        value = xgene_mii_phy_read(p, INT_PHY_ADDR,
                                   SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
        if (!(value & LINK_UP))
                xgene_sgmii_tbi_control_reset(p);
}
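
/* Program the MAC and the ICM CONFIG registers for the current link
 * speed: the interface mode and ENET_LHD_MODE/ENET_GHD_MODE bits on the
 * MAC side, the MAC mode and async-read wait count on the ICM side.  At
 * gigabit speed the CFG_BYPASS_UNISEC_TX/RX bits are also set in the
 * debug CSR.
 */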
static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
{
        u32 icm0_addr, icm2_addr, debug_addr;
        u32 icm0, icm2, intf_ctl;
        u32 mc2, value;

        xgene_sgmii_reset(p);

        if (p->enet_id == XGENE_ENET1) {
                icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
                icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
                debug_addr = DEBUG_REG_ADDR;
        } else {
                icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR;
                icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR;
                debug_addr = XG_DEBUG_REG_ADDR;
        }

        icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
        icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
        mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
        intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);

        switch (p->phy_speed) {
        case SPEED_10:
                ENET_INTERFACE_MODE2_SET(&mc2, 1);
                intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
                CFG_MACMODE_SET(&icm0, 0);
                CFG_WAITASYNCRD_SET(&icm2, 500);
                break;
        case SPEED_100:
                ENET_INTERFACE_MODE2_SET(&mc2, 1);
                intf_ctl &= ~ENET_GHD_MODE;
                intf_ctl |= ENET_LHD_MODE;
                CFG_MACMODE_SET(&icm0, 1);
                CFG_WAITASYNCRD_SET(&icm2, 80);
                break;
        default:
                ENET_INTERFACE_MODE2_SET(&mc2, 2);
                intf_ctl &= ~ENET_LHD_MODE;
                intf_ctl |= ENET_GHD_MODE;
                CFG_MACMODE_SET(&icm0, 2);
                CFG_WAITASYNCRD_SET(&icm2, 16);
                value = xgene_enet_rd_csr(p, debug_addr);
                value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
                xgene_enet_wr_csr(p, debug_addr, value);
                break;
        }

        mc2 |= FULL_DUPLEX2 | PAD_CRC;
        xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
        xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
        xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
        xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
}
static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
        xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
{
        u32 data, loop = 10;

        xgene_sgmii_configure(p);

        while (loop--) {
                data = xgene_mii_phy_read(p, INT_PHY_ADDR,
                                          SGMII_STATUS_ADDR >> 2);
                if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
                        break;
                usleep_range(1000, 2000);
        }

        if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
                netdev_err(p->ndev, "Auto-negotiation failed\n");
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
{
        u32 data;

        data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);

        if (set)
                data |= bits;
        else
                data &= ~bits;

        xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
}

static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable)
{
        xgene_sgmac_rxtx(p, TX_FLOW_EN, enable);

        p->mac_ops->enable_tx_pause(p, enable);
}

static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
        xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable);
}
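
/* Full SGMAC bring-up.  The MAC soft reset is skipped on ENET2 ports
 * driven by an MDIO driver; after SGMII autonegotiation, speed and
 * station address are programmed, then the MDC clock, drop on bufpool
 * timeout, hardware pause-frame quanta and thresholds, and flow control
 * are configured before TX/RX traffic gating is released.
 */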
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
        u32 pause_thres_reg, pause_off_thres_reg;
        u32 enet_spare_cfg_reg, rsif_config_reg;
        u32 cfg_bypass_reg, rx_dv_gate_reg;
        u32 data, data1, data2, offset;
        u32 multi_dpf_reg;

        if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
                xgene_sgmac_reset(p);

        xgene_sgmii_enable_autoneg(p);
        xgene_sgmac_set_speed(p);
        xgene_sgmac_set_mac_addr(p);

        if (p->enet_id == XGENE_ENET1) {
                enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
                rsif_config_reg = RSIF_CONFIG_REG_ADDR;
                cfg_bypass_reg = CFG_BYPASS_ADDR;
                offset = p->port_id * OFFSET_4;
                rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset;
        } else {
                enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
                rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
                cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
                rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
        }

        data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
        data |= MPA_IDLE_WITH_QMI_EMPTY;
        xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);

        /* Adjust MDC clock frequency */
        data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
        MGMT_CLOCK_SEL_SET(&data, 7);
        xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

        /* Enable drop if bufpool not available */
        data = xgene_enet_rd_csr(p, rsif_config_reg);
        data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
        xgene_enet_wr_csr(p, rsif_config_reg, data);

        /* Configure HW pause frame generation */
        multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? CSR_MULTI_DPF0_ADDR :
                        XG_MCX_MULTI_DPF0_ADDR;
        data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg);
        data = (DEF_QUANTA << 16) | (data & 0xffff);
        xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data);

        if (p->enet_id != XGENE_ENET1) {
                data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR);
                data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
                xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data);
        }

        pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH :
                          XG_RXBUF_PAUSE_THRESH;
        pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ?
                              RXBUF_PAUSE_OFF_THRESH : 0;

        if (p->enet_id == XGENE_ENET1) {
                data1 = xgene_enet_rd_csr(p, pause_thres_reg);
                data2 = xgene_enet_rd_csr(p, pause_off_thres_reg);

                if (!(p->port_id % 2)) {
                        data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES;
                        data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES;
                } else {
                        data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16);
                        data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16);
                }

                xgene_enet_wr_csr(p, pause_thres_reg, data1);
                xgene_enet_wr_csr(p, pause_off_thres_reg, data2);
        } else {
                data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES;
                xgene_enet_wr_csr(p, pause_thres_reg, data);
        }

        xgene_sgmac_flowctl_tx(p, p->tx_pause);
        xgene_sgmac_flowctl_rx(p, p->rx_pause);

        /* Bypass traffic gating */
        xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
        xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
        xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}
static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
        xgene_sgmac_rxtx(p, RX_EN, true);
}

static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
        xgene_sgmac_rxtx(p, TX_EN, true);
}

static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
        xgene_sgmac_rxtx(p, RX_EN, false);
}

static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
        xgene_sgmac_rxtx(p, TX_EN, false);
}
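
/* Port-level reset.  On DT systems the block is reset by toggling its
 * clock; on ACPI systems the firmware _RST (or, failing that, _INI)
 * method is evaluated instead.  ECC init and ring-interface association
 * are done once, by port 0.
 */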
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
        struct device *dev = &p->pdev->dev;

        if (!xgene_ring_mgr_init(p))
                return -ENODEV;

        if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
                xgene_enet_config_ring_if_assoc(p);
                return 0;
        }

        if (p->enet_id == XGENE_ENET2)
                xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);

        if (dev->of_node) {
                if (!IS_ERR(p->clk)) {
                        clk_prepare_enable(p->clk);
                        udelay(5);
                        clk_disable_unprepare(p->clk);
                        udelay(5);
                        clk_prepare_enable(p->clk);
                        udelay(5);
                }
        } else {
#ifdef CONFIG_ACPI
                if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
                        acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
                                             "_RST", NULL, NULL);
                else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
                        acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
                                             "_INI", NULL, NULL);
#endif
        }

        if (!p->port_id) {
                xgene_enet_ecc_init(p);
                xgene_enet_config_ring_if_assoc(p);
        }

        return 0;
}
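
/* Bypass the classifier (CLE): steer all received traffic straight to
 * the given destination ring, with the primary and next free-pool
 * selections derived from the bufpool ring IDs.
 */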
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
                                  u32 dst_ring_num, u16 bufpool_id,
                                  u16 nxtbufpool_id)
{
        u32 cle_bypass_reg0, cle_bypass_reg1;
        u32 offset = p->port_id * MAC_OFFSET;
        u32 data, fpsel, nxtfpsel;

        if (p->enet_id == XGENE_ENET1) {
                cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
                cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
        } else {
                cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
                cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
        }

        data = CFG_CLE_BYPASS_EN0;
        xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);

        fpsel = xgene_enet_get_fpsel(bufpool_id);
        nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
        data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) |
               CFG_CLE_NXTFPSEL0(nxtfpsel);
        xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
                             struct xgene_enet_desc_ring *ring)
{
        u32 addr, data;

        if (xgene_enet_is_bufpool(ring->id)) {
                addr = ENET_CFGSSQMIFPRESET_ADDR;
                data = BIT(xgene_enet_get_fpsel(ring->id));
        } else {
                addr = ENET_CFGSSQMIWQRESET_ADDR;
                data = BIT(xgene_enet_ring_bufnum(ring->id));
        }

        xgene_enet_wr_ring_if(pdata, addr, data);
}
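
/* Shutdown: assemble one reset mask covering every RX free pool (buffer
 * and page pools) and another covering every TX work queue, reset both
 * through the ring interface, then release the clock on DT systems.
 */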
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
        struct device *dev = &p->pdev->dev;
        struct xgene_enet_desc_ring *ring;
        u32 pb;
        int i;

        pb = 0;
        for (i = 0; i < p->rxq_cnt; i++) {
                ring = p->rx_ring[i]->buf_pool;
                pb |= BIT(xgene_enet_get_fpsel(ring->id));

                ring = p->rx_ring[i]->page_pool;
                if (ring)
                        pb |= BIT(xgene_enet_get_fpsel(ring->id));
        }
        xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);

        pb = 0;
        for (i = 0; i < p->txq_cnt; i++) {
                ring = p->tx_ring[i];
                pb |= BIT(xgene_enet_ring_bufnum(ring->id));
        }
        xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);

        if (dev->of_node) {
                if (!IS_ERR(p->clk))
                        clk_disable_unprepare(p->clk);
        }
}
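
/* Delayed-work link poller.  A link-up transition reprograms the MAC for
 * the negotiated speed and enables TX/RX; a link-down transition disables
 * them.  The work rearms itself with PHY_POLL_LINK_ON while the link is
 * up and PHY_POLL_LINK_OFF while it is down.
 */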
static void xgene_enet_link_state(struct work_struct *work)
{
        struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
                                     struct xgene_enet_pdata, link_work);
        struct net_device *ndev = p->ndev;
        u32 link, poll_interval;

        link = xgene_enet_link_status(p);
        if (link) {
                if (!netif_carrier_ok(ndev)) {
                        netif_carrier_on(ndev);
                        xgene_sgmac_set_speed(p);
                        xgene_sgmac_rx_enable(p);
                        xgene_sgmac_tx_enable(p);
                        netdev_info(ndev, "Link is Up - %dMbps\n",
                                    p->phy_speed);
                }
                poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(ndev)) {
                        xgene_sgmac_rx_disable(p);
                        xgene_sgmac_tx_disable(p);
                        netif_carrier_off(ndev);
                        netdev_info(ndev, "Link is Down\n");
                }
                poll_interval = PHY_POLL_LINK_OFF;
        }

        schedule_delayed_work(&p->link_work, poll_interval);
}
static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable)
{
        u32 data, ecm_cfg_addr;

        if (p->enet_id == XGENE_ENET1) {
                ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR :
                               CSR_ECM_CFG_1_ADDR;
        } else {
                ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR;
        }

        data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr);
        if (enable)
                data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
        else
                data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
        xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data);
}
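
/* Operation tables exported to the core driver for ports configured in
 * SGMII mode.
 */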
const struct xgene_mac_ops xgene_sgmac_ops = {
        .init = xgene_sgmac_init,
        .reset = xgene_sgmac_reset,
        .rx_enable = xgene_sgmac_rx_enable,
        .tx_enable = xgene_sgmac_tx_enable,
        .rx_disable = xgene_sgmac_rx_disable,
        .tx_disable = xgene_sgmac_tx_disable,
        .set_speed = xgene_sgmac_set_speed,
        .set_mac_addr = xgene_sgmac_set_mac_addr,
        .set_framesize = xgene_sgmac_set_frame_size,
        .link_state = xgene_enet_link_state,
        .enable_tx_pause = xgene_sgmac_enable_tx_pause,
        .flowctl_tx = xgene_sgmac_flowctl_tx,
        .flowctl_rx = xgene_sgmac_flowctl_rx
};

const struct xgene_port_ops xgene_sgport_ops = {
        .reset = xgene_enet_reset,
        .clear = xgene_enet_clear,
        .cle_bypass = xgene_enet_cle_bypass,
        .shutdown = xgene_enet_shutdown
};