/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
  24. static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
  25. {
  26. iowrite32(val, p->eth_csr_addr + offset);
  27. }
  28. static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
  29. u32 offset, u32 val)
  30. {
  31. iowrite32(val, p->eth_ring_if_addr + offset);
  32. }
  33. static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
  34. u32 offset, u32 val)
  35. {
  36. iowrite32(val, p->eth_diag_csr_addr + offset);
  37. }
  38. static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
  39. u32 offset, u32 val)
  40. {
  41. void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
  42. iowrite32(val, addr);
  43. }
  44. static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
  45. u32 wr_addr, u32 wr_data)
  46. {
  47. int i;
  48. iowrite32(wr_addr, ctl->addr);
  49. iowrite32(wr_data, ctl->ctl);
  50. iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);
  51. /* wait for write command to complete */
  52. for (i = 0; i < 10; i++) {
  53. if (ioread32(ctl->cmd_done)) {
  54. iowrite32(0, ctl->cmd);
  55. return true;
  56. }
  57. udelay(1);
  58. }
  59. return false;
  60. }
  61. static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
  62. u32 wr_addr, u32 wr_data)
  63. {
  64. struct xgene_indirect_ctl ctl = {
  65. .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
  66. .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
  67. .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
  68. .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
  69. };
  70. if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
  71. netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
  72. }
  73. static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
  74. {
  75. return ioread32(p->eth_csr_addr + offset);
  76. }
  77. static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
  78. {
  79. return ioread32(p->eth_diag_csr_addr + offset);
  80. }
  81. static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
  82. {
  83. u32 rd_data;
  84. int i;
  85. iowrite32(rd_addr, ctl->addr);
  86. iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);
  87. /* wait for read command to complete */
  88. for (i = 0; i < 10; i++) {
  89. if (ioread32(ctl->cmd_done)) {
  90. rd_data = ioread32(ctl->ctl);
  91. iowrite32(0, ctl->cmd);
  92. return rd_data;
  93. }
  94. udelay(1);
  95. }
  96. pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);
  97. return 0;
  98. }
  99. static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
  100. {
  101. struct xgene_indirect_ctl ctl = {
  102. .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
  103. .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
  104. .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
  105. .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
  106. };
  107. return xgene_enet_rd_indirect(&ctl, rd_addr);
  108. }
  109. static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
  110. {
  111. struct net_device *ndev = p->ndev;
  112. u32 data;
  113. int i = 0;
  114. xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
  115. do {
  116. usleep_range(100, 110);
  117. data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
  118. if (data == ~0U)
  119. return 0;
  120. } while (++i < 10);
  121. netdev_err(ndev, "Failed to release memory from shutdown\n");
  122. return -ENODEV;
  123. }
  124. static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
  125. {
  126. u32 val;
  127. val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
  128. xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
  129. xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
  130. }
  131. static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
  132. u32 reg, u16 data)
  133. {
  134. u32 addr, wr_data, done;
  135. int i;
  136. addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
  137. xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
  138. wr_data = PHY_CONTROL(data);
  139. xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);
  140. for (i = 0; i < 10; i++) {
  141. done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
  142. if (!(done & BUSY_MASK))
  143. return;
  144. usleep_range(10, 20);
  145. }
  146. netdev_err(p->ndev, "MII_MGMT write failed\n");
  147. }
  148. static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
  149. {
  150. u32 addr, data, done;
  151. int i;
  152. addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
  153. xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
  154. xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
  155. for (i = 0; i < 10; i++) {
  156. done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
  157. if (!(done & BUSY_MASK)) {
  158. data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
  159. xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
  160. return data;
  161. }
  162. usleep_range(10, 20);
  163. }
  164. netdev_err(p->ndev, "MII_MGMT read failed\n");
  165. return 0;
  166. }
  167. static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
  168. {
  169. xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
  170. xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
  171. }
  172. static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
  173. {
  174. u32 addr0, addr1;
  175. u8 *dev_addr = p->ndev->dev_addr;
  176. addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
  177. (dev_addr[1] << 8) | dev_addr[0];
  178. xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);
  179. addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
  180. addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
  181. xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
  182. }
  183. static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
  184. {
  185. u32 data;
  186. data = xgene_mii_phy_read(p, INT_PHY_ADDR,
  187. SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
  188. return data & LINK_UP;
  189. }
  190. static void xgene_sgmac_init(struct xgene_enet_pdata *p)
  191. {
  192. u32 data, loop = 10;
  193. u32 offset = p->port_id * 4;
  194. u32 enet_spare_cfg_reg, rsif_config_reg;
  195. u32 cfg_bypass_reg, rx_dv_gate_reg;
  196. xgene_sgmac_reset(p);
  197. /* Enable auto-negotiation */
  198. xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
  199. xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
  200. while (loop--) {
  201. data = xgene_mii_phy_read(p, INT_PHY_ADDR,
  202. SGMII_STATUS_ADDR >> 2);
  203. if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
  204. break;
  205. usleep_range(1000, 2000);
  206. }
  207. if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
  208. netdev_err(p->ndev, "Auto-negotiation failed\n");
  209. data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
  210. ENET_INTERFACE_MODE2_SET(&data, 2);
  211. xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
  212. xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
  213. if (p->enet_id == XGENE_ENET1) {
  214. enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
  215. rsif_config_reg = RSIF_CONFIG_REG_ADDR;
  216. cfg_bypass_reg = CFG_BYPASS_ADDR;
  217. rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
  218. } else {
  219. enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
  220. rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
  221. cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
  222. rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
  223. }
  224. data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
  225. data |= MPA_IDLE_WITH_QMI_EMPTY;
  226. xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
  227. xgene_sgmac_set_mac_addr(p);
  228. /* Adjust MDC clock frequency */
  229. data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
  230. MGMT_CLOCK_SEL_SET(&data, 7);
  231. xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
  232. /* Enable drop if bufpool not available */
  233. data = xgene_enet_rd_csr(p, rsif_config_reg);
  234. data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
  235. xgene_enet_wr_csr(p, rsif_config_reg, data);
  236. /* Bypass traffic gating */
  237. xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
  238. xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
  239. xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
  240. }
  241. static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
  242. {
  243. u32 data;
  244. data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
  245. if (set)
  246. data |= bits;
  247. else
  248. data &= ~bits;
  249. xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
  250. }
/* Enable the MAC receive path (set RX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}
/* Enable the MAC transmit path (set TX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}
/* Disable the MAC receive path (clear RX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}
/* Disable the MAC transmit path (clear TX_EN in MAC_CONFIG_1). */
static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}
  267. static int xgene_enet_reset(struct xgene_enet_pdata *p)
  268. {
  269. if (!xgene_ring_mgr_init(p))
  270. return -ENODEV;
  271. if (!IS_ERR(p->clk)) {
  272. clk_prepare_enable(p->clk);
  273. clk_disable_unprepare(p->clk);
  274. clk_prepare_enable(p->clk);
  275. }
  276. xgene_enet_ecc_init(p);
  277. xgene_enet_config_ring_if_assoc(p);
  278. return 0;
  279. }
  280. static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
  281. u32 dst_ring_num, u16 bufpool_id)
  282. {
  283. u32 data, fpsel;
  284. u32 cle_bypass_reg0, cle_bypass_reg1;
  285. u32 offset = p->port_id * MAC_OFFSET;
  286. if (p->enet_id == XGENE_ENET1) {
  287. cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
  288. cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
  289. } else {
  290. cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
  291. cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
  292. }
  293. data = CFG_CLE_BYPASS_EN0;
  294. xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
  295. fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
  296. data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
  297. xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
  298. }
/* Power down the port: release the clock if one was acquired. */
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	if (!IS_ERR(p->clk))
		clk_disable_unprepare(p->clk);
}
  304. static void xgene_enet_link_state(struct work_struct *work)
  305. {
  306. struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
  307. struct xgene_enet_pdata, link_work);
  308. struct net_device *ndev = p->ndev;
  309. u32 link, poll_interval;
  310. link = xgene_enet_link_status(p);
  311. if (link) {
  312. if (!netif_carrier_ok(ndev)) {
  313. netif_carrier_on(ndev);
  314. xgene_sgmac_init(p);
  315. xgene_sgmac_rx_enable(p);
  316. xgene_sgmac_tx_enable(p);
  317. netdev_info(ndev, "Link is Up - 1Gbps\n");
  318. }
  319. poll_interval = PHY_POLL_LINK_ON;
  320. } else {
  321. if (netif_carrier_ok(ndev)) {
  322. xgene_sgmac_rx_disable(p);
  323. xgene_sgmac_tx_disable(p);
  324. netif_carrier_off(ndev);
  325. netdev_info(ndev, "Link is Down\n");
  326. }
  327. poll_interval = PHY_POLL_LINK_OFF;
  328. }
  329. schedule_delayed_work(&p->link_work, poll_interval);
  330. }
  331. struct xgene_mac_ops xgene_sgmac_ops = {
  332. .init = xgene_sgmac_init,
  333. .reset = xgene_sgmac_reset,
  334. .rx_enable = xgene_sgmac_rx_enable,
  335. .tx_enable = xgene_sgmac_tx_enable,
  336. .rx_disable = xgene_sgmac_rx_disable,
  337. .tx_disable = xgene_sgmac_tx_disable,
  338. .set_mac_addr = xgene_sgmac_set_mac_addr,
  339. .link_state = xgene_enet_link_state
  340. };
/* SGMII port-level operations exposed to the core driver. */
struct xgene_port_ops xgene_sgport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_enet_shutdown
};