xgene_enet_sgmac.c

/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"

static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
	iowrite32(val, p->eth_csr_addr + offset);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
				  u32 offset, u32 val)
{
	iowrite32(val, p->eth_ring_if_addr + offset);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
				   u32 offset, u32 val)
{
	iowrite32(val, p->eth_diag_csr_addr + offset);
}
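
/* The MCX MAC block is not memory mapped directly; its registers are reached
 * through an indirect address/data/command interface.  The helpers below
 * program the target address and data, issue the command, and poll the
 * command-done register (bounded to roughly 10us) before giving up.
 */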
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
				   u32 wr_addr, u32 wr_data)
{
	int i;

	iowrite32(wr_addr, ctl->addr);
	iowrite32(wr_data, ctl->ctl);
	iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);

	/* wait for write command to complete */
	for (i = 0; i < 10; i++) {
		if (ioread32(ctl->cmd_done)) {
			iowrite32(0, ctl->cmd);
			return true;
		}
		udelay(1);
	}

	return false;
}

static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
			      u32 wr_addr, u32 wr_data)
{
	struct xgene_indirect_ctl ctl = {
		.addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
		.ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
		.cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
		.cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
	};

	if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
		netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
}

static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_csr_addr + offset);
}

static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_diag_csr_addr + offset);
}

static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
{
	u32 rd_data;
	int i;

	iowrite32(rd_addr, ctl->addr);
	iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);

	/* wait for read command to complete */
	for (i = 0; i < 10; i++) {
		if (ioread32(ctl->cmd_done)) {
			rd_data = ioread32(ctl->ctl);
			iowrite32(0, ctl->cmd);
			return rd_data;
		}
		udelay(1);
	}

	pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);

	return 0;
}

static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
{
	struct xgene_indirect_ctl ctl = {
		.addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
		.ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
		.cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
		.cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
	};

	return xgene_enet_rd_indirect(&ctl, rd_addr);
}
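
/* Bring the Ethernet block RAMs out of shutdown and wait for every memory
 * block to report ready (all bits set in ENET_BLOCK_MEM_RDY).  Polling is
 * bounded to roughly 1ms before the port is declared unusable.
 */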
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
	struct net_device *ndev = p->ndev;
	u32 data;
	int i = 0;

	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
	do {
		usleep_range(100, 110);
		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
		if (data == ~0U)
			return 0;
	} while (++i < 10);

	netdev_err(ndev, "Failed to release memory from shutdown\n");
	return -ENODEV;
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
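
/* MDIO-style access to the integrated SGMII PHY goes through the MAC's MII
 * management registers: program the PHY/register address, start the cycle,
 * then poll the BUSY indicator for up to ten 10-20us intervals.
 */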
static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
				u32 reg, u16 data)
{
	u32 addr, wr_data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);

	wr_data = PHY_CONTROL(data);
	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK))
			return;
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT write failed\n");
}

static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
{
	u32 addr, data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK)) {
			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
			return data;
		}
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT read failed\n");

	return 0;
}

static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}
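
/* The station MAC address is split across two registers: bytes 0-3 live in
 * STATION_ADDR0 and bytes 4-5 occupy the upper half of STATION_ADDR1, so
 * STATION_ADDR1 is read-modified-written to preserve its remaining bits.
 */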
static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
	u32 addr0, addr1;
	u8 *dev_addr = p->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);

	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
	addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
}

static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
{
	u32 data;

	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
				  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);

	return data & LINK_UP;
}
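
/* Full SGMII MAC bring-up: soft-reset the MAC, restart auto-negotiation on
 * the internal PHY and wait for it to complete, then program duplex and
 * interface mode, the station address, the MDC clock divider, the bufpool
 * drop policy, and the traffic-gating bypass registers.
 */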
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
	u32 data, loop = 10;

	xgene_sgmac_reset(p);

	/* Enable auto-negotiation */
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);

	while (loop--) {
		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
					  SGMII_STATUS_ADDR >> 2);
		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
			break;
		usleep_range(10, 20);
	}
	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
		netdev_err(p->ndev, "Auto-negotiation failed\n");

	data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
	ENET_INTERFACE_MODE2_SET(&data, 2);
	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);

	data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
	data |= MPA_IDLE_WITH_QMI_EMPTY;
	xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);

	xgene_sgmac_set_mac_addr(p);

	data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
	data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
	xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);

	/* Adjust MDC clock frequency */
	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&data, 7);
	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

	/* Enable drop if bufpool not available */
	data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Bypass traffic gating */
	xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
	xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
	xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0);
}
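
/* RX and TX are enabled or disabled by toggling the RX_EN and TX_EN bits in
 * MAC_CONFIG_1; the four wrappers below are the variants plugged into the
 * mac_ops table.
 */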
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
{
	u32 data;

	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);

	if (set)
		data |= bits;
	else
		data &= ~bits;

	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
}

static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}

static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}

static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}

static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}
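
/* Port-level reset: verify the ring manager is up, bounce the port clock to
 * reset the block, bring the RAMs out of shutdown, and associate the work
 * queues and free pools with this port.
 */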
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
	if (!xgene_ring_mgr_init(p))
		return -ENODEV;

	clk_prepare_enable(p->clk);
	clk_disable_unprepare(p->clk);
	clk_prepare_enable(p->clk);

	xgene_enet_ecc_init(p);
	xgene_enet_config_ring_if_assoc(p);

	return 0;
}
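
/* Classifier (CLE) bypass: steer all received frames straight to the given
 * destination ring and free-pool buffer queue, skipping the classification
 * engine.
 */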
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 data, fpsel;

	data = CFG_CLE_BYPASS_EN0;
	xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data);

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
	xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data);
}

static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	clk_disable_unprepare(p->clk);
}
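
/* Delayed-work link poller: on a link-up transition re-run the MAC init and
 * enable RX/TX before reporting carrier; on link-down disable RX/TX and drop
 * carrier.  The work re-arms itself with a poll interval that depends on the
 * current link state.
 */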
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
				     struct xgene_enet_pdata, link_work);
	struct net_device *ndev = p->ndev;
	u32 link, poll_interval;

	link = xgene_enet_link_status(p);
	if (link) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_sgmac_init(p);
			xgene_sgmac_rx_enable(p);
			xgene_sgmac_tx_enable(p);
			netdev_info(ndev, "Link is Up - 1Gbps\n");
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_sgmac_rx_disable(p);
			xgene_sgmac_tx_disable(p);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;
	}

	schedule_delayed_work(&p->link_work, poll_interval);
}
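
/* Exported MAC and port operation tables; the core driver picks these up for
 * ports configured in SGMII mode.
 */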
struct xgene_mac_ops xgene_sgmac_ops = {
	.init		= xgene_sgmac_init,
	.reset		= xgene_sgmac_reset,
	.rx_enable	= xgene_sgmac_rx_enable,
	.tx_enable	= xgene_sgmac_tx_enable,
	.rx_disable	= xgene_sgmac_rx_disable,
	.tx_disable	= xgene_sgmac_tx_disable,
	.set_mac_addr	= xgene_sgmac_set_mac_addr,
	.link_state	= xgene_enet_link_state
};

struct xgene_port_ops xgene_sgport_ops = {
	.reset		= xgene_enet_reset,
	.cle_bypass	= xgene_enet_cle_bypass,
	.shutdown	= xgene_enet_shutdown
};