xgene_enet_hw.c

/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
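
/* Program the ring state words: threshold select, accept-length-error,
 * queue coherency, ring size, and the descriptor base address.  The DMA
 * address is shifted right by 8 before being split across the
 * RINGADDRL/RINGADDRH fields, so the ring memory is presumably 256-byte
 * aligned.
 */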
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
        u32 *ring_cfg = ring->state;
        u64 addr = ring->dma;
        enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

        ring_cfg[4] |= (1 << SELTHRSH_POS) &
                        CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
        ring_cfg[3] |= ACCEPTLERR;
        ring_cfg[2] |= QCOHERENT;

        addr >>= 8;
        ring_cfg[2] |= (addr << RINGADDRL_POS) &
                        CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
        addr >>= RINGADDRL_LEN;
        ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
        ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
                        CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}
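
/* Mark the ring as either a buffer pool or a regular work queue, based
 * on its ID; buffer pools additionally get BUFPOOL_MODE.
 */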
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
        u32 *ring_cfg = ring->state;
        bool is_bufpool;
        u32 val;

        is_bufpool = xgene_enet_is_bufpool(ring->id);
        val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
        ring_cfg[4] |= (val << RINGTYPE_POS) &
                        CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

        if (is_bufpool) {
                ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
                                CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
        }
}
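
/* Enable the recombination buffer and program its timeout; the 0xf and
 * 0x7 values appear to form the low and high halves of the timeout
 * encoding.
 */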
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
        u32 *ring_cfg = ring->state;

        ring_cfg[3] |= RECOMBBUF;
        ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
                        CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
        ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}
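
/* 32-bit read/write helpers for the ring-manager CSR region, reached
 * via the ring's netdev private data.
 */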
static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
                                 u32 offset, u32 data)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

        iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
                                 u32 offset, u32 *data)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

        *data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
        int i;

        xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
        for (i = 0; i < NUM_RING_CONFIG; i++) {
                xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
                                     ring->state[i]);
        }
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
        memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
        xgene_enet_write_ring_state(ring);
}
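
/* Compose the complete ring state (type, recombination buffer for
 * ETH0-owned rings, base address and size) and flush it to the hardware.
 */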
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
        xgene_enet_ring_set_type(ring);

        if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
                xgene_enet_ring_set_recombbuf(ring);

        xgene_enet_ring_init(ring);
        xgene_enet_write_ring_state(ring);
}
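
/* Publish the ring to the ring manager: the low 10 bits of the ring ID
 * go into CSR_RING_ID with OVERWRITE set, while the ring number lands in
 * bits 18:9 of CSR_RING_ID_BUF with prefetch enabled and, for buffer
 * pools, the IS_BUFFER_POOL flag.
 */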
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
        u32 ring_id_val, ring_id_buf;
        bool is_bufpool;

        is_bufpool = xgene_enet_is_bufpool(ring->id);

        ring_id_val = ring->id & GENMASK(9, 0);
        ring_id_val |= OVERWRITE;

        ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
        ring_id_buf |= PREFETCH_BUF_EN;
        if (is_bufpool)
                ring_id_buf |= IS_BUFFER_POOL;

        xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
        xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
        u32 ring_id;

        ring_id = ring->id | OVERWRITE;
        xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
        xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
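
/* Bring a descriptor ring online: clear stale state, program the new
 * state and ID, and size the ring in slots.  Only CPU-owned rings go
 * further: their descriptors are marked empty and their "not empty"
 * interrupt bit is enabled.
 */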
struct xgene_enet_desc_ring *xgene_enet_setup_ring(
                struct xgene_enet_desc_ring *ring)
{
        u32 size = ring->size;
        u32 i, data;
        bool is_bufpool;

        xgene_enet_clr_ring_state(ring);
        xgene_enet_set_ring_state(ring);
        xgene_enet_set_ring_id(ring);

        ring->slots = xgene_enet_get_numslots(ring->id, size);

        is_bufpool = xgene_enet_is_bufpool(ring->id);
        if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
                return ring;

        for (i = 0; i < ring->slots; i++)
                xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

        xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
        data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
        xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

        return ring;
}
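
/* Tear-down counterpart of xgene_enet_setup_ring(): disable the
 * "not empty" interrupt for CPU-owned rings, then clear the ring's ID
 * and state in the ring manager.
 */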
void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
        u32 data;
        bool is_bufpool;

        is_bufpool = xgene_enet_is_bufpool(ring->id);
        if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
                goto out;

        xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
        data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
        xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
        xgene_enet_clr_desc_ring_id(ring);
        xgene_enet_clr_ring_state(ring);
}
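
/* Fold a hardware-reported ingress error code into the matching
 * rtnl_link_stats64 counter.
 */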
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
                            struct xgene_enet_pdata *pdata,
                            enum xgene_enet_err_code status)
{
        struct rtnl_link_stats64 *stats = &pdata->stats;

        switch (status) {
        case INGRESS_CRC:
                stats->rx_crc_errors++;
                break;
        case INGRESS_CHECKSUM:
        case INGRESS_CHECKSUM_COMPUTE:
                stats->rx_errors++;
                break;
        case INGRESS_TRUNC_FRAME:
                stats->rx_frame_errors++;
                break;
        case INGRESS_PKT_LEN:
                stats->rx_length_errors++;
                break;
        case INGRESS_PKT_UNDER:
                stats->rx_frame_errors++;
                break;
        case INGRESS_FIFO_OVERRUN:
                stats->rx_fifo_errors++;
                break;
        default:
                break;
        }
}
static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
                              u32 offset, u32 val)
{
        void __iomem *addr = pdata->eth_csr_addr + offset;

        iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
                                  u32 offset, u32 val)
{
        void __iomem *addr = pdata->eth_ring_if_addr + offset;

        iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
                                   u32 offset, u32 val)
{
        void __iomem *addr = pdata->eth_diag_csr_addr + offset;

        iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
                                  u32 offset, u32 val)
{
        void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

        iowrite32(val, addr);
}
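
/* Indirect register write: stage the target address and data in the
 * mailbox registers, issue the write command, and poll the done
 * register (1us per iteration, up to roughly 10us) before clearing the
 * command.  Returns false if the command never completed.
 */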
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
                                   void __iomem *cmd, void __iomem *cmd_done,
                                   u32 wr_addr, u32 wr_data)
{
        u32 done;
        u8 wait = 10;

        iowrite32(wr_addr, addr);
        iowrite32(wr_data, wr);
        iowrite32(XGENE_ENET_WR_CMD, cmd);

        /* wait for write command to complete */
        while (!(done = ioread32(cmd_done)) && wait--)
                udelay(1);

        if (!done)
                return false;

        iowrite32(0, cmd);

        return true;
}
static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
                                  u32 wr_addr, u32 wr_data)
{
        void __iomem *addr, *wr, *cmd, *cmd_done;

        addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
        wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
        cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
        cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

        if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
                netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
                           wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
                              u32 offset, u32 *val)
{
        void __iomem *addr = pdata->eth_csr_addr + offset;

        *val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
                                   u32 offset, u32 *val)
{
        void __iomem *addr = pdata->eth_diag_csr_addr + offset;

        *val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
                                  u32 offset, u32 *val)
{
        void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

        *val = ioread32(addr);
}
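
/* Indirect register read, using the same mailbox handshake as the write
 * path: stage the address, issue the read command, poll for completion,
 * then latch the data and clear the command.
 */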
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
                                   void __iomem *cmd, void __iomem *cmd_done,
                                   u32 rd_addr, u32 *rd_data)
{
        u32 done;
        u8 wait = 10;

        iowrite32(rd_addr, addr);
        iowrite32(XGENE_ENET_RD_CMD, cmd);

        /* wait for read command to complete */
        while (!(done = ioread32(cmd_done)) && wait--)
                udelay(1);

        if (!done)
                return false;

        *rd_data = ioread32(rd);
        iowrite32(0, cmd);

        return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
                                  u32 rd_addr, u32 *rd_data)
{
        void __iomem *addr, *rd, *cmd, *cmd_done;

        addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
        rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
        cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
        cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

        if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
                netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
                           rd_addr);
}
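
/* MDIO write through the MAC's MII management block: program the
 * PHY/register address, write the control data, then poll the BUSY
 * indicator until the cycle finishes or the wait budget runs out.
 */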
static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
                               u32 reg, u16 data)
{
        u32 addr = 0, wr_data = 0;
        u32 done;
        u8 wait = 10;

        PHY_ADDR_SET(&addr, phy_id);
        REG_ADDR_SET(&addr, reg);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

        PHY_CONTROL_SET(&wr_data, data);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
        do {
                usleep_range(5, 10);
                xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
        } while ((done & BUSY_MASK) && wait--);

        if (done & BUSY_MASK) {
                netdev_err(pdata->ndev, "MII_MGMT write failed\n");
                return -EBUSY;
        }

        return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
                              u8 phy_id, u32 reg)
{
        u32 addr = 0;
        u32 data, done;
        u8 wait = 10;

        PHY_ADDR_SET(&addr, phy_id);
        REG_ADDR_SET(&addr, reg);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
        do {
                usleep_range(5, 10);
                xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
        } while ((done & BUSY_MASK) && wait--);

        if (done & BUSY_MASK) {
                netdev_err(pdata->ndev, "MII_MGMT read failed\n");
                return -EBUSY;
        }

        xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

        return data;
}
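
/* Load the station MAC address, packed little-endian across two
 * registers.  For a hypothetical address 00:11:22:33:44:55 this yields
 * addr0 = 0x33221100 and addr1 = 0x55440000.
 */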
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
        u32 addr0, addr1;
        u8 *dev_addr = pdata->ndev->dev_addr;

        addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
                (dev_addr[1] << 8) | dev_addr[0];
        addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

        xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
        xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
        struct net_device *ndev = pdata->ndev;
        u32 data;
        u8 wait = 10;

        xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
        do {
                usleep_range(100, 110);
                xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
        } while ((data != 0xffffffff) && wait--);

        if (data != 0xffffffff) {
                netdev_err(ndev, "Failed to release memory from shutdown\n");
                return -ENODEV;
        }

        return 0;
}
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
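
/* Bring up the GMAC for the current PHY speed: select the interface and
 * MAC modes, tune the asynchronous-read wait states and RGMII clocking,
 * set the MAC address, slow the MDC clock, enable drop when no bufpool
 * buffer is available, and finally reopen the Rx/Tx traffic gates.
 */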
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
        u32 value, mc2;
        u32 intf_ctl, rgmii;
        u32 icm0, icm2;

        xgene_gmac_reset(pdata);

        xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
        xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
        xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
        xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
        xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

        switch (pdata->phy_speed) {
        case SPEED_10:
                ENET_INTERFACE_MODE2_SET(&mc2, 1);
                CFG_MACMODE_SET(&icm0, 0);
                CFG_WAITASYNCRD_SET(&icm2, 500);
                rgmii &= ~CFG_SPEED_1250;
                break;
        case SPEED_100:
                ENET_INTERFACE_MODE2_SET(&mc2, 1);
                intf_ctl |= ENET_LHD_MODE;
                CFG_MACMODE_SET(&icm0, 1);
                CFG_WAITASYNCRD_SET(&icm2, 80);
                rgmii &= ~CFG_SPEED_1250;
                break;
        default:
                ENET_INTERFACE_MODE2_SET(&mc2, 2);
                intf_ctl |= ENET_GHD_MODE;
                CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
                xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
                value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
                xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
                break;
        }

        mc2 |= FULL_DUPLEX2;
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
        xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

        xgene_gmac_set_mac_addr(pdata);

        /* Adjust MDC clock frequency */
        xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
        MGMT_CLOCK_SEL_SET(&value, 7);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

        /* Enable drop if bufpool not available */
        xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
        value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
        xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

        /* Rtype should be copied from FP */
        xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
        xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);

        /* Rx-Tx traffic resume */
        xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

        xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
        xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

        xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
        value &= ~TX_DV_GATE_EN0;
        value &= ~RX_DV_GATE_EN0;
        value |= RESUME_RX0;
        xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

        xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
        u32 val = 0xffffffff;

        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
        xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}
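
/* Classifier (CLE) bypass: steer received traffic directly to the given
 * destination work queue, with free-pool buffers drawn from the selector
 * derived from the bufpool ring number.
 */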
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
                                  u32 dst_ring_num, u16 bufpool_id)
{
        u32 cb;
        u32 fpsel;

        fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

        xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
        cb |= CFG_CLE_BYPASS_EN0;
        CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
        xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

        xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
        CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
        CFG_CLE_FPSEL0_SET(&cb, fpsel);
        xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}
static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
        u32 data;

        xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
        xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
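
/* The ring manager is only usable once its clock is enabled and it is
 * out of soft reset.
 */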
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
        if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
                return false;

        if (ioread32(p->ring_csr_addr + SRST_ADDR))
                return false;

        return true;
}
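
/* Port reset: check the ring manager, cycle the port clock (apparently
 * to reset the block), release the ECC-protected RAMs from shutdown,
 * associate the ring-interface queues, and set up the MII management
 * clock with auto-increment scanning.
 */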
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
        u32 val;

        if (!xgene_ring_mgr_init(pdata))
                return -ENODEV;

        clk_prepare_enable(pdata->clk);
        clk_disable_unprepare(pdata->clk);
        clk_prepare_enable(pdata->clk);

        xgene_enet_ecc_init(pdata);
        xgene_enet_config_ring_if_assoc(pdata);

        /* Enable auto-incr for scanning */
        xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
        val |= SCAN_AUTO_INCR;
        MGMT_CLOCK_SEL_SET(&val, 1);
        xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

        return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
        clk_disable_unprepare(pdata->clk);
}
static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
        struct xgene_enet_pdata *pdata = bus->priv;
        u32 val;

        val = xgene_mii_phy_read(pdata, mii_id, regnum);
        netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
                   mii_id, regnum, val);

        return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                 u16 val)
{
        struct xgene_enet_pdata *pdata = bus->priv;

        netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
                   mii_id, regnum, val);

        return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}
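
/* phylib link-change callback: re-init the MAC whenever the negotiated
 * speed changes, and gate Rx/Tx off while the link is down.
 */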
static void xgene_enet_adjust_link(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct phy_device *phydev = pdata->phy_dev;

        if (phydev->link) {
                if (pdata->phy_speed != phydev->speed) {
                        pdata->phy_speed = phydev->speed;
                        xgene_gmac_init(pdata);
                        xgene_gmac_rx_enable(pdata);
                        xgene_gmac_tx_enable(pdata);
                        phy_print_status(phydev);
                }
        } else {
                xgene_gmac_rx_disable(pdata);
                xgene_gmac_tx_disable(pdata);
                pdata->phy_speed = SPEED_UNKNOWN;
                phy_print_status(phydev);
        }
}
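
/* Connect to the PHY referenced by the "phy-handle" DT property.  The
 * half-duplex modes are masked out of the supported/advertised sets,
 * presumably because the MAC is always run full duplex (see
 * FULL_DUPLEX2 in xgene_gmac_init()).
 */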
static int xgene_enet_phy_connect(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device_node *phy_np;
        struct phy_device *phy_dev;
        struct device *dev = &pdata->pdev->dev;

        phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
        if (!phy_np) {
                netdev_dbg(ndev, "No phy-handle found\n");
                return -ENODEV;
        }

        phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
                                 0, pdata->phy_mode);
        if (!phy_dev) {
                netdev_err(ndev, "Could not connect to PHY\n");
                return -ENODEV;
        }

        pdata->phy_speed = SPEED_UNKNOWN;
        phy_dev->supported &= ~SUPPORTED_10baseT_Half &
                              ~SUPPORTED_100baseT_Half &
                              ~SUPPORTED_1000baseT_Half;
        phy_dev->advertising = phy_dev->supported;
        pdata->phy_dev = phy_dev;

        return 0;
}
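
/* Look up the "apm,xgene-mdio" child node, register an MDIO bus backed
 * by the indirect MII accessors above, and connect the PHY; on PHY
 * connect failure the bus is unregistered and freed again.
 */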
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
        struct net_device *ndev = pdata->ndev;
        struct device *dev = &pdata->pdev->dev;
        struct device_node *child_np;
        struct device_node *mdio_np = NULL;
        struct mii_bus *mdio_bus;
        int ret;

        for_each_child_of_node(dev->of_node, child_np) {
                if (of_device_is_compatible(child_np, "apm,xgene-mdio")) {
                        mdio_np = child_np;
                        break;
                }
        }

        if (!mdio_np) {
                netdev_dbg(ndev, "No mdio node in the dts\n");
                return -ENXIO;
        }

        mdio_bus = mdiobus_alloc();
        if (!mdio_bus)
                return -ENOMEM;

        mdio_bus->name = "APM X-Gene MDIO bus";
        mdio_bus->read = xgene_enet_mdio_read;
        mdio_bus->write = xgene_enet_mdio_write;
        snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
                 ndev->name);

        mdio_bus->priv = pdata;
        mdio_bus->parent = &ndev->dev;

        ret = of_mdiobus_register(mdio_bus, mdio_np);
        if (ret) {
                netdev_err(ndev, "Failed to register MDIO bus\n");
                mdiobus_free(mdio_bus);
                return ret;
        }
        pdata->mdio_bus = mdio_bus;

        ret = xgene_enet_phy_connect(ndev);
        if (ret)
                xgene_enet_mdio_remove(pdata);

        return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
        mdiobus_unregister(pdata->mdio_bus);
        mdiobus_free(pdata->mdio_bus);
        pdata->mdio_bus = NULL;
}
struct xgene_mac_ops xgene_gmac_ops = {
        .init = xgene_gmac_init,
        .reset = xgene_gmac_reset,
        .rx_enable = xgene_gmac_rx_enable,
        .tx_enable = xgene_gmac_tx_enable,
        .rx_disable = xgene_gmac_rx_disable,
        .tx_disable = xgene_gmac_tx_disable,
        .set_mac_addr = xgene_gmac_set_mac_addr,
};

struct xgene_port_ops xgene_gport_ops = {
        .reset = xgene_enet_reset,
        .cle_bypass = xgene_enet_cle_bypass,
        .shutdown = xgene_gport_shutdown,
};