/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

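/* Fill in the cached ring state words used to configure a descriptor
 * ring: select-threshold, error acceptance, coherency, the descriptor
 * base address and the encoded ring size.  The DMA address is shifted
 * right by 8 first, which suggests the hardware takes the base in
 * 256-byte units, split across the RINGADDRL/RINGADDRH fields.
 */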
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

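/* Enable the recombination buffer for a ring and program its timeout.
 * The timeout value is split across two state words: the low bits
 * (0xf) go into word 3, the high bits (0x7) into word 4.
 */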
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

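/* Push the cached ring state out to hardware: select the ring via
 * CSR_RING_CONFIG, then write each 32-bit state word to consecutive
 * CSR_RING_WR_BASE registers.  num_ring_config comes from the ring
 * ops, so the word count can differ between ring generations.
 */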
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

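/* Map the ring id to its ring number and enable the prefetch buffer.
 * The id occupies the low 10 bits of CSR_RING_ID; the ring number goes
 * into bits 18:9 of CSR_RING_ID_BUF, with IS_BUFFER_POOL flagging
 * buffer-pool rings.  OVERWRITE is set on every id write, presumably
 * to commit the new mapping.
 */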
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

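/* Bring a descriptor ring up: clear any stale state, program the new
 * state and id, and compute the slot count.  For CPU-owned completion
 * rings, every slot is additionally marked empty and the ring's
 * "not empty" interrupt is enabled; bufnum appears to index the
 * interrupt-mode bits from the top of the register, hence
 * BIT(31 - bufnum).
 */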
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

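/* Return the number of messages pending in the queue by reading the
 * second word at the ring's command base and extracting the
 * NUMMSGSINQ field.
 */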
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	struct rtnl_link_stats64 *stats = &pdata->stats;

	switch (status) {
	case INGRESS_CRC:
		stats->rx_crc_errors++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		stats->rx_errors++;
		break;
	case INGRESS_TRUNC_FRAME:
		stats->rx_frame_errors++;
		break;
	case INGRESS_PKT_LEN:
		stats->rx_length_errors++;
		break;
	case INGRESS_PKT_UNDER:
		stats->rx_frame_errors++;
		break;
	case INGRESS_FIFO_OVERRUN:
		stats->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

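/* Indirect register write: latch the target register in the address
 * register, stage the data, kick the command register, then poll the
 * done register (up to ~10us) before releasing the command.  Returns
 * false if the write never completed.  For example, the MCX MAC
 * wrapper below lets callers do
 *
 *	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
 *
 * which resolves to one such indirect handshake.
 */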
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

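/* Indirect register read: the mirror image of the write handshake
 * above.  Latch the address, issue the read command, poll for done,
 * fetch the data word, then clear the command register.
 */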
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

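/* MDIO access goes through the MII management block of the MCX MAC:
 * program the PHY/register pair into MII_MGMT_ADDRESS, start the
 * cycle, then poll MII_MGMT_INDICATORS until BUSY clears (bounded by
 * the wait counter).
 */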
static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}

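/* Program the station MAC address.  The six bytes are packed
 * little-endian-style into two registers: bytes 0-3 into STATION_ADDR0
 * and bytes 4-5 into the top half of STATION_ADDR1.
 */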
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

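/* Release the ethernet block RAMs from shutdown and wait (up to about
 * 1ms) for every memory to report ready; 0xffffffff in
 * ENET_BLOCK_MEM_RDY means all blocks are up.
 */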
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

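/* One-stop MAC/interface bring-up, re-run on every link speed change
 * (see xgene_enet_adjust_link).  The switch programs the interface
 * mode, MAC mode and ICM wait states per speed; the default case
 * covers gigabit, where the unisec Tx/Rx blocks are bypassed.
 */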
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl |= ENET_GHD_MODE;
		CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

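/* Bypass the classifier engine (CLE) and steer all ingress traffic to
 * a single work queue / buffer pool pair.  The fpsel encoding appears
 * to be the bufpool's buffer number minus 0x20.
 */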
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

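/* The ring manager is usable only once firmware has enabled its clock
 * and deasserted its reset; report false otherwise so callers can bail
 * out instead of touching dead hardware.
 */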
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

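/* Port reset: cycle the port clock (enable, disable, re-enable), which
 * appears to reset the block, then bring the RAMs out of shutdown,
 * restore the ring-interface associations and set up MII management
 * scanning.
 */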
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (!IS_ERR(pdata->clk)) {
		clk_prepare_enable(pdata->clk);
		clk_disable_unprepare(pdata->clk);
		clk_prepare_enable(pdata->clk);
		xgene_enet_ecc_init(pdata);
	}
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

	return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
}

static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct xgene_enet_pdata *pdata = bus->priv;
	u32 val;

	val = xgene_mii_phy_read(pdata, mii_id, regnum);
	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 val)
{
	struct xgene_enet_pdata *pdata = bus->priv;

	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

static int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *phy_np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
		if (!phy_np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}

		phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
					 0, pdata->phy_mode);
		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}

		pdata->phy_dev = phy_dev;
	} else {
		phy_dev = pdata->phy_dev;
		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

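/* Register the MDIO bus.  With a DT node, the "apm,xgene-mdio" child
 * is handed to of_mdiobus_register(); otherwise (e.g. ACPI)
 * auto-probing is masked off and the single PHY named by the
 * "phy-channel" property (falling back to "phy-addr") is created and
 * registered by hand.
 */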
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	int ret;
	u32 phy_id;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_id);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_id);
	if (ret)
		return -EINVAL;

	phy = get_phy_device(mdio, phy_id, false);
	if (!phy || IS_ERR(phy))
		return -EIO;

	ret = phy_device_register(phy);
	if (ret)
		phy_device_free(phy);
	else
		pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

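/* Exported ops tables; the core driver selects between these and the
 * SGMII/XGMII variants according to the port's PHY interface mode.
 */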
struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};