/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

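/* The descriptor ring hardware state is staged in ring->state (one u32 per
 * CSR_RING_WR_BASE word) and flushed to the ring manager in one shot by
 * xgene_enet_write_ring_state().  The field builders below rely on
 * CREATE_MASK()/CREATE_MASK_ULL() from xgene_enet_hw.h; assuming the usual
 * CREATE_MASK(pos, len) == GENMASK(pos + len - 1, pos) definition there,
 * e.g. CREATE_MASK(3, 4) == GENMASK(6, 3) == 0x78.
 */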
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

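/* Publish the ring to the ring manager: CSR_RING_ID selects the ring
 * (id in bits 9:0, with OVERWRITE set so the entry is rewritten), and
 * CSR_RING_ID_BUF carries the ring number in bits 18:9 plus the
 * prefetch-buffer-enable and buffer-pool flags.
 */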
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

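/* Bring a ring up from scratch: clear any stale hardware state, program
 * the new state and ring id, then derive the slot count from the ring
 * size.  For CPU-owned (completion) rings every descriptor is marked
 * empty and the ring's "not empty" interrupt mode bit is enabled;
 * bufpool and MAC-owned rings need neither step.
 */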
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

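/* Ring doorbell and occupancy helpers.  The signed count written to
 * ring->cmd adjusts the hardware's view of the queue; the core driver
 * (xgene_enet_main.c) writes a positive count to publish newly filled
 * descriptors and a negative count to retire consumed ones.
 * xgene_enet_ring_len() extracts the NUMMSGSINQ field from the second
 * ring-state word to report how many messages are currently queued.
 */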
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

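/* Translate a hardware ingress error code into the matching per-ring
 * netdev statistics.  Note that the FIFO-overrun case is the only one
 * that leaves rx_dropped untouched.
 */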
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

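/* The MCX MAC registers are reached through a four-register indirect
 * interface rather than being mapped directly: write the target
 * register number to the ADDR register (and, for writes, the payload to
 * the WRITE register), kick the COMMAND register, then poll
 * COMMAND_DONE (here for up to roughly 10us) before releasing the
 * command.
 */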
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

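/* Program the station address.  The MAC expects the address split
 * little-endian across two registers; e.g. for 00:01:02:03:04:05,
 * STATION_ADDR0 = 0x03020100 and STATION_ADDR1 = 0x05040000.
 */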
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

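/* Set the reference clock for the negotiated line rate: the standard
 * RGMII rates of 2.5 MHz for 10 Mbps, 25 MHz for 100 Mbps and 125 MHz
 * for 1 Gbps.  On DT systems the parent clock is reprogrammed directly;
 * on ACPI systems the equivalent is delegated to the platform's S10,
 * S100 or S1G method.
 */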
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}

static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

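/* Pause-frame handling is split three ways: enable_tx_pause() gates the
 * hardware's automatic pause generation (MULTI_DPF/XON), while
 * flowctl_tx() and flowctl_rx() toggle the MAC's willingness to send
 * and to honour pause frames via MAC_CONFIG_1.
 */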
static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
				       bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);

	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

	xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
}

static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);

	if (enable)
		data |= TX_FLOW_EN;
	else
		data &= ~TX_FLOW_EN;

	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);

	pdata->mac_ops->enable_tx_pause(pdata, enable);
}

static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);

	if (enable)
		data |= RX_FLOW_EN;
	else
		data &= ~RX_FLOW_EN;

	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
}

static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Configure HW pause frame generation */
	xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
	value = (DEF_QUANTA << 16) | (value & 0xFFFF);
	xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);

	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);

	xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

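/* Associate the QMI work queues and free pool queues with this port,
 * for both the central QM and QM-lite; writing all-ones appears to set
 * every association bit.
 */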
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

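/* Bypass the classifier engine (CLE) so that all ingress traffic is
 * steered to a single destination ring, with buffers drawn from the
 * given primary free pool and, when provided, the next (page) pool.
 */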
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cb;
	u32 fpsel, nxtfpsel;

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

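/* Port reset.  Bail out early unless the ring manager is clocked and
 * out of reset.  When an external MDIO driver owns the bus, only the
 * ring-interface association is redone; otherwise the port is reset by
 * cycling its clock on DT systems, or by invoking the _RST (or legacy
 * _INI) method on ACPI systems.
 */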
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->mdio_driver) {
		xgene_enet_config_ring_if_assoc(pdata);
		return 0;
	}

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_RST", NULL, NULL);
		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
					   "_INI")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;
		pb |= BIT(xgene_enet_get_fpsel(ring->id));
		ring = pdata->rx_ring[i]->page_pool;
		if (ring)
			pb |= BIT(xgene_enet_get_fpsel(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		pb |= BIT(xgene_enet_ring_bufnum(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u16 lcladv, rmtadv = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl = 0;

	if (!phydev->duplex || !pdata->pause_autoneg)
		return 0;

	if (pdata->tx_pause)
		flowctl |= FLOW_CTRL_TX;

	if (pdata->rx_pause)
		flowctl |= FLOW_CTRL_RX;

	lcladv = mii_advertise_flowctrl(flowctl);

	if (phydev->pause)
		rmtadv = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		rmtadv |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	tx_pause = !!(flowctl & FLOW_CTRL_TX);
	rx_pause = !!(flowctl & FLOW_CTRL_RX);

	if (tx_pause != pdata->tx_pause) {
		pdata->tx_pause = tx_pause;
		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
	}

	if (rx_pause != pdata->rx_pause) {
		pdata->rx_pause = rx_pause;
		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
	}

	return 0;
}

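/* phylib link-change callback: on link-up, reprogram the MAC for the
 * newly negotiated speed before re-enabling the datapath, then resolve
 * pause settings against the link partner; on link-down, quiesce the
 * MAC and forget the cached speed.
 */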
static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			mac_ops->set_speed(pdata);
			mac_ops->rx_enable(pdata);
			mac_ops->tx_enable(pdata);
			phy_print_status(phydev);
		}

		xgene_enet_flowctrl_cfg(ndev);
	} else {
		mac_ops->rx_disable(pdata);
		mac_ops->tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct acpi_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return args.adev;
}
#endif

int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;
	int i;

	if (dev->of_node) {
		for (i = 0; i < 2; i++) {
			np = of_parse_phandle(dev->of_node, "phy-handle", i);
			phy_dev = of_phy_connect(ndev, np,
						 &xgene_enet_adjust_link,
						 0, pdata->phy_mode);
			of_node_put(np);
			if (phy_dev)
				break;
		}

		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	} else {
#ifdef CONFIG_ACPI
		struct acpi_device *adev = acpi_phy_find_device(dev);

		if (adev)
			phy_dev = adev->driver_data;
		else
			phy_dev = NULL;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
#else
		return -ENODEV;
#endif
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->supported |= SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

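/* Register the MDIO bus.  On DT systems this defers to
 * of_mdiobus_register() on the "apm,xgene-mdio" child node; on ACPI
 * systems auto-probing is masked off and the single PHY named by the
 * "phy-channel" (or legacy "phy-addr") property is registered by hand.
 */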
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

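/* Callback tables exported to the core driver (xgene_enet_main.c),
 * which presumably selects among the GMAC, SGMAC and XGMAC variants at
 * probe time based on the port's PHY interface mode.
 */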
const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_speed = xgene_gmac_set_speed,
	.set_mac_addr = xgene_gmac_set_mac_addr,
	.set_framesize = xgene_enet_set_frame_size,
	.enable_tx_pause = xgene_gmac_enable_tx_pause,
	.flowctl_tx = xgene_gmac_flowctl_tx,
	.flowctl_rx = xgene_gmac_flowctl_rx,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};