xgene_enet_hw.c

/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
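
/* Program the ring state words for a descriptor ring: threshold select,
 * the ACCEPTLERR and QCOHERENT flags, the DMA base address (shifted
 * right by 8 and split across the low/high address fields) and the
 * encoded ring size.
 */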
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
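
/* Bring a ring up from scratch: clear and reprogram its state words,
 * claim its ring id, and derive the slot count from the configured
 * size.  For CPU-owned rings every descriptor slot is also marked
 * empty, and the ring's bit in CSR_RING_NE_INT_MODE is set (NE is
 * presumably "non-empty") so the ring manager interrupts the CPU when
 * messages arrive.
 */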
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}
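
/* Program interrupt coalescing for the ring monitor.  The constants
 * written here (0x8e, 0x7777 and the two threshold values) appear to
 * be fixed hardware tuning values; they are not derived at runtime.
 */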
static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
	u32 data = 0x7777;

	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}
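
/* MCX MAC registers are not mapped directly; they sit behind an
 * indirect access window: write the target register address and the
 * payload, kick the command register, then poll the command-done
 * register (for up to ~10us here) before clearing the command.
 */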
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}
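
/* MDIO accesses to the PHY go through the MAC's MII management block:
 * set the PHY/register address, start a read or write cycle, then poll
 * the BUSY bit in the indicators register.  Note that every poll is
 * itself an indirect MAC register read, so these paths are slow.
 */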
static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}
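
/* The 6-byte station address is split across two registers: bytes 0-3
 * in STATION_ADDR0 (byte 0 in the least significant position) and
 * bytes 4-5 in the upper half of STATION_ADDR1.
 */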
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}
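
/* Take the ethernet block's RAMs out of shutdown and wait for the
 * memory-ready CSR to read back all-ones, giving up after roughly 1ms.
 */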
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}
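
/* Reconfigure the gigabit MAC for the current PHY speed: reset it, set
 * the interface mode plus ICM and RGMII configuration to match
 * 10/100/1000 Mbps, program the station address and MDC clock divisor,
 * enable dropping frames when no buffer-pool buffer is available, and
 * finally resume Rx/Tx traffic.
 */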
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}
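
/* Steer ingress traffic around the classifier engine (CLE): enable
 * bypass mode and point it at a fixed destination work queue and free
 * pool.  The fpsel value is the buffer pool number relative to 0x20,
 * which appears to be the first buffer-pool ring number.
 */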
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
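
/* Sanity-check the ring manager before touching any ring CSRs: its
 * clock must be enabled and it must be out of soft reset.
 */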
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (!IS_ERR(pdata->clk)) {
		clk_prepare_enable(pdata->clk);
		clk_disable_unprepare(pdata->clk);
		clk_prepare_enable(pdata->clk);
		xgene_enet_ecc_init(pdata);
	}
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

	return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
}

static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct xgene_enet_pdata *pdata = bus->priv;
	u32 val;

	val = xgene_mii_phy_read(pdata, mii_id, regnum);
	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 val)
{
	struct xgene_enet_pdata *pdata = bus->priv;

	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

static int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *phy_np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
		if (!phy_np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}

		phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
					 0, pdata->phy_mode);
		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}

		pdata->phy_dev = phy_dev;
	} else {
		phy_dev = pdata->phy_dev;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	int ret;
	u32 phy_id;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_id);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_id);
	if (ret)
		return -EINVAL;

	phy = get_phy_device(mdio, phy_id, false);
	if (!phy || IS_ERR(phy))
		return -EIO;

	ret = phy_device_register(phy);
	if (ret)
		phy_device_free(phy);
	else
		pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
	.coalesce = xgene_enet_setup_coalescing,
};