xgene_enet_hw.c
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

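/*
 * Ring configuration is staged in the ring->state[] shadow words by the
 * helpers below; nothing reaches the hardware until
 * xgene_enet_write_ring_state() flushes the whole array through
 * CSR_RING_WR_BASE.
 */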
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

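/*
 * Bring a ring up from scratch: zero the shadow state, program type,
 * coherency and DMA base, then bind ring->num to ring->id through
 * CSR_RING_ID/CSR_RING_ID_BUF.  For CPU-owned rings every slot is also
 * marked empty and the ring's bit in CSR_RING_NE_INT_MODE is set.
 */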
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

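/*
 * The MCX MAC registers are not memory mapped directly; they sit behind
 * an indirect window.  The protocol, implemented below: write the
 * target register offset to the ADDR register and (for writes) the
 * payload to the WRITE register, pulse the COMMAND register, then poll
 * COMMAND_DONE for up to ~10 us before clearing the command.  Callers
 * use the wrappers, e.g. (MAC_OFFSET_X standing in for any MAC register
 * offset):
 *
 *	xgene_enet_wr_mcx_mac(pdata, MAC_OFFSET_X, val);
 *
 * which log a netdev error if the command never completes.
 */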
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

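/*
 * The station address is split across two MAC registers: dev_addr
 * bytes 0-3 are packed into STATION_ADDR0 (byte 0 in the low octet)
 * and bytes 4-5 into the top half of STATION_ADDR1.  For the MAC
 * address 00:11:22:33:44:55 this writes 0x33221100 and 0x55440000.
 */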
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

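/*
 * Match the RGMII reference clock to the negotiated speed.  On
 * device-tree systems the parent clock is reprogrammed directly to the
 * usual RGMII rates (2.5/25/125 MHz for 10/100/1000 Mbps); on ACPI
 * systems the equivalent firmware methods S10/S100/S1G are evaluated
 * instead.
 */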
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

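/*
 * Per-speed MAC programming, as done by the switch below:
 *
 *	speed	MODE2	LHD/GHD		MACMODE	WAITASYNCRD	SPEED_1250
 *	10	1	both clear	0	500		clear
 *	100	1	LHD		1	80		clear
 *	1G	2	GHD		2	0		set
 *
 * Gigabit additionally applies the DT-provided tx/rx clock delays and
 * bypasses the unisec Tx/Rx blocks through DEBUG_REG.
 */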
static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}

static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mcx_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
				       bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);

	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

	xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
}

static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);

	if (enable)
		data |= TX_FLOW_EN;
	else
		data &= ~TX_FLOW_EN;

	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);

	pdata->mac_ops->enable_tx_pause(pdata, enable);
}

static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);

	if (enable)
		data |= RX_FLOW_EN;
	else
		data &= ~RX_FLOW_EN;

	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data);
}

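/*
 * Full GMAC bring-up.  The MAC is only soft-reset when no dedicated
 * MDIO driver owns it; speed, station address, MDC divisor and pause
 * generation are programmed next, and the tail of the function opens
 * the Rx/Tx gates so traffic can flow.
 */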
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Configure HW pause frame generation */
	xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
	value = (DEF_QUANTA << 16) | (value & 0xFFFF);
	xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);

	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);

	xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

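/*
 * With classifier (CLE) bypass enabled, ingress parsing is skipped and
 * every received frame is steered to the single destination ring,
 * drawing buffers from the given free pool (nxtbufpool_id selects a
 * secondary free pool).
 */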
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cb;
	u32 fpsel, nxtfpsel;

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

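/*
 * Port reset.  The ring manager must already be clocked and out of
 * reset (checked via xgene_ring_mgr_init() above); device-tree systems
 * then bounce the port clock, while ACPI systems get the same effect by
 * evaluating the firmware _RST (or, failing that, _INI) method.
 */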
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->mdio_driver) {
		xgene_enet_config_ring_if_assoc(pdata);
		return 0;
	}

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_RST", NULL, NULL);
		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
					   "_INI")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;
		pb |= BIT(xgene_enet_get_fpsel(ring->id));
		ring = pdata->rx_ring[i]->page_pool;
		if (ring)
			pb |= BIT(xgene_enet_get_fpsel(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		pb |= BIT(xgene_enet_ring_bufnum(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

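/*
 * Resolve pause configuration after autonegotiation with the standard
 * MII helpers: mii_advertise_flowctrl() turns the local FLOW_CTRL_TX/RX
 * wishes into an advertisement word, the partner's pause/asym_pause
 * bits form rmtadv, and mii_resolve_flowctrl_fdx() yields the final
 * mask.  If both ends advertise symmetric pause, for instance, both
 * tx_pause and rx_pause end up enabled.
 */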
static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u16 lcladv, rmtadv = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl = 0;

	if (!phydev->duplex || !pdata->pause_autoneg)
		return 0;

	if (pdata->tx_pause)
		flowctl |= FLOW_CTRL_TX;

	if (pdata->rx_pause)
		flowctl |= FLOW_CTRL_RX;

	lcladv = mii_advertise_flowctrl(flowctl);

	if (phydev->pause)
		rmtadv = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		rmtadv |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	tx_pause = !!(flowctl & FLOW_CTRL_TX);
	rx_pause = !!(flowctl & FLOW_CTRL_RX);

	if (tx_pause != pdata->tx_pause) {
		pdata->tx_pause = tx_pause;
		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
	}

	if (rx_pause != pdata->rx_pause) {
		pdata->rx_pause = rx_pause;
		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
	}

	return 0;
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			mac_ops->set_speed(pdata);
			mac_ops->rx_enable(pdata);
			mac_ops->tx_enable(pdata);
			phy_print_status(phydev);
		}

		xgene_enet_flowctrl_cfg(ndev);
	} else {
		mac_ops->rx_disable(pdata);
		mac_ops->tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct acpi_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return args.adev;
}
#endif

int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;
	int i;

	if (dev->of_node) {
		for (i = 0; i < 2; i++) {
			np = of_parse_phandle(dev->of_node, "phy-handle", i);
			phy_dev = of_phy_connect(ndev, np,
						 &xgene_enet_adjust_link,
						 0, pdata->phy_mode);
			of_node_put(np);
			if (phy_dev)
				break;
		}

		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	} else {
#ifdef CONFIG_ACPI
		struct acpi_device *adev = acpi_phy_find_device(dev);

		if (adev)
			phy_dev = adev->driver_data;
		else
			phy_dev = NULL;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
#else
		return -ENODEV;
#endif
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->supported |= SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

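/*
 * MDIO bus registration takes one of two paths: with device tree, the
 * "apm,xgene-mdio" child node is handed to of_mdiobus_register(), which
 * probes the PHYs itself; otherwise auto-probing is masked off and the
 * single PHY named by the "phy-channel" property (falling back to
 * "phy-addr") is registered by hand.
 */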
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

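/*
 * Operation tables exported to the core driver, which dispatches
 * through them so that this RGMII/GMAC port and the other interface
 * variants can share one datapath.
 */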
const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_speed = xgene_gmac_set_speed,
	.set_mac_addr = xgene_gmac_set_mac_addr,
	.set_framesize = xgene_enet_set_frame_size,
	.enable_tx_pause = xgene_gmac_enable_tx_pause,
	.flowctl_tx = xgene_gmac_flowctl_tx,
	.flowctl_rx = xgene_gmac_flowctl_rx,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};