/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

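/* Initialize every descriptor in the buffer pool with its slot index,
 * the destination ring number (FPQNUM) and the STASH setting expected
 * by the hardware free-pool engine.
 */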
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

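/* Allocate nbuf fresh skbs, map them for DMA and hand them to the
 * buffer pool ring so the hardware has receive buffers to fill.
 */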
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	iowrite32(nbuf, buf_pool->cmd);
	buf_pool->tail = tail;

	return 0;
}

static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);

	return num_msgs >> NUMMSGSINQ_POS;
}

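/* Walk back over the descriptors still queued in the buffer pool and
 * free the skbs that were posted to the hardware but never consumed.
 */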
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = xgene_enet_ring_len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	iowrite32(-len, buf_pool->cmd);
	buf_pool->tail = tail;
}

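/* Rx interrupt handler: mask the line and kick NAPI; the ring is
 * drained later in xgene_enet_napi().
 */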
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

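/* Reclaim a transmitted skb: unmap its DMA buffer, report any error
 * code latched by the hardware (LERR) and free the skb.
 */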
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

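/* Build the Tx work-message word: Ethernet/IP/L4 header lengths, the
 * L4 protocol and the checksum-offload enables derived from the skb.
 */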
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 l3hlen, l4hlen = 0;
	u8 csum_enable = 0;
	u8 proto = 0;
	u8 ethhdr;
	u64 hopinfo;

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	ethhdr = xgene_enet_hdr_len(skb->data);
	hopinfo = SET_VAL(TCPHDR, l4hlen) |
		  SET_VAL(IPHDR, l3hlen) |
		  SET_VAL(ETHHDR, ethhdr) |
		  SET_VAL(EC, csum_enable) |
		  SET_VAL(IS, proto) |
		  SET_BIT(IC) |
		  SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

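/* Fill a Tx descriptor for one linear skb: map the data for DMA and
 * stash the skb pointer so the completion path can free it later.
 */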
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}

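/* .ndo_start_xmit: back off while the Tx or completion queue is above
 * its high-water mark, otherwise post one descriptor and ring the
 * doorbell.
 */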
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = xgene_enet_ring_len(tx_ring);
	cq_level = xgene_enet_ring_len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	iowrite32(1, tx_ring->cmd);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

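/* Hand one received frame to the stack: unmap the buffer, strip the
 * CRC, update statistics and top up the buffer pool when needed.
 */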
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

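/* Drain up to budget descriptors from a ring shared by Rx frames and
 * Tx completions, then credit the hardware for the slots consumed.
 */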
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		iowrite32(-count, ring->cmd);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return count;
}

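/* NAPI poll callback; re-enable the Rx interrupt once the ring has
 * been emptied within the budget.
 */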
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int ret;

	ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ndev->name, pdata->rx_ring);
	if (ret) {
		netdev_err(ndev, "rx%d interrupt request failed\n",
			   pdata->rx_ring->irq);
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	napi_enable(&pdata->rx_ring->napi);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	napi_disable(&pdata->rx_ring->napi);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	xgene_enet_clear_ring(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);

	if (ring->desc_addr) {
		xgene_enet_clear_ring(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

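/* Allocate one descriptor ring: the ring structure, coherent DMA
 * memory for the descriptors and the per-ring command register
 * mapping, then let the ring engine configure it.
 */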
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = xgene_enet_setup_ring(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

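/* Create the Rx ring, its backing buffer pool and the Tx ring (which
 * shares the Rx ring for completions), and derive the queue-depth
 * thresholds used to stop and wake the Tx queue.
 */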
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	u8 cpu_bufnum = 0, eth_bufnum = 0;
	u8 bp_bufnum = 0x20;
	u16 ring_id, ring_num = 0;
	int ret;

	/* allocate rx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	cp_ring = pdata->rx_ring;
	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

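/* Map the CSR regions named in the device tree, fetch the Rx IRQ and
 * MAC address, validate the phy-connection-type and grab the clock.
 */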
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	const char *mac;
	int ret;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->base_addr)) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return PTR_ERR(pdata->base_addr);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ring_csr_addr)) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return PTR_ERR(pdata->ring_csr_addr);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ring_cmd_addr)) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return PTR_ERR(pdata->ring_cmd_addr);
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	mac = of_get_mac_address(dev->of_node);
	if (mac)
		memcpy(ndev->dev_addr, mac, ndev->addr_len);
	else
		eth_hw_addr_random(ndev);
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		dev_err(&pdev->dev, "can't get clock\n");
		ret = PTR_ERR(pdata->clk);
		return ret;
	}

	base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

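/* Bring up the datapath: reset the port, create the rings, fill the
 * buffer pool, program the classifier bypass and initialize the MAC.
 */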
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	pdata->port_ops->reset(pdata);

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

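/* Select the MAC and port callbacks plus the rm value that match the
 * configured PHY interface mode (RGMII, SGMII or XGMII).
 */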
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}
}

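/* Probe: allocate the net_device, map resources, register the netdev,
 * initialize the hardware and attach NAPI plus the PHY or link-state
 * polling, depending on the interface mode.
 */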
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct napi_struct *napi;
	struct xgene_mac_ops *mac_ops;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	netif_napi_del(&pdata->rx_ring->napi);
	xgene_enet_mdio_remove(pdata);
	xgene_enet_delete_desc_rings(pdata);
	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

static struct of_device_id xgene_enet_match[] = {
	{.compatible = "apm,xgene-enet",},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_match);

static struct platform_driver xgene_enet_driver = {
	.driver = {
		.name = "xgene-enet",
		.of_match_table = xgene_enet_match,
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");