/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *            Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];
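
/* Map the ENET port CSR region and read the MAC address, PHY connection
 * type and IRQ from the platform/ACPI description; only RGMII is accepted.
 */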
static int xge_get_resources(struct xge_pdata *pdata)
{
        struct platform_device *pdev;
        struct net_device *ndev;
        int phy_mode, ret = 0;
        struct resource *res;
        struct device *dev;

        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Resource enet_csr not defined\n");
                return -ENODEV;
        }

        pdata->resources.base_addr = devm_ioremap(dev, res->start,
                                                  resource_size(res));
        if (!pdata->resources.base_addr) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
                return -ENOMEM;
        }

        if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
                eth_hw_addr_random(ndev);

        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

        phy_mode = device_get_phy_mode(dev);
        if (phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return phy_mode;
        }
        pdata->resources.phy_mode = phy_mode;

        if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
                dev_err(dev, "Incorrect phy-connection-type specified\n");
                return -ENODEV;
        }

        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(dev, "Unable to get irq\n");
                return ret;
        }
        pdata->resources.irq = ret;

        return 0;
}
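
/* Allocate and DMA-map up to @nbuf receive skbs and post them on the RX
 * descriptor ring, starting at ring->tail.
 */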
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_desc_ring *ring = pdata->rx_ring;
        const u8 slots = XGENE_ENET_NUM_DESC - 1;
        struct device *dev = &pdata->pdev->dev;
        struct xge_raw_desc *raw_desc;
        u64 addr_lo, addr_hi;
        u8 tail = ring->tail;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        u16 len;
        int i;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &ring->raw_desc[tail];

                len = XGENE_ENET_STD_MTU;
                skb = netdev_alloc_skb(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;

                dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                ring->pkt_info[tail].skb = skb;
                ring->pkt_info[tail].dma_addr = dma_addr;

                addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
                addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
                raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
                                           SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
                                           SET_BITS(PKT_ADDRH,
                                                    upper_32_bits(dma_addr)));

                dma_wmb();
                raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
                                           SET_BITS(E, 1));
                tail = (tail + 1) & slots;
        }

        ring->tail = tail;

        return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = xge_port_reset(ndev);
        if (ret)
                return ret;

        xge_port_init(ndev);
        pdata->nbufs = NUM_BUFS;

        return 0;
}
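
/* Interrupt handler: mask further interrupts and defer processing to NAPI */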
static irqreturn_t xge_irq(const int irq, void *data)
{
        struct xge_pdata *pdata = data;

        if (napi_schedule_prep(&pdata->napi)) {
                xge_intr_disable(pdata);
                __napi_schedule(&pdata->napi);
        }

        return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

        ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
                          pdata);
        if (ret)
                netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

        return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        free_irq(pdata->resources.irq, pdata);
}

static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
        if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
            (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
                return true;

        return false;
}
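
/* Copy the skb data into a 64B-aligned coherent buffer, fill in the next
 * TX descriptor and kick the DMA engine via DMATXCTRL.
 */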
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *tx_ring;
        struct xge_raw_desc *raw_desc;
        static dma_addr_t dma_addr;
        u64 addr_lo, addr_hi;
        void *pkt_buf;
        u8 tail;
        u16 len;

        tx_ring = pdata->tx_ring;
        tail = tx_ring->tail;
        len = skb_headlen(skb);
        raw_desc = &tx_ring->raw_desc[tail];

        if (!is_tx_slot_available(raw_desc)) {
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        /* Packet buffers should be 64B aligned */
        pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
                                      GFP_ATOMIC);
        if (unlikely(!pkt_buf)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        memcpy(pkt_buf, skb->data, len);

        addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
        addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
        raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
                                   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
                                   SET_BITS(PKT_ADDRH,
                                            upper_32_bits(dma_addr)));

        tx_ring->pkt_info[tail].skb = skb;
        tx_ring->pkt_info[tail].dma_addr = dma_addr;
        tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

        dma_wmb();

        raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
                                   SET_BITS(PKT_SIZE, len) |
                                   SET_BITS(E, 0));
        skb_tx_timestamp(skb);
        xge_wr_csr(pdata, DMATXCTRL, 1);

        tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

        return NETDEV_TX_OK;
}

static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
        if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
            !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
                return true;

        return false;
}
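
/* Reclaim completed TX descriptors: free the coherent packet buffer and skb,
 * mark the slot empty again and wake the queue if it was stopped.
 */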
static void xge_txc_poll(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *tx_ring;
        struct xge_raw_desc *raw_desc;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        void *pkt_buf;
        u32 data;
        u8 head;

        tx_ring = pdata->tx_ring;
        head = tx_ring->head;

        data = xge_rd_csr(pdata, DMATXSTATUS);
        if (!GET_BITS(TXPKTCOUNT, data))
                return;

        while (1) {
                raw_desc = &tx_ring->raw_desc[head];

                if (!is_tx_hw_done(raw_desc))
                        break;

                dma_rmb();

                skb = tx_ring->pkt_info[head].skb;
                dma_addr = tx_ring->pkt_info[head].dma_addr;
                pkt_buf = tx_ring->pkt_info[head].pkt_buf;
                pdata->stats.tx_packets++;
                pdata->stats.tx_bytes += skb->len;
                dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
                dev_kfree_skb_any(skb);

                /* clear pktstart address and pktsize */
                raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
                                           SET_BITS(PKT_SIZE, SLOT_EMPTY));
                xge_wr_csr(pdata, DMATXSTATUS, 1);

                head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
        }

        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);

        tx_ring->head = head;
}
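
/* Receive up to @budget frames: unmap each filled RX descriptor, hand the
 * skb to the stack via GRO and refill the slot with a fresh buffer.
 */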
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *rx_ring;
        struct xge_raw_desc *raw_desc;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        int processed = 0;
        u8 head, rx_error;
        int i, ret;
        u32 data;
        u16 len;

        rx_ring = pdata->rx_ring;
        head = rx_ring->head;

        data = xge_rd_csr(pdata, DMARXSTATUS);
        if (!GET_BITS(RXPKTCOUNT, data))
                return 0;

        for (i = 0; i < budget; i++) {
                raw_desc = &rx_ring->raw_desc[head];

                if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
                        break;

                dma_rmb();

                skb = rx_ring->pkt_info[head].skb;
                rx_ring->pkt_info[head].skb = NULL;
                dma_addr = rx_ring->pkt_info[head].dma_addr;
                len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
                dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
                                 DMA_FROM_DEVICE);

                rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
                if (unlikely(rx_error)) {
                        pdata->stats.rx_errors++;
                        dev_kfree_skb_any(skb);
                        goto out;
                }

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, ndev);

                pdata->stats.rx_packets++;
                pdata->stats.rx_bytes += len;
                napi_gro_receive(&pdata->napi, skb);
out:
                ret = xge_refill_buffers(ndev, 1);
                xge_wr_csr(pdata, DMARXSTATUS, 1);
                xge_wr_csr(pdata, DMARXCTRL, 1);

                if (ret)
                        break;

                head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
                processed++;
        }

        rx_ring->head = head;

        return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
                                 struct xge_desc_ring *ring)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        u16 size;

        if (!ring)
                return;

        size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
        if (ring->desc_addr)
                dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

        kfree(ring->pkt_info);
        kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_desc_ring *ring = pdata->rx_ring;
        struct device *dev = &pdata->pdev->dev;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        int i;

        for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
                skb = ring->pkt_info[i].skb;
                dma_addr = ring->pkt_info[i].dma_addr;

                if (!skb)
                        continue;

                dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }
}
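
/* Drain both rings and release their descriptors and buffers */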
static void xge_delete_desc_rings(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        xge_txc_poll(ndev);
        xge_delete_desc_ring(ndev, pdata->tx_ring);

        xge_rx_poll(ndev, 64);
        xge_free_buffers(ndev);
        xge_delete_desc_ring(ndev, pdata->rx_ring);
}
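
/* Allocate one descriptor ring: coherent descriptor memory plus the
 * per-slot packet bookkeeping array.
 */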
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *ring;
        u16 size;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ndev = ndev;

        size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
        ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
                                              GFP_KERNEL);
        if (!ring->desc_addr)
                goto err;

        ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
                                 GFP_KERNEL);
        if (!ring->pkt_info)
                goto err;

        xge_setup_desc(ring);

        return ring;

err:
        xge_delete_desc_ring(ndev, ring);

        return NULL;
}

static int xge_create_desc_rings(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_desc_ring *ring;
        int ret;

        /* create tx ring */
        ring = xge_create_desc_ring(ndev);
        if (!ring)
                goto err;

        pdata->tx_ring = ring;
        xge_update_tx_desc_addr(pdata);

        /* create rx ring */
        ring = xge_create_desc_ring(ndev);
        if (!ring)
                goto err;

        pdata->rx_ring = ring;
        xge_update_rx_desc_addr(pdata);

        ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
        if (ret)
                goto err;

        return 0;
err:
        xge_delete_desc_rings(ndev);

        return -ENOMEM;
}
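
/* ndo_open: set up rings, NAPI, the IRQ and the MAC, then start the queue */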
static int xge_open(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = xge_create_desc_rings(ndev);
        if (ret)
                return ret;

        napi_enable(&pdata->napi);
        ret = xge_request_irq(ndev);
        if (ret)
                return ret;

        xge_intr_enable(pdata);
        xge_wr_csr(pdata, DMARXCTRL, 1);

        phy_start(ndev->phydev);
        xge_mac_enable(pdata);
        netif_start_queue(ndev);

        return 0;
}

static int xge_close(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        netif_stop_queue(ndev);
        xge_mac_disable(pdata);

        phy_stop(ndev->phydev);
        xge_intr_disable(pdata);
        xge_free_irq(ndev);
        napi_disable(&pdata->napi);
        xge_delete_desc_rings(ndev);

        return 0;
}

static int xge_napi(struct napi_struct *napi, const int budget)
{
        struct net_device *ndev = napi->dev;
        struct xge_pdata *pdata;
        int processed;

        pdata = netdev_priv(ndev);

        xge_txc_poll(ndev);
        processed = xge_rx_poll(ndev, budget);

        if (processed < budget) {
                napi_complete_done(napi, processed);
                xge_intr_enable(pdata);
        }

        return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = eth_mac_addr(ndev, addr);
        if (ret)
                return ret;

        xge_mac_set_station_addr(pdata);

        return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
        if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
                return true;

        return false;
}
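
/* Release buffers for TX descriptors that were queued but never completed */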
static void xge_free_pending_skb(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct device *dev = &pdata->pdev->dev;
        struct xge_desc_ring *tx_ring;
        struct xge_raw_desc *raw_desc;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        void *pkt_buf;
        int i;

        tx_ring = pdata->tx_ring;

        for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
                raw_desc = &tx_ring->raw_desc[i];

                if (!is_tx_pending(raw_desc))
                        continue;

                skb = tx_ring->pkt_info[i].skb;
                dma_addr = tx_ring->pkt_info[i].dma_addr;
                pkt_buf = tx_ring->pkt_info[i].pkt_buf;
                dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
                dev_kfree_skb_any(skb);
        }
}
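
/* ndo_tx_timeout: quiesce the TX path, drop pending packets and reinitialize
 * the TX ring and MAC before restarting the queue.
 */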
static void xge_timeout(struct net_device *ndev)
{
        struct xge_pdata *pdata = netdev_priv(ndev);

        rtnl_lock();

        if (!netif_running(ndev))
                goto out;

        netif_stop_queue(ndev);
        xge_intr_disable(pdata);
        napi_disable(&pdata->napi);

        xge_wr_csr(pdata, DMATXCTRL, 0);
        xge_txc_poll(ndev);
        xge_free_pending_skb(ndev);
        xge_wr_csr(pdata, DMATXSTATUS, ~0U);

        xge_setup_desc(pdata->tx_ring);
        xge_update_tx_desc_addr(pdata);
        xge_mac_init(pdata);

        napi_enable(&pdata->napi);
        xge_intr_enable(pdata);
        xge_mac_enable(pdata);
        netif_start_queue(ndev);

out:
        rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
                            struct rtnl_link_stats64 *storage)
{
        struct xge_pdata *pdata = netdev_priv(ndev);
        struct xge_stats *stats = &pdata->stats;

        storage->tx_packets += stats->tx_packets;
        storage->tx_bytes += stats->tx_bytes;

        storage->rx_packets += stats->rx_packets;
        storage->rx_bytes += stats->rx_bytes;
        storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
        .ndo_open = xge_open,
        .ndo_stop = xge_close,
        .ndo_start_xmit = xge_start_xmit,
        .ndo_set_mac_address = xge_set_mac_addr,
        .ndo_tx_timeout = xge_timeout,
        .ndo_get_stats64 = xge_get_stats64,
};
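
/* Probe: allocate the net_device, acquire resources, initialize the hardware
 * and MDIO bus, then register with the networking core.
 */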
static int xge_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct net_device *ndev;
        struct xge_pdata *pdata;
        int ret;

        ndev = alloc_etherdev(sizeof(*pdata));
        if (!ndev)
                return -ENOMEM;

        pdata = netdev_priv(ndev);

        pdata->pdev = pdev;
        pdata->ndev = ndev;
        SET_NETDEV_DEV(ndev, dev);
        platform_set_drvdata(pdev, pdata);
        ndev->netdev_ops = &xgene_ndev_ops;

        ndev->features |= NETIF_F_GSO |
                          NETIF_F_GRO;

        ret = xge_get_resources(pdata);
        if (ret)
                goto err;

        ndev->hw_features = ndev->features;
        xge_set_ethtool_ops(ndev);

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
        }

        ret = xge_init_hw(ndev);
        if (ret)
                goto err;

        ret = xge_mdio_config(ndev);
        if (ret)
                goto err;

        netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
                goto err;
        }

        return 0;

err:
        free_netdev(ndev);

        return ret;
}

static int xge_remove(struct platform_device *pdev)
{
        struct xge_pdata *pdata;
        struct net_device *ndev;

        pdata = platform_get_drvdata(pdev);
        ndev = pdata->ndev;

        rtnl_lock();
        if (netif_running(ndev))
                dev_close(ndev);
        rtnl_unlock();

        xge_mdio_remove(ndev);
        unregister_netdev(ndev);
        free_netdev(ndev);

        return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
        struct xge_pdata *pdata;

        pdata = platform_get_drvdata(pdev);
        if (!pdata)
                return;

        if (!pdata->ndev)
                return;

        xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
        { "APMC0D80" },
        { }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
        .driver = {
                .name = "xgene-enet-v2",
                .acpi_match_table = ACPI_PTR(xge_acpi_match),
        },
        .probe = xge_probe,
        .remove = xge_remove,
        .shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");