lio_vf_rep.c

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include <net/switchdev.h>
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
                                     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
                                   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
        .ndo_open = lio_vf_rep_open,
        .ndo_stop = lio_vf_rep_stop,
        .ndo_start_xmit = lio_vf_rep_pkt_xmit,
        .ndo_tx_timeout = lio_vf_rep_tx_timeout,
        .ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
        .ndo_get_stats64 = lio_vf_rep_get_stats64,
        .ndo_change_mtu = lio_vf_rep_change_mtu,
};
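
/* Completion callback for VF-rep soft commands: unless the request
 * timed out, clear resp->status (set non-zero before sending) to
 * signal success, then wake the thread waiting in
 * lio_vf_rep_send_soft_command().
 */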
static void
lio_vf_rep_send_sc_complete(struct octeon_device *oct,
                            u32 status, void *ptr)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
        struct lio_vf_rep_sc_ctx *ctx =
                (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
        struct lio_vf_rep_resp *resp =
                (struct lio_vf_rep_resp *)sc->virtrptr;

        if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
                WRITE_ONCE(resp->status, 0);

        complete(&ctx->complete);
}
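
/* Send a synchronous control request to firmware: copy the request
 * into the soft command's input buffer, send it on IQ 0, and sleep
 * (up to twice the request timeout) for the completion callback.  Any
 * response payload following the lio_vf_rep_resp header is copied
 * back to @resp.
 */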
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
                             void *req, int req_size,
                             void *resp, int resp_size)
{
        int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
        int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
        struct octeon_soft_command *sc = NULL;
        struct lio_vf_rep_resp *rep_resp;
        struct lio_vf_rep_sc_ctx *ctx;
        void *sc_req;
        int err;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, req_size,
                                          tot_resp_size, ctx_size);
        if (!sc)
                return -ENOMEM;

        ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
        memset(ctx, 0, ctx_size);
        init_completion(&ctx->complete);

        sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
        memcpy(sc_req, req, req_size);

        rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
        memset(rep_resp, 0, tot_resp_size);
        WRITE_ONCE(rep_resp->status, 1);

        sc->iq_no = 0;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
        sc->callback = lio_vf_rep_send_sc_complete;
        sc->callback_arg = sc;
        sc->wait_time = LIO_VF_REP_REQ_TMO_MS;

        err = octeon_send_soft_command(oct, sc);
        if (err == IQ_SEND_FAILED)
                goto free_buff;

        wait_for_completion_timeout(&ctx->complete,
                                    msecs_to_jiffies
                                    (2 * LIO_VF_REP_REQ_TMO_MS));
        err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
        if (err)
                dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");

        if (resp)
                memcpy(resp, (rep_resp + 1), resp_size);

free_buff:
        octeon_free_soft_command(oct, sc);

        return err;
}
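
/* ndo_open: ask firmware to mark this representor's state UP, then
 * set the RUNNING flag and start the transmit queue.
 */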
static int
lio_vf_rep_open(struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "VF_REP open failed with err %d\n", ret);
                return -EIO;
        }

        atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
                                      LIO_IFSTATE_RUNNING));

        netif_carrier_on(ndev);
        netif_start_queue(ndev);

        return 0;
}
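
/* ndo_stop: the reverse of open -- tell firmware the representor is
 * DOWN, clear the RUNNING flag, and quiesce the transmit path.
 */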
static int
lio_vf_rep_stop(struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "VF_REP dev stop failed with err %d\n", ret);
                return -EIO;
        }

        atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
                                      ~LIO_IFSTATE_RUNNING));

        netif_tx_disable(ndev);
        netif_carrier_off(ndev);

        return 0;
}

static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
        netif_trans_update(ndev);
        netif_wake_queue(ndev);
}

static void
lio_vf_rep_get_stats64(struct net_device *dev,
                       struct rtnl_link_stats64 *stats64)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

        /* Swap tx and rx stats as VF rep is a switch port */
        stats64->tx_packets = vf_rep->stats.rx_packets;
        stats64->tx_bytes = vf_rep->stats.rx_bytes;
        stats64->tx_dropped = vf_rep->stats.rx_dropped;

        stats64->rx_packets = vf_rep->stats.tx_packets;
        stats64->rx_bytes = vf_rep->stats.tx_bytes;
        stats64->rx_dropped = vf_rep->stats.tx_dropped;
}
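
/* ndo_change_mtu: push the new MTU to firmware (big-endian on the
 * wire) before updating the netdev.
 */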
static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
        rep_cfg.ifidx = vf_rep->ifidx;
        rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret) {
                dev_err(&oct->pci_dev->dev,
                        "Change MTU failed with err %d\n", ret);
                return -EIO;
        }

        ndev->mtu = new_mtu;

        return 0;
}
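
/* Report a "pfXvfY" physical port name so switchdev-aware tooling can
 * identify the representor.  The VF number is recovered from ifidx:
 * each PF owns a block of 64 ifidx values, and slot 0 of the block is
 * not used for VFs.
 */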
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
                          char *buf, size_t len)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
        struct octeon_device *oct = vf_rep->oct;
        int ret;

        ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
                       vf_rep->ifidx - oct->pf_num * 64 - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}
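
/* Map a firmware ifidx back to the representor netdev it belongs to,
 * or NULL if the index is outside this PF's VF range.
 */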
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
        int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
        int vfid_mask = max_vfs - 1;

        if (ifidx <= oct->pf_num * max_vfs ||
            ifidx >= oct->pf_num * max_vfs + max_vfs)
                return NULL;

        /* ifidx 1-63 for PF0 VFs
         * ifidx 65-127 for PF1 VFs
         */
        vf_id = (ifidx & vfid_mask) - 1;

        return oct->vf_rep_list.ndev[vf_id];
}
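
/* Copy a received frame from the DMA page buffer into the skb.  Large
 * frames get MIN_SKB_SIZE bytes in the linear area with the remainder
 * attached as a page fragment; small frames are copied entirely and
 * the page is released.
 */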
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
                       struct sk_buff *skb,
                       int len)
{
        if (likely(len > MIN_SKB_SIZE)) {
                struct octeon_skb_page_info *pg_info;
                unsigned char *va;

                pg_info = ((struct octeon_skb_page_info *)(skb->cb));
                if (pg_info->page) {
                        va = page_address(pg_info->page) +
                                pg_info->page_offset;
                        memcpy(skb->data, va, MIN_SKB_SIZE);
                        skb_put(skb, MIN_SKB_SIZE);
                }

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                pg_info->page,
                                pg_info->page_offset + MIN_SKB_SIZE,
                                len - MIN_SKB_SIZE,
                                LIO_RXBUFFER_SZ);
        } else {
                struct octeon_skb_page_info *pg_info =
                        ((struct octeon_skb_page_info *)(skb->cb));

                skb_copy_to_linear_data(skb, page_address(pg_info->page) +
                                        pg_info->page_offset, len);
                skb_put(skb, len);
                put_page(pg_info->page);
        }
}
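
/* Dispatch handler registered for OPCODE_NIC_VF_REP_PKT: deliver a
 * frame that firmware received on a VF up through the matching
 * representor netdev.
 */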
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
        struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *vf_ndev;
        struct octeon_device *oct;
        union octeon_rh *rh;
        struct sk_buff *skb;
        int i, ifidx;

        oct = lio_get_device(recv_pkt->octeon_id);
        if (!oct)
                goto free_buffers;

        skb = recv_pkt->buffer_ptr[0];
        rh = &recv_pkt->rh;
        ifidx = rh->r.ossp;

        vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
        if (!vf_ndev)
                goto free_buffers;

        vf_rep = netdev_priv(vf_ndev);
        if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
            recv_pkt->buffer_count > 1)
                goto free_buffers;

        skb->dev = vf_ndev;

        /* Multiple buffers are not used for vf_rep packets.
         * So just buffer_size[0] is valid.
         */
        lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

        skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
        skb->protocol = eth_type_trans(skb, skb->dev);
        skb->ip_summed = CHECKSUM_NONE;

        netif_rx(skb);
        octeon_free_recv_info(recv_info);

        return 0;

free_buffers:
        for (i = 0; i < recv_pkt->buffer_count; i++)
                recv_buffer_free(recv_pkt->buffer_ptr[i]);

        octeon_free_recv_info(recv_info);

        return 0;
}
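
/* TX-done callback: unmap the frame, release the skb and the soft
 * command, and restart the queue if the instruction queue has drained.
 */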
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
                                u32 status, void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct sk_buff *skb = sc->ctxptr;
        struct net_device *ndev = skb->dev;
        /* Save iq_no now; sc is invalid after octeon_free_soft_command() */
        u32 iq_no = sc->iq_no;

        dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                         sc->datasize, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        octeon_free_soft_command(oct, sc);

        if (octnet_iq_is_full(oct, iq_no))
                return;

        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
}
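
/* ndo_start_xmit: hand the frame to firmware on the parent PF's
 * instruction queue via a soft command whose data pointer is the
 * DMA-mapped skb.  Only linear (single-buffer) skbs are supported.
 */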
static int
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
        struct net_device *parent_ndev = vf_rep->parent_ndev;
        struct octeon_device *oct = vf_rep->oct;
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_soft_command *sc;
        struct lio *parent_lio;
        int status;

        parent_lio = GET_LIO(parent_ndev);

        if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
            skb->len <= 0)
                goto xmit_failed;

        if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
                dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, 0, 0, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
                goto xmit_failed;
        }

        /* Multiple buffers are not used for vf_rep packets. */
        if (skb_shinfo(skb)->nr_frags != 0) {
                dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
                                     skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
                dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        sc->virtdptr = skb->data;
        sc->datasize = skb->len;
        sc->ctxptr = skb;
        sc->iq_no = parent_lio->txq;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
                                    vf_rep->ifidx, 0, 0);
        pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
        pki_ih3->tagtype = ORDERED_TAG;

        sc->callback = lio_vf_rep_packet_sent_callback;
        sc->callback_arg = sc;

        status = octeon_send_soft_command(oct, sc);
        if (status == IQ_SEND_FAILED) {
                dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                                 sc->datasize, DMA_TO_DEVICE);
                octeon_free_soft_command(oct, sc);
                goto xmit_failed;
        }

        if (status == IQ_SEND_STOP)
                netif_stop_queue(ndev);

        netif_trans_update(ndev);

        return NETDEV_TX_OK;

xmit_failed:
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
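
/* switchdev: every representor reports the parent PF's MAC address as
 * its port parent ID, identifying them all as ports of one switch.
 */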
static int
lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
        struct net_device *parent_ndev = vf_rep->parent_ndev;
        struct lio *lio = GET_LIO(parent_ndev);

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                ether_addr_copy(attr->u.ppid.id,
                                (void *)&lio->linfo.hw_addr + 2);
                break;

        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
        .switchdev_port_attr_get = lio_vf_rep_attr_get,
};
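
/* Delayed work that polls firmware for per-VF statistics, byte-swaps
 * the 8-byte words of the response, caches the result in
 * vf_rep->stats, and re-arms itself.
 */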
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
        struct lio_vf_rep_stats stats;
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        oct = vf_rep->oct;

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
        rep_cfg.ifidx = vf_rep->ifidx;

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
                                           &stats, sizeof(stats));
        if (!ret) {
                octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
                memcpy(&vf_rep->stats, &stats, sizeof(stats));
        }

        schedule_delayed_work(&vf_rep->stats_wk.work,
                              msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}
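
/* Create one representor netdev per allocated VF.  Only meaningful
 * when the eswitch is in switchdev mode and SR-IOV is enabled; also
 * registers the dispatch handler for packets firmware forwards from
 * the VFs.
 */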
int
lio_vf_rep_create(struct octeon_device *oct)
{
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *ndev;
        int i, num_vfs;

        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return 0;

        if (!oct->sriov_info.sriov_enabled)
                return 0;

        num_vfs = oct->sriov_info.num_vfs_alloced;

        oct->vf_rep_list.num_vfs = 0;
        for (i = 0; i < num_vfs; i++) {
                ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

                if (!ndev) {
                        dev_err(&oct->pci_dev->dev,
                                "VF rep device %d creation failed\n", i);
                        goto cleanup;
                }

                ndev->min_mtu = LIO_MIN_MTU_SIZE;
                ndev->max_mtu = LIO_MAX_MTU_SIZE;
                ndev->netdev_ops = &lio_vf_rep_ndev_ops;
                SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);

                vf_rep = netdev_priv(ndev);
                memset(vf_rep, 0, sizeof(*vf_rep));

                vf_rep->ndev = ndev;
                vf_rep->oct = oct;
                vf_rep->parent_ndev = oct->props[0].netdev;
                vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

                eth_hw_addr_random(ndev);

                if (register_netdev(ndev)) {
                        dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");
                        free_netdev(ndev);
                        goto cleanup;
                }

                netif_carrier_off(ndev);

                INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
                                  lio_vf_rep_fetch_stats);
                vf_rep->stats_wk.ctxptr = (void *)vf_rep;
                schedule_delayed_work(&vf_rep->stats_wk.work,
                                      msecs_to_jiffies
                                      (LIO_VF_REP_STATS_POLL_TIME_MS));
                oct->vf_rep_list.num_vfs++;
                oct->vf_rep_list.ndev[i] = ndev;
        }

        if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
                                        OPCODE_NIC_VF_REP_PKT,
                                        lio_vf_rep_pkt_recv, oct)) {
                dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");
                goto cleanup;
        }

        return 0;

cleanup:
        for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
                ndev = oct->vf_rep_list.ndev[i];
                oct->vf_rep_list.ndev[i] = NULL;
                if (ndev) {
                        vf_rep = netdev_priv(ndev);
                        cancel_delayed_work_sync
                                (&vf_rep->stats_wk.work);
                        unregister_netdev(ndev);
                        free_netdev(ndev);
                }
        }

        oct->vf_rep_list.num_vfs = 0;

        return -1;
}
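
/* Tear down all representor netdevs created by lio_vf_rep_create(). */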
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
        struct lio_vf_rep_desc *vf_rep;
        struct net_device *ndev;
        int i;

        if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return;

        if (!oct->sriov_info.sriov_enabled)
                return;

        for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
                ndev = oct->vf_rep_list.ndev[i];
                oct->vf_rep_list.ndev[i] = NULL;
                if (ndev) {
                        vf_rep = netdev_priv(ndev);
                        cancel_delayed_work_sync
                                (&vf_rep->stats_wk.work);
                        netif_tx_disable(ndev);
                        netif_carrier_off(ndev);
                        unregister_netdev(ndev);
                        free_netdev(ndev);
                }
        }

        oct->vf_rep_list.num_vfs = 0;
}
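
/* Netdevice notifier: when a representor is registered or renamed,
 * push the interface name to firmware so both sides agree on the
 * device name.
 */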
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
                        unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct lio_vf_rep_desc *vf_rep;
        struct lio_vf_rep_req rep_cfg;
        struct octeon_device *oct;
        int ret;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_CHANGENAME:
                break;

        default:
                return NOTIFY_DONE;
        }

        if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
                return NOTIFY_DONE;

        vf_rep = netdev_priv(ndev);
        oct = vf_rep->oct;

        if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
                dev_err(&oct->pci_dev->dev,
                        "Device name change sync failed as the size is > %d\n",
                        LIO_IF_NAME_SIZE);
                return NOTIFY_DONE;
        }

        memset(&rep_cfg, 0, sizeof(rep_cfg));
        rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
        rep_cfg.ifidx = vf_rep->ifidx;
        strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);

        ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
                                           sizeof(rep_cfg), NULL, 0);
        if (ret)
                dev_err(&oct->pci_dev->dev,
                        "vf_rep netdev name change failed with err %d\n", ret);

        return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
        .notifier_call = lio_vf_rep_netdev_event,
};

int
lio_vf_rep_modinit(void)
{
        if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
                pr_err("netdev notifier registration failed\n");
                return -EFAULT;
        }

        return 0;
}

void
lio_vf_rep_modexit(void)
{
        if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
                pr_err("netdev notifier unregister failed\n");
}