i40evf_virtchnl.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2013 - 2018 Intel Corporation. */
  3. #include "i40evf.h"
  4. #include "i40e_prototype.h"
  5. #include "i40evf_client.h"
  6. /* busy wait delay in msec */
  7. #define I40EVF_BUSY_WAIT_DELAY 10
  8. #define I40EVF_BUSY_WAIT_COUNT 50
  9. /**
  10. * i40evf_send_pf_msg
  11. * @adapter: adapter structure
  12. * @op: virtual channel opcode
  13. * @msg: pointer to message buffer
  14. * @len: message length
  15. *
  16. * Send message to PF and print status if failure.
  17. **/
  18. static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
  19. enum virtchnl_ops op, u8 *msg, u16 len)
  20. {
  21. struct i40e_hw *hw = &adapter->hw;
  22. i40e_status err;
  23. if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
  24. return 0; /* nothing to see here, move along */
  25. err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
  26. if (err)
  27. dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
  28. op, i40evf_stat_str(hw, err),
  29. i40evf_aq_str(hw, hw->aq.asq_last_status));
  30. return err;
  31. }
  32. /**
  33. * i40evf_send_api_ver
  34. * @adapter: adapter structure
  35. *
  36. * Send API version admin queue message to the PF. The reply is not checked
  37. * in this function. Returns 0 if the message was successfully
  38. * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
  39. **/
  40. int i40evf_send_api_ver(struct i40evf_adapter *adapter)
  41. {
  42. struct virtchnl_version_info vvi;
  43. vvi.major = VIRTCHNL_VERSION_MAJOR;
  44. vvi.minor = VIRTCHNL_VERSION_MINOR;
  45. return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
  46. sizeof(vvi));
  47. }
/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
	struct virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	i40e_status err;

	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	/* Drain the ARQ until the VERSION reply arrives; any unrelated
	 * events received before it are discarded.
	 */
	while (1) {
		err = i40evf_clean_arq_element(hw, &event, NULL);
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		if (err)
			goto out_alloc;
		op =
		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_VERSION)
			break;
	}

	/* the PF returns its status code in cookie_low */
	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err)
		goto out_alloc;

	/* NOTE(review): the loop above only exits with
	 * op == VIRTCHNL_OP_VERSION, so this branch is defensive and
	 * currently unreachable.
	 */
	if (op != VIRTCHNL_OP_VERSION) {
		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
			 op);
		err = -EIO;
		goto out_alloc;
	}

	/* cache the PF's version, then reject PFs newer than we are */
	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
	adapter->pf_version = *pf_vvi;

	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
		err = -EIO;

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
  102. /**
  103. * i40evf_send_vf_config_msg
  104. * @adapter: adapter structure
  105. *
  106. * Send VF configuration request admin queue message to the PF. The reply
  107. * is not checked in this function. Returns 0 if the message was
  108. * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
  109. **/
  110. int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
  111. {
  112. u32 caps;
  113. caps = VIRTCHNL_VF_OFFLOAD_L2 |
  114. VIRTCHNL_VF_OFFLOAD_RSS_PF |
  115. VIRTCHNL_VF_OFFLOAD_RSS_AQ |
  116. VIRTCHNL_VF_OFFLOAD_RSS_REG |
  117. VIRTCHNL_VF_OFFLOAD_VLAN |
  118. VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
  119. VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
  120. VIRTCHNL_VF_OFFLOAD_ENCAP |
  121. VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
  122. VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
  123. VIRTCHNL_VF_OFFLOAD_ADQ;
  124. adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
  125. adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
  126. if (PF_IS_V11(adapter))
  127. return i40evf_send_pf_msg(adapter,
  128. VIRTCHNL_OP_GET_VF_RESOURCES,
  129. (u8 *)&caps, sizeof(caps));
  130. else
  131. return i40evf_send_pf_msg(adapter,
  132. VIRTCHNL_OP_GET_VF_RESOURCES,
  133. NULL, 0);
  134. }
/**
 * i40evf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	i40e_status err;
	u16 len;

	/* worst-case reply: resource header plus one VSI entry per
	 * possible VSI
	 */
	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	/* Drain the ARQ until the GET_VF_RESOURCES reply arrives;
	 * unrelated events are discarded.
	 */
	while (1) {
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = i40evf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op =
		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	/* the PF returns its status code in cookie_low; the copy is
	 * clamped so a larger-than-expected reply cannot overrun vf_res
	 */
	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	/* NOTE(review): parsing proceeds even if err is nonzero; the
	 * caller is expected to check the return value - confirm.
	 */
	i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->num_active_queues;
	int i, len, max_frame = I40E_MAX_RXBUFFER;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;

	/* message is a header plus one queue_pair_info per active pair */
	len = sizeof(struct virtchnl_vsi_queue_config_info) +
	      (sizeof(struct virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Limit maximum frame size when jumbo frames is not enabled */
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
	    (adapter->netdev->mtu <= ETH_DATA_LEN))
		max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
		/* buffer size must be a multiple of the HW descriptor
		 * granularity
		 */
		vqpi->rxq.databuffer_size =
			ALIGN(adapter->rx_rings[i].rx_buf_len,
			      BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
		vqpi++;
	}

	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
}
  233. /**
  234. * i40evf_enable_queues
  235. * @adapter: adapter structure
  236. *
  237. * Request that the PF enable all of our queues.
  238. **/
  239. void i40evf_enable_queues(struct i40evf_adapter *adapter)
  240. {
  241. struct virtchnl_queue_select vqs;
  242. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  243. /* bail because we already have a command pending */
  244. dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
  245. adapter->current_op);
  246. return;
  247. }
  248. adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
  249. vqs.vsi_id = adapter->vsi_res->vsi_id;
  250. vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
  251. vqs.rx_queues = vqs.tx_queues;
  252. adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
  253. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
  254. (u8 *)&vqs, sizeof(vqs));
  255. }
  256. /**
  257. * i40evf_disable_queues
  258. * @adapter: adapter structure
  259. *
  260. * Request that the PF disable all of our queues.
  261. **/
  262. void i40evf_disable_queues(struct i40evf_adapter *adapter)
  263. {
  264. struct virtchnl_queue_select vqs;
  265. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  266. /* bail because we already have a command pending */
  267. dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
  268. adapter->current_op);
  269. return;
  270. }
  271. adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
  272. vqs.vsi_id = adapter->vsi_res->vsi_id;
  273. vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
  274. vqs.rx_queues = vqs.tx_queues;
  275. adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
  276. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
  277. (u8 *)&vqs, sizeof(vqs));
  278. }
/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	struct virtchnl_vector_map *vecmap;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

	/* queue vectors only; the misc (AdminQ) vector is not counted here */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	/* message carries one vector_map per MSI-X vector, misc included */
	len = sizeof(struct virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
	       sizeof(struct virtchnl_vector_map));
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = &adapter->q_vectors[v_idx];
		vecmap = &vimi->vecmap[v_idx];

		vecmap->vsi_id = adapter->vsi_res->vsi_id;
		/* vector 0 is reserved for misc causes, so queue vectors
		 * are offset by NONQ_VECS
		 */
		vecmap->vector_id = v_idx + NONQ_VECS;
		vecmap->txq_map = q_vector->ring_mask;
		vecmap->rxq_map = q_vector->ring_mask;
		vecmap->rxitr_idx = I40E_RX_ITR;
		vecmap->txitr_idx = I40E_TX_ITR;
	}
	/* Misc vector last - this is only for AdminQ messages */
	/* (v_idx == q_vectors after the loop, indexing the final slot) */
	vecmap = &vimi->vecmap[v_idx];
	vecmap->vsi_id = adapter->vsi_res->vsi_id;
	vecmap->vector_id = 0;
	vecmap->txq_map = 0;
	vecmap->rxq_map = 0;

	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
}
  329. /**
  330. * i40evf_request_queues
  331. * @adapter: adapter structure
  332. * @num: number of requested queues
  333. *
  334. * We get a default number of queues from the PF. This enables us to request a
  335. * different number. Returns 0 on success, negative on failure
  336. **/
  337. int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
  338. {
  339. struct virtchnl_vf_res_request vfres;
  340. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  341. /* bail because we already have a command pending */
  342. dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
  343. adapter->current_op);
  344. return -EBUSY;
  345. }
  346. vfres.num_queue_pairs = num;
  347. adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
  348. adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
  349. return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
  350. (u8 *)&vfres, sizeof(vfres));
  351. }
/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	int len, i = 0, count = 0;
	struct i40evf_mac_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending additions to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to what fits in one AQ buffer; "more" keeps the
		 * aq_required bit set so the remainder goes in a later
		 * request
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}

	/* GFP_ATOMIC: we hold a BH spinlock here */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: copy addresses and clear the add flag on each
	 * filter included in this message
	 */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}
/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct i40evf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending removals to size the message */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to one AQ buffer; "more" keeps the aq_required
		 * bit set so the remainder goes in a later request
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}

	/* GFP_ATOMIC: we hold a BH spinlock here */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* second pass: copy addresses and free each filter included in
	 * this message (safe iterator because entries are deleted)
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}
/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct i40evf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending additions to size the message */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to one AQ buffer; "more" keeps the aq_required
		 * bit set so the remainder goes in a later request
		 */
		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}

	/* GFP_ATOMIC: we hold a BH spinlock here */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	/* second pass: copy VLAN ids and clear the add flag on each
	 * filter included in this message
	 */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	struct i40evf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* first pass: count pending removals to size the message */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		/* truncate to one AQ buffer; "more" keeps the aq_required
		 * bit set so the remainder goes in a later request
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}

	/* GFP_ATOMIC: we hold a BH spinlock here */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	/* second pass: copy VLAN ids and free each filter included in
	 * this message (safe iterator because entries are deleted)
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
  604. /**
  605. * i40evf_set_promiscuous
  606. * @adapter: adapter structure
  607. * @flags: bitmask to control unicast/multicast promiscuous.
  608. *
  609. * Request that the PF enable promiscuous mode for our VSI.
  610. **/
  611. void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
  612. {
  613. struct virtchnl_promisc_info vpi;
  614. int promisc_all;
  615. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  616. /* bail because we already have a command pending */
  617. dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
  618. adapter->current_op);
  619. return;
  620. }
  621. promisc_all = FLAG_VF_UNICAST_PROMISC |
  622. FLAG_VF_MULTICAST_PROMISC;
  623. if ((flags & promisc_all) == promisc_all) {
  624. adapter->flags |= I40EVF_FLAG_PROMISC_ON;
  625. adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
  626. dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
  627. }
  628. if (flags & FLAG_VF_MULTICAST_PROMISC) {
  629. adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
  630. adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
  631. dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
  632. }
  633. if (!flags) {
  634. adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
  635. I40EVF_FLAG_ALLMULTI_ON);
  636. adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
  637. I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
  638. dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
  639. }
  640. adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
  641. vpi.vsi_id = adapter->vsi_res->vsi_id;
  642. vpi.flags = flags;
  643. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
  644. (u8 *)&vpi, sizeof(vpi));
  645. }
  646. /**
  647. * i40evf_request_stats
  648. * @adapter: adapter structure
  649. *
  650. * Request VSI statistics from PF.
  651. **/
  652. void i40evf_request_stats(struct i40evf_adapter *adapter)
  653. {
  654. struct virtchnl_queue_select vqs;
  655. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  656. /* no error message, this isn't crucial */
  657. return;
  658. }
  659. adapter->current_op = VIRTCHNL_OP_GET_STATS;
  660. vqs.vsi_id = adapter->vsi_res->vsi_id;
  661. /* queue maps are ignored for this message - only the vsi is used */
  662. if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
  663. (u8 *)&vqs, sizeof(vqs)))
  664. /* if the request failed, don't lock out others */
  665. adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  666. }
  667. /**
  668. * i40evf_get_hena
  669. * @adapter: adapter structure
  670. *
  671. * Request hash enable capabilities from PF
  672. **/
  673. void i40evf_get_hena(struct i40evf_adapter *adapter)
  674. {
  675. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  676. /* bail because we already have a command pending */
  677. dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
  678. adapter->current_op);
  679. return;
  680. }
  681. adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
  682. adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
  683. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
  684. NULL, 0);
  685. }
  686. /**
  687. * i40evf_set_hena
  688. * @adapter: adapter structure
  689. *
  690. * Request the PF to set our RSS hash capabilities
  691. **/
  692. void i40evf_set_hena(struct i40evf_adapter *adapter)
  693. {
  694. struct virtchnl_rss_hena vrh;
  695. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  696. /* bail because we already have a command pending */
  697. dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
  698. adapter->current_op);
  699. return;
  700. }
  701. vrh.hena = adapter->hena;
  702. adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
  703. adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
  704. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
  705. (u8 *)&vrh, sizeof(vrh));
  706. }
  707. /**
  708. * i40evf_set_rss_key
  709. * @adapter: adapter structure
  710. *
  711. * Request the PF to set our RSS hash key
  712. **/
  713. void i40evf_set_rss_key(struct i40evf_adapter *adapter)
  714. {
  715. struct virtchnl_rss_key *vrk;
  716. int len;
  717. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  718. /* bail because we already have a command pending */
  719. dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
  720. adapter->current_op);
  721. return;
  722. }
  723. len = sizeof(struct virtchnl_rss_key) +
  724. (adapter->rss_key_size * sizeof(u8)) - 1;
  725. vrk = kzalloc(len, GFP_KERNEL);
  726. if (!vrk)
  727. return;
  728. vrk->vsi_id = adapter->vsi.id;
  729. vrk->key_len = adapter->rss_key_size;
  730. memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
  731. adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
  732. adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
  733. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
  734. (u8 *)vrk, len);
  735. kfree(vrk);
  736. }
  737. /**
  738. * i40evf_set_rss_lut
  739. * @adapter: adapter structure
  740. *
  741. * Request the PF to set our RSS lookup table
  742. **/
  743. void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
  744. {
  745. struct virtchnl_rss_lut *vrl;
  746. int len;
  747. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  748. /* bail because we already have a command pending */
  749. dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
  750. adapter->current_op);
  751. return;
  752. }
  753. len = sizeof(struct virtchnl_rss_lut) +
  754. (adapter->rss_lut_size * sizeof(u8)) - 1;
  755. vrl = kzalloc(len, GFP_KERNEL);
  756. if (!vrl)
  757. return;
  758. vrl->vsi_id = adapter->vsi.id;
  759. vrl->lut_entries = adapter->rss_lut_size;
  760. memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
  761. adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
  762. adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
  763. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
  764. (u8 *)vrl, len);
  765. kfree(vrl);
  766. }
  767. /**
  768. * i40evf_enable_vlan_stripping
  769. * @adapter: adapter structure
  770. *
  771. * Request VLAN header stripping to be enabled
  772. **/
  773. void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
  774. {
  775. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  776. /* bail because we already have a command pending */
  777. dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
  778. adapter->current_op);
  779. return;
  780. }
  781. adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
  782. adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
  783. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
  784. NULL, 0);
  785. }
  786. /**
  787. * i40evf_disable_vlan_stripping
  788. * @adapter: adapter structure
  789. *
  790. * Request VLAN header stripping to be disabled
  791. **/
  792. void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
  793. {
  794. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  795. /* bail because we already have a command pending */
  796. dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
  797. adapter->current_op);
  798. return;
  799. }
  800. adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
  801. adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
  802. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
  803. NULL, 0);
  804. }
  805. /**
  806. * i40evf_print_link_message - print link up or down
  807. * @adapter: adapter structure
  808. *
  809. * Log a message telling the world of our wonderous link status
  810. */
  811. static void i40evf_print_link_message(struct i40evf_adapter *adapter)
  812. {
  813. struct net_device *netdev = adapter->netdev;
  814. char *speed = "Unknown ";
  815. if (!adapter->link_up) {
  816. netdev_info(netdev, "NIC Link is Down\n");
  817. return;
  818. }
  819. switch (adapter->link_speed) {
  820. case I40E_LINK_SPEED_40GB:
  821. speed = "40 G";
  822. break;
  823. case I40E_LINK_SPEED_25GB:
  824. speed = "25 G";
  825. break;
  826. case I40E_LINK_SPEED_20GB:
  827. speed = "20 G";
  828. break;
  829. case I40E_LINK_SPEED_10GB:
  830. speed = "10 G";
  831. break;
  832. case I40E_LINK_SPEED_1GB:
  833. speed = "1000 M";
  834. break;
  835. case I40E_LINK_SPEED_100MB:
  836. speed = "100 M";
  837. break;
  838. default:
  839. break;
  840. }
  841. netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
  842. }
  843. /**
  844. * i40evf_enable_channel
  845. * @adapter: adapter structure
  846. *
  847. * Request that the PF enable channels as specified by
  848. * the user via tc tool.
  849. **/
  850. void i40evf_enable_channels(struct i40evf_adapter *adapter)
  851. {
  852. struct virtchnl_tc_info *vti = NULL;
  853. u16 len;
  854. int i;
  855. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  856. /* bail because we already have a command pending */
  857. dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
  858. adapter->current_op);
  859. return;
  860. }
  861. len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
  862. sizeof(struct virtchnl_tc_info);
  863. vti = kzalloc(len, GFP_KERNEL);
  864. if (!vti)
  865. return;
  866. vti->num_tc = adapter->num_tc;
  867. for (i = 0; i < vti->num_tc; i++) {
  868. vti->list[i].count = adapter->ch_config.ch_info[i].count;
  869. vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
  870. vti->list[i].pad = 0;
  871. vti->list[i].max_tx_rate =
  872. adapter->ch_config.ch_info[i].max_tx_rate;
  873. }
  874. adapter->ch_config.state = __I40EVF_TC_RUNNING;
  875. adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
  876. adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
  877. adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
  878. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
  879. (u8 *)vti, len);
  880. kfree(vti);
  881. }
  882. /**
  883. * i40evf_disable_channel
  884. * @adapter: adapter structure
  885. *
  886. * Request that the PF disable channels that are configured
  887. **/
  888. void i40evf_disable_channels(struct i40evf_adapter *adapter)
  889. {
  890. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  891. /* bail because we already have a command pending */
  892. dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
  893. adapter->current_op);
  894. return;
  895. }
  896. adapter->ch_config.state = __I40EVF_TC_INVALID;
  897. adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
  898. adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
  899. adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
  900. i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
  901. NULL, 0);
  902. }
  903. /**
  904. * i40evf_print_cloud_filter
  905. * @adapter: adapter structure
  906. * @f: cloud filter to print
  907. *
  908. * Print the cloud filter
  909. **/
  910. static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
  911. struct virtchnl_filter *f)
  912. {
  913. switch (f->flow_type) {
  914. case VIRTCHNL_TCP_V4_FLOW:
  915. dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
  916. &f->data.tcp_spec.dst_mac,
  917. &f->data.tcp_spec.src_mac,
  918. ntohs(f->data.tcp_spec.vlan_id),
  919. &f->data.tcp_spec.dst_ip[0],
  920. &f->data.tcp_spec.src_ip[0],
  921. ntohs(f->data.tcp_spec.dst_port),
  922. ntohs(f->data.tcp_spec.src_port));
  923. break;
  924. case VIRTCHNL_TCP_V6_FLOW:
  925. dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
  926. &f->data.tcp_spec.dst_mac,
  927. &f->data.tcp_spec.src_mac,
  928. ntohs(f->data.tcp_spec.vlan_id),
  929. &f->data.tcp_spec.dst_ip,
  930. &f->data.tcp_spec.src_ip,
  931. ntohs(f->data.tcp_spec.dst_port),
  932. ntohs(f->data.tcp_spec.src_port));
  933. break;
  934. }
  935. }
  936. /**
  937. * i40evf_add_cloud_filter
  938. * @adapter: adapter structure
  939. *
  940. * Request that the PF add cloud filters as specified
  941. * by the user via tc tool.
  942. **/
  943. void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
  944. {
  945. struct i40evf_cloud_filter *cf;
  946. struct virtchnl_filter *f;
  947. int len = 0, count = 0;
  948. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  949. /* bail because we already have a command pending */
  950. dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
  951. adapter->current_op);
  952. return;
  953. }
  954. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  955. if (cf->add) {
  956. count++;
  957. break;
  958. }
  959. }
  960. if (!count) {
  961. adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
  962. return;
  963. }
  964. adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
  965. len = sizeof(struct virtchnl_filter);
  966. f = kzalloc(len, GFP_KERNEL);
  967. if (!f)
  968. return;
  969. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  970. if (cf->add) {
  971. memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
  972. cf->add = false;
  973. cf->state = __I40EVF_CF_ADD_PENDING;
  974. i40evf_send_pf_msg(adapter,
  975. VIRTCHNL_OP_ADD_CLOUD_FILTER,
  976. (u8 *)f, len);
  977. }
  978. }
  979. kfree(f);
  980. }
  981. /**
  982. * i40evf_del_cloud_filter
  983. * @adapter: adapter structure
  984. *
  985. * Request that the PF delete cloud filters as specified
  986. * by the user via tc tool.
  987. **/
  988. void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
  989. {
  990. struct i40evf_cloud_filter *cf, *cftmp;
  991. struct virtchnl_filter *f;
  992. int len = 0, count = 0;
  993. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  994. /* bail because we already have a command pending */
  995. dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
  996. adapter->current_op);
  997. return;
  998. }
  999. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  1000. if (cf->del) {
  1001. count++;
  1002. break;
  1003. }
  1004. }
  1005. if (!count) {
  1006. adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
  1007. return;
  1008. }
  1009. adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
  1010. len = sizeof(struct virtchnl_filter);
  1011. f = kzalloc(len, GFP_KERNEL);
  1012. if (!f)
  1013. return;
  1014. list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
  1015. if (cf->del) {
  1016. memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
  1017. cf->del = false;
  1018. cf->state = __I40EVF_CF_DEL_PENDING;
  1019. i40evf_send_pf_msg(adapter,
  1020. VIRTCHNL_OP_DEL_CLOUD_FILTER,
  1021. (u8 *)f, len);
  1022. }
  1023. }
  1024. kfree(f);
  1025. }
/**
 * i40evf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	/* No completion will arrive for RESET_VF, so clear current_op
	 * here to let the next admin queue command proceed.
	 */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	/* VIRTCHNL_OP_EVENT is an unsolicited notification from the PF, not
	 * a reply to one of our requests; handle it and return early so that
	 * current_op is left untouched at the bottom of this function.
	 */
	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
		bool link_up = vpe->event_data.link_event.link_status;

		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_speed =
				vpe->event_data.link_event.link_speed;
			/* we've already got the right link status, bail */
			if (adapter->link_up == link_up)
				break;
			if (link_up) {
				/* If we get link up message and start queues
				 * before our queues are configured it will
				 * trigger a TX hang. In that case, just ignore
				 * the link status message,we'll get another one
				 * after we enable queues and actually prepared
				 * to send traffic.
				 */
				if (adapter->state != __I40EVF_RUNNING)
					break;
				/* For ADq enabled VF, we reconfigure VSIs and
				 * re-allocate queues. Hence wait till all
				 * queues are enabled.
				 */
				if (adapter->flags &
				    I40EVF_FLAG_QUEUES_DISABLED)
					break;
			}
			/* commit the new state and start/stop the stack */
			adapter->link_up = link_up;
			if (link_up) {
				netif_tx_start_all_queues(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_tx_stop_all_queues(netdev);
				netif_carrier_off(netdev);
			}
			i40evf_print_link_message(adapter);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
			/* only schedule one reset per warning */
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		return;
	}

	/* Non-zero v_retval means the PF rejected our request: log it, and
	 * for channel / cloud-filter ops also roll back the state that was
	 * set optimistically when the request was sent.
	 */
	if (v_retval) {
		switch (v_opcode) {
		case VIRTCHNL_OP_ADD_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ADD_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ENABLE_CHANNELS:
			/* undo what i40evf_enable_channels() set up and put
			 * the netdev TC config back to its default.
			 */
			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __I40EVF_TC_INVALID;
			netdev_reset_tc(netdev);
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_DISABLE_CHANNELS:
			/* disable failed, so the channels are still running */
			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __I40EVF_TC_RUNNING;
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
			struct i40evf_cloud_filter *cf, *cftmp;

			/* drop every filter whose add was pending; _safe
			 * iteration because entries are freed in the loop.
			 */
			list_for_each_entry_safe(cf, cftmp,
						 &adapter->cloud_filter_list,
						 list) {
				if (cf->state == __I40EVF_CF_ADD_PENDING) {
					cf->state = __I40EVF_CF_INVALID;
					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
						 i40evf_stat_str(&adapter->hw,
								 v_retval));
					i40evf_print_cloud_filter(adapter,
								  &cf->f);
					list_del(&cf->list);
					kfree(cf);
					adapter->num_cloud_filters--;
				}
			}
			}
			break;
		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
			struct i40evf_cloud_filter *cf;

			/* delete failed, so the filters are still active */
			list_for_each_entry(cf, &adapter->cloud_filter_list,
					    list) {
				if (cf->state == __I40EVF_CF_DEL_PENDING) {
					cf->state = __I40EVF_CF_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
						 i40evf_stat_str(&adapter->hw,
								 v_retval));
					i40evf_print_cloud_filter(adapter,
								  &cf->f);
				}
			}
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval,
				i40evf_stat_str(&adapter->hw, v_retval),
				v_opcode);
		}
	}

	/* Per-opcode success handling. Note: this also runs after the error
	 * switch above, so each case must tolerate a failed request.
	 */
	switch (v_opcode) {
	case VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		/* cap the copy at the largest resource message we can hold */
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  I40E_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
		memcpy(adapter->vf_res, msg, min(msglen, len));
		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		/* restore current mac address */
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		i40evf_process_config(adapter);
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
		/* wake anyone waiting in i40evf_close() for the DOWN state */
		if (adapter->state == __I40EVF_DOWN_PENDING) {
			adapter->state = __I40EVF_DOWN;
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 * NOTE: the early return also skips clearing current_op
		 * below, so an in-flight command stays pending.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Gobble zero-length replies from the PF. They indicate that
		 * a previous message was received OK, and the client doesn't
		 * care about that.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			i40evf_notify_client_message(&adapter->vsi,
						     msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;

		/* only trust the payload if it is exactly the expected size */
		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
		}
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES: {
		struct virtchnl_vf_res_request *vfres =
			(struct virtchnl_vf_res_request *)msg;

		/* PF granted a different count than requested: abandon the
		 * request rather than reinit with the wrong queue count.
		 */
		if (vfres->num_queue_pairs != adapter->num_req_queues) {
			dev_info(&adapter->pdev->dev,
				 "Requested %d queues, PF can support %d\n",
				 adapter->num_req_queues,
				 vfres->num_queue_pairs);
			adapter->num_req_queues = 0;
			adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
		}
		}
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct i40evf_cloud_filter *cf;

		/* add succeeded (error path above already removed failures) */
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __I40EVF_CF_ADD_PENDING)
				cf->state = __I40EVF_CF_ACTIVE;
		}
		}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct i40evf_cloud_filter *cf, *cftmp;

		/* delete succeeded; free the local filter entries */
		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __I40EVF_CF_DEL_PENDING) {
				cf->state = __I40EVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
		}
		break;
	default:
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}