iavf_virtchnl.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright(c) 2013 - 2018 Intel Corporation. */
  3. #include "iavf.h"
  4. #include "iavf_prototype.h"
  5. #include "iavf_client.h"
  6. /* busy wait delay in msec */
  7. #define IAVF_BUSY_WAIT_DELAY 10
  8. #define IAVF_BUSY_WAIT_COUNT 50
  9. /**
  10. * iavf_send_pf_msg
  11. * @adapter: adapter structure
  12. * @op: virtual channel opcode
  13. * @msg: pointer to message buffer
  14. * @len: message length
  15. *
  16. * Send message to PF and print status if failure.
  17. **/
  18. static int iavf_send_pf_msg(struct iavf_adapter *adapter,
  19. enum virtchnl_ops op, u8 *msg, u16 len)
  20. {
  21. struct iavf_hw *hw = &adapter->hw;
  22. iavf_status err;
  23. if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
  24. return 0; /* nothing to see here, move along */
  25. err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
  26. if (err)
  27. dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
  28. op, iavf_stat_str(hw, err),
  29. iavf_aq_str(hw, hw->aq.asq_last_status));
  30. return err;
  31. }
  32. /**
  33. * iavf_send_api_ver
  34. * @adapter: adapter structure
  35. *
  36. * Send API version admin queue message to the PF. The reply is not checked
  37. * in this function. Returns 0 if the message was successfully
  38. * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
  39. **/
  40. int iavf_send_api_ver(struct iavf_adapter *adapter)
  41. {
  42. struct virtchnl_version_info vvi;
  43. vvi.major = VIRTCHNL_VERSION_MAJOR;
  44. vvi.minor = VIRTCHNL_VERSION_MINOR;
  45. return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
  46. sizeof(vvi));
  47. }
  48. /**
  49. * iavf_verify_api_ver
  50. * @adapter: adapter structure
  51. *
  52. * Compare API versions with the PF. Must be called after admin queue is
  53. * initialized. Returns 0 if API versions match, -EIO if they do not,
  54. * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
  55. * from the firmware are propagated.
  56. **/
  57. int iavf_verify_api_ver(struct iavf_adapter *adapter)
  58. {
  59. struct virtchnl_version_info *pf_vvi;
  60. struct iavf_hw *hw = &adapter->hw;
  61. struct i40e_arq_event_info event;
  62. enum virtchnl_ops op;
  63. iavf_status err;
  64. event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
  65. event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
  66. if (!event.msg_buf) {
  67. err = -ENOMEM;
  68. goto out;
  69. }
  70. while (1) {
  71. err = iavf_clean_arq_element(hw, &event, NULL);
  72. /* When the AQ is empty, iavf_clean_arq_element will return
  73. * nonzero and this loop will terminate.
  74. */
  75. if (err)
  76. goto out_alloc;
  77. op =
  78. (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
  79. if (op == VIRTCHNL_OP_VERSION)
  80. break;
  81. }
  82. err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
  83. if (err)
  84. goto out_alloc;
  85. if (op != VIRTCHNL_OP_VERSION) {
  86. dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
  87. op);
  88. err = -EIO;
  89. goto out_alloc;
  90. }
  91. pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
  92. adapter->pf_version = *pf_vvi;
  93. if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
  94. ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
  95. (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
  96. err = -EIO;
  97. out_alloc:
  98. kfree(event.msg_buf);
  99. out:
  100. return err;
  101. }
  102. /**
  103. * iavf_send_vf_config_msg
  104. * @adapter: adapter structure
  105. *
  106. * Send VF configuration request admin queue message to the PF. The reply
  107. * is not checked in this function. Returns 0 if the message was
  108. * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
  109. **/
  110. int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
  111. {
  112. u32 caps;
  113. caps = VIRTCHNL_VF_OFFLOAD_L2 |
  114. VIRTCHNL_VF_OFFLOAD_RSS_PF |
  115. VIRTCHNL_VF_OFFLOAD_RSS_AQ |
  116. VIRTCHNL_VF_OFFLOAD_RSS_REG |
  117. VIRTCHNL_VF_OFFLOAD_VLAN |
  118. VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
  119. VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
  120. VIRTCHNL_VF_OFFLOAD_ENCAP |
  121. VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
  122. VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
  123. VIRTCHNL_VF_OFFLOAD_ADQ;
  124. adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
  125. adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
  126. if (PF_IS_V11(adapter))
  127. return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
  128. (u8 *)&caps, sizeof(caps));
  129. else
  130. return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
  131. NULL, 0);
  132. }
  133. /**
  134. * iavf_validate_num_queues
  135. * @adapter: adapter structure
  136. *
  137. * Validate that the number of queues the PF has sent in
  138. * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
  139. **/
  140. static void iavf_validate_num_queues(struct iavf_adapter *adapter)
  141. {
  142. if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
  143. struct virtchnl_vsi_resource *vsi_res;
  144. int i;
  145. dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
  146. adapter->vf_res->num_queue_pairs,
  147. IAVF_MAX_REQ_QUEUES);
  148. dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
  149. IAVF_MAX_REQ_QUEUES);
  150. adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
  151. for (i = 0; i < adapter->vf_res->num_vsis; i++) {
  152. vsi_res = &adapter->vf_res->vsi_res[i];
  153. vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
  154. }
  155. }
  156. }
/**
 * iavf_get_vf_config
 * @adapter: private adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int iavf_get_vf_config(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	iavf_status err;
	u16 len;

	/* Worst-case reply size: the vf_resource header plus up to
	 * IAVF_MAX_VF_VSI trailing vsi_resource entries.
	 */
	len = sizeof(struct virtchnl_vf_resource) +
	      IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}
	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = iavf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		/* Skip any unrelated events until the resources reply shows */
		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}
	/* The PF reports its status for the request in cookie_low */
	err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
	/* Copy whatever the PF sent, bounded by our buffer size. Note this
	 * copy (and the parse below) runs even when err is nonzero.
	 */
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
	/* some PFs send more queues than we should have so validate that
	 * we aren't getting too many queues
	 */
	if (!err)
		iavf_validate_num_queues(adapter);
	iavf_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}
  206. /**
  207. * iavf_configure_queues
  208. * @adapter: adapter structure
  209. *
  210. * Request that the PF set up our (previously allocated) queues.
  211. **/
  212. void iavf_configure_queues(struct iavf_adapter *adapter)
  213. {
  214. struct virtchnl_vsi_queue_config_info *vqci;
  215. struct virtchnl_queue_pair_info *vqpi;
  216. int pairs = adapter->num_active_queues;
  217. int i, len, max_frame = IAVF_MAX_RXBUFFER;
  218. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  219. /* bail because we already have a command pending */
  220. dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
  221. adapter->current_op);
  222. return;
  223. }
  224. adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
  225. len = sizeof(struct virtchnl_vsi_queue_config_info) +
  226. (sizeof(struct virtchnl_queue_pair_info) * pairs);
  227. vqci = kzalloc(len, GFP_KERNEL);
  228. if (!vqci)
  229. return;
  230. /* Limit maximum frame size when jumbo frames is not enabled */
  231. if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
  232. (adapter->netdev->mtu <= ETH_DATA_LEN))
  233. max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
  234. vqci->vsi_id = adapter->vsi_res->vsi_id;
  235. vqci->num_queue_pairs = pairs;
  236. vqpi = vqci->qpair;
  237. /* Size check is not needed here - HW max is 16 queue pairs, and we
  238. * can fit info for 31 of them into the AQ buffer before it overflows.
  239. */
  240. for (i = 0; i < pairs; i++) {
  241. vqpi->txq.vsi_id = vqci->vsi_id;
  242. vqpi->txq.queue_id = i;
  243. vqpi->txq.ring_len = adapter->tx_rings[i].count;
  244. vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
  245. vqpi->rxq.vsi_id = vqci->vsi_id;
  246. vqpi->rxq.queue_id = i;
  247. vqpi->rxq.ring_len = adapter->rx_rings[i].count;
  248. vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
  249. vqpi->rxq.max_pkt_size = max_frame;
  250. vqpi->rxq.databuffer_size =
  251. ALIGN(adapter->rx_rings[i].rx_buf_len,
  252. BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
  253. vqpi++;
  254. }
  255. adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
  256. iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
  257. (u8 *)vqci, len);
  258. kfree(vqci);
  259. }
  260. /**
  261. * iavf_enable_queues
  262. * @adapter: adapter structure
  263. *
  264. * Request that the PF enable all of our queues.
  265. **/
  266. void iavf_enable_queues(struct iavf_adapter *adapter)
  267. {
  268. struct virtchnl_queue_select vqs;
  269. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  270. /* bail because we already have a command pending */
  271. dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
  272. adapter->current_op);
  273. return;
  274. }
  275. adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
  276. vqs.vsi_id = adapter->vsi_res->vsi_id;
  277. vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
  278. vqs.rx_queues = vqs.tx_queues;
  279. adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
  280. iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
  281. (u8 *)&vqs, sizeof(vqs));
  282. }
  283. /**
  284. * iavf_disable_queues
  285. * @adapter: adapter structure
  286. *
  287. * Request that the PF disable all of our queues.
  288. **/
  289. void iavf_disable_queues(struct iavf_adapter *adapter)
  290. {
  291. struct virtchnl_queue_select vqs;
  292. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  293. /* bail because we already have a command pending */
  294. dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
  295. adapter->current_op);
  296. return;
  297. }
  298. adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
  299. vqs.vsi_id = adapter->vsi_res->vsi_id;
  300. vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
  301. vqs.rx_queues = vqs.tx_queues;
  302. adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
  303. iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
  304. (u8 *)&vqs, sizeof(vqs));
  305. }
  306. /**
  307. * iavf_map_queues
  308. * @adapter: adapter structure
  309. *
  310. * Request that the PF map queues to interrupt vectors. Misc causes, including
  311. * admin queue, are always mapped to vector 0.
  312. **/
  313. void iavf_map_queues(struct iavf_adapter *adapter)
  314. {
  315. struct virtchnl_irq_map_info *vimi;
  316. struct virtchnl_vector_map *vecmap;
  317. int v_idx, q_vectors, len;
  318. struct iavf_q_vector *q_vector;
  319. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  320. /* bail because we already have a command pending */
  321. dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
  322. adapter->current_op);
  323. return;
  324. }
  325. adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
  326. q_vectors = adapter->num_msix_vectors - NONQ_VECS;
  327. len = sizeof(struct virtchnl_irq_map_info) +
  328. (adapter->num_msix_vectors *
  329. sizeof(struct virtchnl_vector_map));
  330. vimi = kzalloc(len, GFP_KERNEL);
  331. if (!vimi)
  332. return;
  333. vimi->num_vectors = adapter->num_msix_vectors;
  334. /* Queue vectors first */
  335. for (v_idx = 0; v_idx < q_vectors; v_idx++) {
  336. q_vector = &adapter->q_vectors[v_idx];
  337. vecmap = &vimi->vecmap[v_idx];
  338. vecmap->vsi_id = adapter->vsi_res->vsi_id;
  339. vecmap->vector_id = v_idx + NONQ_VECS;
  340. vecmap->txq_map = q_vector->ring_mask;
  341. vecmap->rxq_map = q_vector->ring_mask;
  342. vecmap->rxitr_idx = IAVF_RX_ITR;
  343. vecmap->txitr_idx = IAVF_TX_ITR;
  344. }
  345. /* Misc vector last - this is only for AdminQ messages */
  346. vecmap = &vimi->vecmap[v_idx];
  347. vecmap->vsi_id = adapter->vsi_res->vsi_id;
  348. vecmap->vector_id = 0;
  349. vecmap->txq_map = 0;
  350. vecmap->rxq_map = 0;
  351. adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
  352. iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
  353. (u8 *)vimi, len);
  354. kfree(vimi);
  355. }
  356. /**
  357. * iavf_request_queues
  358. * @adapter: adapter structure
  359. * @num: number of requested queues
  360. *
  361. * We get a default number of queues from the PF. This enables us to request a
  362. * different number. Returns 0 on success, negative on failure
  363. **/
  364. int iavf_request_queues(struct iavf_adapter *adapter, int num)
  365. {
  366. struct virtchnl_vf_res_request vfres;
  367. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  368. /* bail because we already have a command pending */
  369. dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
  370. adapter->current_op);
  371. return -EBUSY;
  372. }
  373. vfres.num_queue_pairs = num;
  374. adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
  375. adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
  376. return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
  377. (u8 *)&vfres, sizeof(vfres));
  378. }
  379. /**
  380. * iavf_add_ether_addrs
  381. * @adapter: adapter structure
  382. *
  383. * Request that the PF add one or more addresses to our filters.
  384. **/
  385. void iavf_add_ether_addrs(struct iavf_adapter *adapter)
  386. {
  387. struct virtchnl_ether_addr_list *veal;
  388. int len, i = 0, count = 0;
  389. struct iavf_mac_filter *f;
  390. bool more = false;
  391. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  392. /* bail because we already have a command pending */
  393. dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
  394. adapter->current_op);
  395. return;
  396. }
  397. spin_lock_bh(&adapter->mac_vlan_list_lock);
  398. list_for_each_entry(f, &adapter->mac_filter_list, list) {
  399. if (f->add)
  400. count++;
  401. }
  402. if (!count) {
  403. adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
  404. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  405. return;
  406. }
  407. adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
  408. len = sizeof(struct virtchnl_ether_addr_list) +
  409. (count * sizeof(struct virtchnl_ether_addr));
  410. if (len > IAVF_MAX_AQ_BUF_SIZE) {
  411. dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
  412. count = (IAVF_MAX_AQ_BUF_SIZE -
  413. sizeof(struct virtchnl_ether_addr_list)) /
  414. sizeof(struct virtchnl_ether_addr);
  415. len = sizeof(struct virtchnl_ether_addr_list) +
  416. (count * sizeof(struct virtchnl_ether_addr));
  417. more = true;
  418. }
  419. veal = kzalloc(len, GFP_ATOMIC);
  420. if (!veal) {
  421. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  422. return;
  423. }
  424. veal->vsi_id = adapter->vsi_res->vsi_id;
  425. veal->num_elements = count;
  426. list_for_each_entry(f, &adapter->mac_filter_list, list) {
  427. if (f->add) {
  428. ether_addr_copy(veal->list[i].addr, f->macaddr);
  429. i++;
  430. f->add = false;
  431. if (i == count)
  432. break;
  433. }
  434. }
  435. if (!more)
  436. adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
  437. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  438. iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
  439. kfree(veal);
  440. }
/**
 * iavf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* Count the filters marked for removal */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		/* Nothing to do - clear the request flag and bail */
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Truncate to what fits in one AQ buffer; 'more' keeps the
		 * aq_required flag set so a follow-up request is made
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}
	/* GFP_ATOMIC because we hold a BH-disabling spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* _safe variant: entries are unlinked and freed as we copy them */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
/**
 * iavf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* Count the VLAN filters waiting to be added */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		/* Nothing to do - clear the request flag and bail */
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
	/* Each VLAN ID is a trailing u16 element of the message */
	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Truncate to what fits in one AQ buffer; 'more' keeps the
		 * aq_required flag set so a follow-up request is made
		 */
		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	/* GFP_ATOMIC because we hold a BH-disabling spinlock */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
  566. /**
  567. * iavf_del_vlans
  568. * @adapter: adapter structure
  569. *
  570. * Request that the PF remove one or more VLAN filters from our VSI.
  571. **/
  572. void iavf_del_vlans(struct iavf_adapter *adapter)
  573. {
  574. struct virtchnl_vlan_filter_list *vvfl;
  575. struct iavf_vlan_filter *f, *ftmp;
  576. int len, i = 0, count = 0;
  577. bool more = false;
  578. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  579. /* bail because we already have a command pending */
  580. dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
  581. adapter->current_op);
  582. return;
  583. }
  584. spin_lock_bh(&adapter->mac_vlan_list_lock);
  585. list_for_each_entry(f, &adapter->vlan_filter_list, list) {
  586. if (f->remove)
  587. count++;
  588. }
  589. if (!count) {
  590. adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
  591. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  592. return;
  593. }
  594. adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
  595. len = sizeof(struct virtchnl_vlan_filter_list) +
  596. (count * sizeof(u16));
  597. if (len > IAVF_MAX_AQ_BUF_SIZE) {
  598. dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
  599. count = (IAVF_MAX_AQ_BUF_SIZE -
  600. sizeof(struct virtchnl_vlan_filter_list)) /
  601. sizeof(u16);
  602. len = sizeof(struct virtchnl_vlan_filter_list) +
  603. (count * sizeof(u16));
  604. more = true;
  605. }
  606. vvfl = kzalloc(len, GFP_ATOMIC);
  607. if (!vvfl) {
  608. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  609. return;
  610. }
  611. vvfl->vsi_id = adapter->vsi_res->vsi_id;
  612. vvfl->num_elements = count;
  613. list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
  614. if (f->remove) {
  615. vvfl->vlan_id[i] = f->vlan;
  616. i++;
  617. list_del(&f->list);
  618. kfree(f);
  619. if (i == count)
  620. break;
  621. }
  622. }
  623. if (!more)
  624. adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
  625. spin_unlock_bh(&adapter->mac_vlan_list_lock);
  626. iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
  627. kfree(vvfl);
  628. }
  629. /**
  630. * iavf_set_promiscuous
  631. * @adapter: adapter structure
  632. * @flags: bitmask to control unicast/multicast promiscuous.
  633. *
  634. * Request that the PF enable promiscuous mode for our VSI.
  635. **/
  636. void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
  637. {
  638. struct virtchnl_promisc_info vpi;
  639. int promisc_all;
  640. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  641. /* bail because we already have a command pending */
  642. dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
  643. adapter->current_op);
  644. return;
  645. }
  646. promisc_all = FLAG_VF_UNICAST_PROMISC |
  647. FLAG_VF_MULTICAST_PROMISC;
  648. if ((flags & promisc_all) == promisc_all) {
  649. adapter->flags |= IAVF_FLAG_PROMISC_ON;
  650. adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
  651. dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
  652. }
  653. if (flags & FLAG_VF_MULTICAST_PROMISC) {
  654. adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
  655. adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
  656. dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
  657. }
  658. if (!flags) {
  659. adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
  660. IAVF_FLAG_ALLMULTI_ON);
  661. adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
  662. IAVF_FLAG_AQ_RELEASE_ALLMULTI);
  663. dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
  664. }
  665. adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
  666. vpi.vsi_id = adapter->vsi_res->vsi_id;
  667. vpi.flags = flags;
  668. iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
  669. (u8 *)&vpi, sizeof(vpi));
  670. }
  671. /**
  672. * iavf_request_stats
  673. * @adapter: adapter structure
  674. *
  675. * Request VSI statistics from PF.
  676. **/
  677. void iavf_request_stats(struct iavf_adapter *adapter)
  678. {
  679. struct virtchnl_queue_select vqs;
  680. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  681. /* no error message, this isn't crucial */
  682. return;
  683. }
  684. adapter->current_op = VIRTCHNL_OP_GET_STATS;
  685. vqs.vsi_id = adapter->vsi_res->vsi_id;
  686. /* queue maps are ignored for this message - only the vsi is used */
  687. if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
  688. sizeof(vqs)))
  689. /* if the request failed, don't lock out others */
  690. adapter->current_op = VIRTCHNL_OP_UNKNOWN;
  691. }
  692. /**
  693. * iavf_get_hena
  694. * @adapter: adapter structure
  695. *
  696. * Request hash enable capabilities from PF
  697. **/
  698. void iavf_get_hena(struct iavf_adapter *adapter)
  699. {
  700. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  701. /* bail because we already have a command pending */
  702. dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
  703. adapter->current_op);
  704. return;
  705. }
  706. adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
  707. adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
  708. iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
  709. }
  710. /**
  711. * iavf_set_hena
  712. * @adapter: adapter structure
  713. *
  714. * Request the PF to set our RSS hash capabilities
  715. **/
  716. void iavf_set_hena(struct iavf_adapter *adapter)
  717. {
  718. struct virtchnl_rss_hena vrh;
  719. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  720. /* bail because we already have a command pending */
  721. dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
  722. adapter->current_op);
  723. return;
  724. }
  725. vrh.hena = adapter->hena;
  726. adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
  727. adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
  728. iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
  729. sizeof(vrh));
  730. }
  731. /**
  732. * iavf_set_rss_key
  733. * @adapter: adapter structure
  734. *
  735. * Request the PF to set our RSS hash key
  736. **/
  737. void iavf_set_rss_key(struct iavf_adapter *adapter)
  738. {
  739. struct virtchnl_rss_key *vrk;
  740. int len;
  741. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  742. /* bail because we already have a command pending */
  743. dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
  744. adapter->current_op);
  745. return;
  746. }
  747. len = sizeof(struct virtchnl_rss_key) +
  748. (adapter->rss_key_size * sizeof(u8)) - 1;
  749. vrk = kzalloc(len, GFP_KERNEL);
  750. if (!vrk)
  751. return;
  752. vrk->vsi_id = adapter->vsi.id;
  753. vrk->key_len = adapter->rss_key_size;
  754. memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
  755. adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
  756. adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
  757. iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
  758. kfree(vrk);
  759. }
  760. /**
  761. * iavf_set_rss_lut
  762. * @adapter: adapter structure
  763. *
  764. * Request the PF to set our RSS lookup table
  765. **/
  766. void iavf_set_rss_lut(struct iavf_adapter *adapter)
  767. {
  768. struct virtchnl_rss_lut *vrl;
  769. int len;
  770. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  771. /* bail because we already have a command pending */
  772. dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
  773. adapter->current_op);
  774. return;
  775. }
  776. len = sizeof(struct virtchnl_rss_lut) +
  777. (adapter->rss_lut_size * sizeof(u8)) - 1;
  778. vrl = kzalloc(len, GFP_KERNEL);
  779. if (!vrl)
  780. return;
  781. vrl->vsi_id = adapter->vsi.id;
  782. vrl->lut_entries = adapter->rss_lut_size;
  783. memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
  784. adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
  785. adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
  786. iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
  787. kfree(vrl);
  788. }
  789. /**
  790. * iavf_enable_vlan_stripping
  791. * @adapter: adapter structure
  792. *
  793. * Request VLAN header stripping to be enabled
  794. **/
  795. void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
  796. {
  797. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  798. /* bail because we already have a command pending */
  799. dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
  800. adapter->current_op);
  801. return;
  802. }
  803. adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
  804. adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
  805. iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
  806. }
  807. /**
  808. * iavf_disable_vlan_stripping
  809. * @adapter: adapter structure
  810. *
  811. * Request VLAN header stripping to be disabled
  812. **/
  813. void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
  814. {
  815. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  816. /* bail because we already have a command pending */
  817. dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
  818. adapter->current_op);
  819. return;
  820. }
  821. adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
  822. adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
  823. iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
  824. }
  825. /**
  826. * iavf_print_link_message - print link up or down
  827. * @adapter: adapter structure
  828. *
  829. * Log a message telling the world of our wonderous link status
  830. */
  831. static void iavf_print_link_message(struct iavf_adapter *adapter)
  832. {
  833. struct net_device *netdev = adapter->netdev;
  834. char *speed = "Unknown ";
  835. if (!adapter->link_up) {
  836. netdev_info(netdev, "NIC Link is Down\n");
  837. return;
  838. }
  839. switch (adapter->link_speed) {
  840. case I40E_LINK_SPEED_40GB:
  841. speed = "40 G";
  842. break;
  843. case I40E_LINK_SPEED_25GB:
  844. speed = "25 G";
  845. break;
  846. case I40E_LINK_SPEED_20GB:
  847. speed = "20 G";
  848. break;
  849. case I40E_LINK_SPEED_10GB:
  850. speed = "10 G";
  851. break;
  852. case I40E_LINK_SPEED_1GB:
  853. speed = "1000 M";
  854. break;
  855. case I40E_LINK_SPEED_100MB:
  856. speed = "100 M";
  857. break;
  858. default:
  859. break;
  860. }
  861. netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
  862. }
  863. /**
  864. * iavf_enable_channel
  865. * @adapter: adapter structure
  866. *
  867. * Request that the PF enable channels as specified by
  868. * the user via tc tool.
  869. **/
  870. void iavf_enable_channels(struct iavf_adapter *adapter)
  871. {
  872. struct virtchnl_tc_info *vti = NULL;
  873. u16 len;
  874. int i;
  875. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  876. /* bail because we already have a command pending */
  877. dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
  878. adapter->current_op);
  879. return;
  880. }
  881. len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
  882. sizeof(struct virtchnl_tc_info);
  883. vti = kzalloc(len, GFP_KERNEL);
  884. if (!vti)
  885. return;
  886. vti->num_tc = adapter->num_tc;
  887. for (i = 0; i < vti->num_tc; i++) {
  888. vti->list[i].count = adapter->ch_config.ch_info[i].count;
  889. vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
  890. vti->list[i].pad = 0;
  891. vti->list[i].max_tx_rate =
  892. adapter->ch_config.ch_info[i].max_tx_rate;
  893. }
  894. adapter->ch_config.state = __IAVF_TC_RUNNING;
  895. adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
  896. adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
  897. adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
  898. iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
  899. kfree(vti);
  900. }
  901. /**
  902. * iavf_disable_channel
  903. * @adapter: adapter structure
  904. *
  905. * Request that the PF disable channels that are configured
  906. **/
  907. void iavf_disable_channels(struct iavf_adapter *adapter)
  908. {
  909. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  910. /* bail because we already have a command pending */
  911. dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
  912. adapter->current_op);
  913. return;
  914. }
  915. adapter->ch_config.state = __IAVF_TC_INVALID;
  916. adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
  917. adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
  918. adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
  919. iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
  920. }
  921. /**
  922. * iavf_print_cloud_filter
  923. * @adapter: adapter structure
  924. * @f: cloud filter to print
  925. *
  926. * Print the cloud filter
  927. **/
  928. static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
  929. struct virtchnl_filter *f)
  930. {
  931. switch (f->flow_type) {
  932. case VIRTCHNL_TCP_V4_FLOW:
  933. dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
  934. &f->data.tcp_spec.dst_mac,
  935. &f->data.tcp_spec.src_mac,
  936. ntohs(f->data.tcp_spec.vlan_id),
  937. &f->data.tcp_spec.dst_ip[0],
  938. &f->data.tcp_spec.src_ip[0],
  939. ntohs(f->data.tcp_spec.dst_port),
  940. ntohs(f->data.tcp_spec.src_port));
  941. break;
  942. case VIRTCHNL_TCP_V6_FLOW:
  943. dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
  944. &f->data.tcp_spec.dst_mac,
  945. &f->data.tcp_spec.src_mac,
  946. ntohs(f->data.tcp_spec.vlan_id),
  947. &f->data.tcp_spec.dst_ip,
  948. &f->data.tcp_spec.src_ip,
  949. ntohs(f->data.tcp_spec.dst_port),
  950. ntohs(f->data.tcp_spec.src_port));
  951. break;
  952. }
  953. }
  954. /**
  955. * iavf_add_cloud_filter
  956. * @adapter: adapter structure
  957. *
  958. * Request that the PF add cloud filters as specified
  959. * by the user via tc tool.
  960. **/
  961. void iavf_add_cloud_filter(struct iavf_adapter *adapter)
  962. {
  963. struct iavf_cloud_filter *cf;
  964. struct virtchnl_filter *f;
  965. int len = 0, count = 0;
  966. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  967. /* bail because we already have a command pending */
  968. dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
  969. adapter->current_op);
  970. return;
  971. }
  972. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  973. if (cf->add) {
  974. count++;
  975. break;
  976. }
  977. }
  978. if (!count) {
  979. adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
  980. return;
  981. }
  982. adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
  983. len = sizeof(struct virtchnl_filter);
  984. f = kzalloc(len, GFP_KERNEL);
  985. if (!f)
  986. return;
  987. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  988. if (cf->add) {
  989. memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
  990. cf->add = false;
  991. cf->state = __IAVF_CF_ADD_PENDING;
  992. iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
  993. (u8 *)f, len);
  994. }
  995. }
  996. kfree(f);
  997. }
  998. /**
  999. * iavf_del_cloud_filter
  1000. * @adapter: adapter structure
  1001. *
  1002. * Request that the PF delete cloud filters as specified
  1003. * by the user via tc tool.
  1004. **/
  1005. void iavf_del_cloud_filter(struct iavf_adapter *adapter)
  1006. {
  1007. struct iavf_cloud_filter *cf, *cftmp;
  1008. struct virtchnl_filter *f;
  1009. int len = 0, count = 0;
  1010. if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
  1011. /* bail because we already have a command pending */
  1012. dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
  1013. adapter->current_op);
  1014. return;
  1015. }
  1016. list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
  1017. if (cf->del) {
  1018. count++;
  1019. break;
  1020. }
  1021. }
  1022. if (!count) {
  1023. adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
  1024. return;
  1025. }
  1026. adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
  1027. len = sizeof(struct virtchnl_filter);
  1028. f = kzalloc(len, GFP_KERNEL);
  1029. if (!f)
  1030. return;
  1031. list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
  1032. if (cf->del) {
  1033. memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
  1034. cf->del = false;
  1035. cf->state = __IAVF_CF_DEL_PENDING;
  1036. iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
  1037. (u8 *)f, len);
  1038. }
  1039. }
  1040. kfree(f);
  1041. }
/**
 * iavf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void iavf_request_reset(struct iavf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	/* No reply will arrive, so drop any in-flight opcode now to
	 * unblock future virtchnl commands.
	 */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
 * iavf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
			      enum virtchnl_ops v_opcode, iavf_status v_retval,
			      u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	/* PF-initiated events are unsolicited and are handled (and
	 * returned from) here without touching current_op.
	 */
	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
		bool link_up = vpe->event_data.link_event.link_status;

		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_speed =
				vpe->event_data.link_event.link_speed;

			/* we've already got the right link status, bail */
			if (adapter->link_up == link_up)
				break;

			if (link_up) {
				/* If we get link up message and start queues
				 * before our queues are configured it will
				 * trigger a TX hang. In that case, just ignore
				 * the link status message,we'll get another one
				 * after we enable queues and actually prepared
				 * to send traffic.
				 */
				if (adapter->state != __IAVF_RUNNING)
					break;

				/* For ADq enabled VF, we reconfigure VSIs and
				 * re-allocate queues. Hence wait till all
				 * queues are enabled.
				 */
				if (adapter->flags &
				    IAVF_FLAG_QUEUES_DISABLED)
					break;
			}

			adapter->link_up = link_up;
			if (link_up) {
				netif_tx_start_all_queues(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_tx_stop_all_queues(netdev);
				netif_carrier_off(netdev);
			}
			iavf_print_link_message(adapter);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
			/* Schedule the reset task once; RESET_PENDING keeps
			 * duplicate events from re-queueing the work.
			 */
			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
				adapter->flags |= IAVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		/* Events are not command replies: leave current_op alone. */
		return;
	}

	/* Non-zero v_retval: the PF rejected our request. Log it and, for
	 * the opcodes below, roll back the state we optimistically set
	 * when the request was sent.
	 */
	if (v_retval) {
		switch (v_opcode) {
		case VIRTCHNL_OP_ADD_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ADD_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ENABLE_CHANNELS:
			/* Channel setup failed: drop TC config and restart
			 * the queues so traffic keeps flowing.
			 */
			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_INVALID;
			netdev_reset_tc(netdev);
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_DISABLE_CHANNELS:
			/* Teardown failed: channels are still active. */
			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_RUNNING;
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf, *cftmp;

			/* The filter never made it to hardware: remove the
			 * pending entries from our list entirely.
			 */
			list_for_each_entry_safe(cf, cftmp,
						 &adapter->cloud_filter_list,
						 list) {
				if (cf->state == __IAVF_CF_ADD_PENDING) {
					cf->state = __IAVF_CF_INVALID;
					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
					list_del(&cf->list);
					kfree(cf);
					adapter->num_cloud_filters--;
				}
			}
		}
			break;
		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf;

			/* Deletion failed: the filter is still programmed,
			 * so mark it active again.
			 */
			list_for_each_entry(cf, &adapter->cloud_filter_list,
					    list) {
				if (cf->state == __IAVF_CF_DEL_PENDING) {
					cf->state = __IAVF_CF_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
				}
			}
		}
			break;
		default:
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval, iavf_stat_str(&adapter->hw, v_retval),
				v_opcode);
		}
	}

	/* Per-opcode success handling (runs even after an error was
	 * logged above, matching the original control flow).
	 */
	switch (v_opcode) {
	case VIRTCHNL_OP_GET_STATS: {
		struct iavf_eth_stats *stats =
			(struct iavf_eth_stats *)msg;
		/* Fold unicast/multicast/broadcast counters into the
		 * aggregate netdev packet counts.
		 */
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
	}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  IAVF_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
		/* min() guards against a PF reply larger than vf_res. */
		memcpy(adapter->vf_res, msg, min(msglen, len));
		iavf_validate_num_queues(adapter);
		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
			/* restore current mac address */
			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		} else {
			/* refresh current mac address if changed */
			ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
			ether_addr_copy(netdev->perm_addr,
					adapter->hw.mac.addr);
		}
		iavf_process_config(adapter);
	}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		iavf_irq_enable(adapter, true);
		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
		if (adapter->state == __IAVF_DOWN_PENDING) {
			adapter->state = __IAVF_DOWN;
			/* iavf_close() waits on this queue for DOWN. */
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Gobble zero-length replies from the PF. They indicate that
		 * a previous message was received OK, and the client doesn't
		 * care about that.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			iavf_notify_client_message(&adapter->vsi, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
			~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;

		/* Only accept the reply if it is exactly the size we
		 * expect; anything else is malformed.
		 */
		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
	}
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES: {
		struct virtchnl_vf_res_request *vfres =
			(struct virtchnl_vf_res_request *)msg;

		/* If the PF granted a different count than requested,
		 * abandon the queue-change and skip the ITR reinit.
		 */
		if (vfres->num_queue_pairs != adapter->num_req_queues) {
			dev_info(&adapter->pdev->dev,
				 "Requested %d queues, PF can support %d\n",
				 adapter->num_req_queues,
				 vfres->num_queue_pairs);
			adapter->num_req_queues = 0;
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}
	}
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf;

		/* Promote all pending adds to active on success. */
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __IAVF_CF_ADD_PENDING)
				cf->state = __IAVF_CF_ACTIVE;
		}
	}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf, *cftmp;

		/* Deletion confirmed: drop the entries from our list. */
		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __IAVF_CF_DEL_PENDING) {
				cf->state = __IAVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
	}
		break;
	default:
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */

	/* The reply (expected or not) closes out the pending command. */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}