// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
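
/* This file implements the PF side of the HCLGE PF <-> VF mailbox: it
 * parses VF requests received on the command receive queue (CRQ), acts
 * on them, and sends synchronous responses or asynchronous notifications
 * back to the VFs.
 */
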
/* hclge_gen_resp_to_vf: used to generate a synchronous response to a VF
 * when the PF receives a mailbox message from it.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_status: indicates to the VF whether its request succeeded (0) or
 *		 failed (non-zero)
 * @resp_data: optional response payload, copied into msg[4] onwards
 * @resp_data_len: length of @resp_data in bytes
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				int resp_status,
				u8 *resp_data, u16 resp_data_len)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %d exceeds max len %d\n",
			resp_data_len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* bail out rather than overrun the descriptor's data area */
		return -EINVAL;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;

	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;

	if (resp_data && resp_data_len > 0)
		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send response to VF\n", status);

	return status;
}
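
/* hclge_send_mbx_msg: send a mailbox message from PF to VF
 * @vport: pointer to struct hclge_vport
 * @msg: message payload, copied into msg[1] onwards of the descriptor
 * @msg_len: length of @msg in bytes
 * @mbx_opcode: mailbox opcode, placed in msg[0]
 * @dest_vfid: id of the destination VF
 */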
static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_cmd_status status;
	struct hclge_desc desc;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg[0] = mbx_opcode;

	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"PF failed(=%d) to send mailbox message to VF\n",
			status);

	return status;
}
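
/* hclge_inform_reset_assert_to_vf: notify a VF that the PF is about to
 * assert a function reset for it, so the VF can enter its reset-pending
 * state and start polling for hardware reset completion.
 */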
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	/* zero-initialized; the notification carries no meaningful payload */
	u8 msg_data[2] = {0};
	u8 dest_vfid;

	dest_vfid = (u8)vport->vport_id;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
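
/* hclge_free_vector_ring_chain: free every node chained after @head;
 * @head itself belongs to the caller (usually stack memory) and is left
 * untouched.
 */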
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kzfree(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type, tqp id and int_gl index
 * from the mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl index
 * remaining entries: further (ring type, tqp id, int_gl index) tuples,
 * one per additional ring
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	int ring_num;
	int i;

	ring_num = req->msg[2];

	/* the request must fit within the mailbox message data area */
	if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
		HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		HCLGE_MBX_RING_NODE_VARIABLE_NUM))
		return -EINVAL;

	hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
	hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
		       HCLGE_INT_GL_IDX_S,
		       req->msg[5]);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			     req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);

		new_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
			[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);

		hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
			       HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}
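
/* hclge_map_unmap_ring_to_vf_vector: map (@en == true) or unmap the ring
 * chain decoded from the mailbox request to/from the VF vector given in
 * msg[1].
 */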
static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg[1];
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	/* free the chain even when binding fails, to avoid leaking nodes */
	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}
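
/* hclge_set_vf_promisc_mode: configure promiscuous mode for a VF; msg[1]
 * enables or disables unicast and multicast promiscuity together, while
 * the broadcast promisc bit is always left enabled.
 */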
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *req)
{
	bool en = req->msg[1] ? true : false;
	struct hclge_promisc_param param;

	/* always enable broadcast promisc bit */
	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	return hclge_cmd_set_promisc_mode(vport->back, &param);
}
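
/* hclge_set_vf_uc_mac_addr: handle a VF unicast MAC table request
 * msg[1]: subcode (modify, add or remove)
 * msg[2] ~ msg[7]: MAC address
 * msg[8] ~ msg[13]: old MAC address (modify subcode only)
 */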
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);

		hclge_rm_uc_addr_common(vport, old_addr);
		status = hclge_add_uc_addr_common(vport, mac_addr);
		if (status)
			/* roll back to the old address on failure */
			hclge_add_uc_addr_common(vport, old_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_add_uc_addr_common(vport, mac_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_rm_uc_addr_common(vport, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return 0;
}
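
/* hclge_set_vf_mc_mac_addr: handle a VF multicast MAC table request
 * msg[1]: subcode (add, remove or MTA filter enable)
 * msg[2] ~ msg[7]: MAC address, or the MTA enable flag in msg[2]
 */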
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    bool gen_resp)
{
	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		status = hclge_add_mc_addr_common(vport, mac_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		status = hclge_rm_mc_addr_common(vport, mac_addr);
	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
		u8 func_id = vport->vport_id;
		bool enable = mbx_req->msg[2];

		status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %d\n",
			mbx_req->msg[1]);
		return -EIO;
	}

	if (gen_resp)
		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return 0;
}
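
/* hclge_set_vf_vlan_cfg: handle a VF VLAN filter request
 * msg[1]: subcode; only HCLGE_MBX_VLAN_FILTER is handled here
 * msg[2]: is_kill, non-zero removes the filter
 * msg[3] ~ msg[4]: VLAN id
 * msg[5] ~ msg[6]: VLAN protocol
 */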
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 bool gen_resp)
{
	struct hclge_dev *hdev = vport->back;
	int status = 0;

	if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
		u16 vlan, proto;
		bool is_kill;

		is_kill = !!mbx_req->msg[2];
		memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
		status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
						  is_kill, vlan, 0,
						  cpu_to_be16(proto));
	}

	if (gen_resp)
		status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);

	return status;
}
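
/* hclge_get_vf_tcinfo: respond with the hardware TC map for this VF */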
static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			       bool gen_resp)
{
	struct hclge_dev *hdev = vport->back;

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
				    sizeof(u8));
}
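
/* hclge_get_vf_queue_info: respond with the VF's queue configuration:
 * bytes 0~1: number of allocated tqps
 * bytes 2~3: RSS queue size
 * bytes 4~5: descriptors per queue
 * bytes 6~7: rx buffer length
 */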
static int hclge_get_vf_queue_info(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				   bool gen_resp)
{
#define HCLGE_TQPS_RSS_INFO_LEN		8
	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
	memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
	memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
	memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));

	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
				    HCLGE_TQPS_RSS_INFO_LEN);
}
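
/* hclge_get_link_info: push the current link status, speed and duplex to
 * the requesting VF as an HCLGE_MBX_LINK_STAT_CHANGE message:
 * bytes 0~1: link status
 * bytes 2~5: speed
 * bytes 6~7: duplex
 */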
static int hclge_get_link_info(struct hclge_vport *vport,
			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	u16 link_status;
	u8 msg_data[8];
	u8 dest_vfid;
	u16 duplex;

	/* mac.link can only be 0 or 1 */
	link_status = (u16)hdev->hw.mac.link;
	duplex = hdev->hw.mac.duplex;
	memcpy(&msg_data[0], &link_status, sizeof(u16));
	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
	memcpy(&msg_data[6], &duplex, sizeof(u16));
	dest_vfid = mbx_req->mbx_src_vfid;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}
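
/* hclge_mbx_reset_vf_queue: reset the VF queue whose id is carried in
 * msg[2] ~ msg[3], then acknowledge the VF once the reset has completed.
 */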
static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	u16 queue_id;

	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));

	hclge_reset_vf_queue(vport, queue_id);

	/* send response msg to VF after queue reset complete */
	hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
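
/* hclge_reset_vf: handle a VF's function reset request: first notify the
 * VF that the reset is about to be asserted, then trigger the function
 * reset in hardware.
 */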
static void hclge_reset_vf(struct hclge_vport *vport,
			   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!\n",
		 mbx_req->mbx_src_vfid);

	/* Acknowledge VF that PF is now about to assert the reset for the VF.
	 * On receiving this message VF will get into pending state and will
	 * start polling for the hardware reset completion status.
	 */
	ret = hclge_inform_reset_assert_to_vf(vport);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"PF fail(%d) to inform VF(%d) of reset, reset failed!\n",
			ret, vport->vport_id);
		return;
	}

	dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
		 mbx_req->mbx_src_vfid);

	/* reset this virtual function */
	hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
}
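
/* hclge_mbx_handler: main PF-side mailbox dispatcher; drains the command
 * receive queue (CRQ) and routes each VF-to-PF request to its handler
 * based on the opcode in msg[0].
 */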
void hclge_mbx_handler(struct hclge_dev *hdev)
{
	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclge_vport *vport;
	struct hclge_desc *desc;
	int ret, flag;

	flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
	/* handle all the mailbox requests in the queue */
	while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) {
		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;

		vport = &hdev->vport[req->mbx_src_vfid];

		switch (req->msg[0]) {
		case HCLGE_MBX_MAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
								req);
			break;
		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			ret = hclge_set_vf_promisc_mode(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF promisc mode\n",
					ret);
			break;
		case HCLGE_MBX_SET_UNICAST:
			ret = hclge_set_vf_uc_mac_addr(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF UC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_MULTICAST:
			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to set VF MC MAC Addr\n",
					ret);
			break;
		case HCLGE_MBX_SET_VLAN:
			ret = hclge_set_vf_vlan_cfg(vport, req, false);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to config VF's VLAN\n",
					ret);
			break;
		case HCLGE_MBX_GET_QINFO:
			ret = hclge_get_vf_queue_info(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get Q info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_TCINFO:
			ret = hclge_get_vf_tcinfo(vport, req, true);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF failed(%d) to get TC info for VF\n",
					ret);
			break;
		case HCLGE_MBX_GET_LINK_STATUS:
			ret = hclge_get_link_info(vport, req);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get link stat for VF\n",
					ret);
			break;
		case HCLGE_MBX_QUEUE_RESET:
			hclge_mbx_reset_vf_queue(vport, req);
			break;
		case HCLGE_MBX_RESET:
			hclge_reset_vf(vport, req);
			break;
		default:
			dev_err(&hdev->pdev->dev,
				"unsupported mailbox message, code = %d\n",
				req->msg[0]);
			break;
		}
		crq->desc[crq->next_to_use].flag = 0;
		hclge_mbx_ring_ptr_move_crq(crq);
		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
	}

	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
}