bnxt_sriov.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
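/* Forward an async event completion to one VF, or to all VFs when vf is
 * NULL, by sending an HWRM_FWD_ASYNC_EVENT_CMPL request to the firmware.
 */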
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf, u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_fwd_async_event_cmpl_input req = {0};
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
        if (vf)
                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req.encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
                goto fwd_async_event_cmpl_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
                           resp->error_code);
                rc = -1;
        }
fwd_async_event_cmpl_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

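/* Common sanity checks for the VF ndo callbacks: the PF must be up, SR-IOV
 * must be enabled, and vf_id must be within the range of active VFs.
 */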
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                netdev_err(bp->dev, "vf ndo called though PF is down\n");
                return -EINVAL;
        }
        if (!bp->pf.active_vfs) {
                netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
                return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

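/* Enable or disable firmware source MAC address checking (spoof check) for
 * a VF via HWRM_FUNC_CFG and mirror the result in the VF's flags.
 */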
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        bool old_setting = false;
        u32 func_flags;
        int rc;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        func_flags = vf->func_flags;
        if (setting)
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /*TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->func_flags = func_flags;
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
                        vf->flags &= ~BNXT_VF_SPOOFCHK;
        }
        return rc;
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;

        if (bnxt_vf_ndo_prep(bp, vf_id))
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        if (trusted)
                vf->flags |= BNXT_VF_TRUST;
        else
                vf->flags &= ~BNXT_VF_TRUST;
        return 0;
}

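/* Report the VF's MAC address, VLAN, rate limits, spoof-check, trust and
 * link state settings to the stack.
 */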
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
                       struct ifla_vf_info *ivi)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        ivi->vf = vf_id;
        vf = &bp->pf.vf[vf_id];

        if (is_valid_ether_addr(vf->mac_addr))
                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
        else
                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
        ivi->max_tx_rate = vf->max_tx_rate;
        ivi->min_tx_rate = vf->min_tx_rate;
        ivi->vlan = vf->vlan;
        if (vf->flags & BNXT_VF_QOS)
                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
        else
                ivi->qos = 0;
        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
        ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
        if (!(vf->flags & BNXT_VF_LINK_FORCED))
                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
        else if (vf->flags & BNXT_VF_LINK_UP)
                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
        else
                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

        return 0;
}

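/* Set the PF-administered MAC address for a VF and push it to the firmware
 * as the VF's default MAC address.
 */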
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject bc or mc mac addr, zero mac addr means allow
         * VF to use its own mac addr
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

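/* Program a default (port) VLAN for the VF.  Only 802.1Q is supported and
 * a non-zero qos/priority value is rejected for now.
 */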
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
                     __be16 vlan_proto)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        if (bp->hwrm_spec_code < 0x10201)
                return -ENOTSUPP;

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: needed to implement proper handling of user priority,
         * currently fail the command if there is valid priority
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                vf->vlan = vlan_tag;
        return rc;
}

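/* Set per-VF min/max TX rate limits: validate them against the PF link
 * speed and program them through HWRM_FUNC_CFG.
 */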
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
        req.min_bw = cpu_to_le32(min_tx_rate);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->min_tx_rate = min_tx_rate;
                vf->max_tx_rate = max_tx_rate;
        }
        return rc;
}

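/* Record the requested VF link state (auto, forced up or forced down) and
 * notify the VF with a link status change async event.
 */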
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];

        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->flags |= BNXT_VF_LINK_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->flags |= BNXT_VF_LINK_FORCED;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
                break;
        default:
                netdev_err(bp->dev, "Invalid link option\n");
                rc = -EINVAL;
                break;
        }
        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
        int i;
        struct bnxt_vf_info *vf;

        for (i = 0; i < num_vfs; i++) {
                vf = &bp->pf.vf[i];
                memset(vf, 0, sizeof(*vf));
        }
        return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        int i, rc = 0;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_vf_resc_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req.vf_id = cpu_to_le16(i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
        struct pci_dev *pdev = bp->pdev;
        int i;

        kfree(bp->pf.vf_event_bmap);
        bp->pf.vf_event_bmap = NULL;

        for (i = 0; i < 4; i++) {
                if (bp->pf.hwrm_cmd_req_addr[i]) {
                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                          bp->pf.hwrm_cmd_req_addr[i],
                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
                }
        }

        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
}

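/* Allocate the per-VF state array, the DMA-coherent pages that hold HWRM
 * requests forwarded from the VFs, and the VF event bitmap.
 */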
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
        struct pci_dev *pdev = bp->pdev;
        u32 nr_pages, size, i, j, k = 0;

        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
        if (!bp->pf.vf)
                return -ENOMEM;

        bnxt_set_vf_attr(bp, num_vfs);

        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
        nr_pages = size / BNXT_PAGE_SIZE;
        if (size & (BNXT_PAGE_SIZE - 1))
                nr_pages++;

        for (i = 0; i < nr_pages; i++) {
                bp->pf.hwrm_cmd_req_addr[i] =
                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
                                           GFP_KERNEL);

                if (!bp->pf.hwrm_cmd_req_addr[i])
                        return -ENOMEM;

                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
                        struct bnxt_vf_info *vf = &bp->pf.vf[k];

                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        vf->hwrm_cmd_req_dma_addr =
                                bp->pf.hwrm_cmd_req_dma_addr[i] + j *
                                BNXT_HWRM_REQ_MAX_SIZE;
                        k++;
                }
        }

        /* Max 128 VFs */
        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
        if (!bp->pf.vf_event_bmap)
                return -ENOMEM;

        bp->pf.hwrm_cmd_req_pages = nr_pages;
        return 0;
}

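/* Register the DMA addresses of the VF request buffer pages with the
 * firmware so VF-originated HWRM commands can be forwarded to the PF.
 */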
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
{
        struct hwrm_func_vf_resource_cfg_input req = {0};
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

        vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
        vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
        else
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
        vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.min_rsscos_ctx = cpu_to_le16(1);
        req.max_rsscos_ctx = cpu_to_le16(1);
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
                req.min_cmpl_rings = cpu_to_le16(1);
                req.min_tx_rings = cpu_to_le16(1);
                req.min_rx_rings = cpu_to_le16(1);
                req.min_l2_ctxs = cpu_to_le16(1);
                req.min_vnics = cpu_to_le16(1);
                req.min_stat_ctx = cpu_to_le16(1);
                req.min_hw_ring_grps = cpu_to_le16(1);
        } else {
                vf_cp_rings /= num_vfs;
                vf_tx_rings /= num_vfs;
                vf_rx_rings /= num_vfs;
                vf_vnics /= num_vfs;
                vf_stat_ctx /= num_vfs;
                vf_ring_grps /= num_vfs;

                req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                req.min_tx_rings = cpu_to_le16(vf_tx_rings);
                req.min_rx_rings = cpu_to_le16(vf_rx_rings);
                req.min_l2_ctxs = cpu_to_le16(4);
                req.min_vnics = cpu_to_le16(vf_vnics);
                req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        }
        req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.max_tx_rings = cpu_to_le16(vf_tx_rings);
        req.max_rx_rings = cpu_to_le16(vf_rx_rings);
        req.max_l2_ctxs = cpu_to_le16(4);
        req.max_vnics = cpu_to_le16(vf_vnics);
        req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
        req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                req.vf_id = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc) {
                        rc = -ENOMEM;
                        break;
                }
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = pf->first_vf_id + i;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                u16 n = pf->active_vfs;

                hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
                hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
                hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
                                             n;
                hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
                hw_resc->max_rsscos_ctxs -= pf->active_vfs;
                hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;

                rc = pf->active_vfs;
        }
        return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u32 rc = 0, mtu, i;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_ring_grps, max_stat_ctxs;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
        int total_vf_tx_rings = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

        max_stat_ctxs = hw_resc->max_stat_ctxs;

        /* Remaining rings are distributed equally among VFs for now */
        vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
        vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
                              num_vfs;
        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
        req.mtu = cpu_to_le16(mtu);

        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);

        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                int vf_tx_rsvd = vf_tx_rings;

                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
                                              &vf_tx_rsvd);
                if (rc)
                        break;
                total_vf_tx_rings += vf_tx_rsvd;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (rc)
                rc = -ENOMEM;
        if (pf->active_vfs) {
                hw_resc->max_tx_rings -= total_vf_tx_rings;
                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
                hw_resc->max_rsscos_ctxs -= num_vfs;
                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                hw_resc->max_vnics -= vf_vnics * num_vfs;
                rc = pf->active_vfs;
        }
        return rc;
}

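/* Reserve VF resources through HWRM_FUNC_VF_RESOURCE_CFG when the new
 * resource manager is in use, otherwise fall back to the legacy
 * HWRM_FUNC_CFG path.
 */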
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
        if (bp->flags & BNXT_FLAG_NEW_RM)
                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
        else
                return bnxt_hwrm_func_cfg(bp, num_vfs);
}

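/* Enable SR-IOV: trim the requested VF count to what the remaining PF
 * resources can support, allocate VF state, reserve firmware resources,
 * register the VF request buffers and finally call pci_enable_sriov().
 */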
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
        int avail_cp, avail_stat;

        /* Check if we can enable requested num of vf's. At a minimum
         * we require 1 RX 1 TX rings for each VF. In this minimum conf
         * features like TPA will not be available.
         */
        vfs_supported = *num_vfs;

        avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
        avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        avail_cp = min_t(int, avail_cp, avail_stat);

        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
                min_rss_ctxs = vfs_supported;

                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
                            min_rx_rings)
                                rx_ok = 1;
                } else {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
                            min_rx_rings)
                                rx_ok = 1;
                }
                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
                    avail_cp < min_rx_rings)
                        rx_ok = 0;

                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
                    avail_cp >= min_tx_rings)
                        tx_ok = 1;

                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
                    min_rss_ctxs)
                        rss_ok = 1;

                if (tx_ok && rx_ok && rss_ok)
                        break;

                vfs_supported--;
        }

        if (!vfs_supported) {
                netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
                return -EINVAL;
        }

        if (vfs_supported != *num_vfs) {
                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
                            *num_vfs, vfs_supported);
                *num_vfs = vfs_supported;
        }

        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
        if (rc)
                goto err_out1;

        /* Reserve resources for VFs */
        rc = bnxt_func_cfg(bp, *num_vfs);
        if (rc != *num_vfs) {
                if (rc <= 0) {
                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
                        *num_vfs = 0;
                        goto err_out2;
                }
                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
                *num_vfs = rc;
        }

        /* Register buffers for VFs */
        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                goto err_out2;

        bnxt_ulp_sriov_cfg(bp, *num_vfs);

        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;

        return 0;

err_out2:
        /* Free the resources reserved for various VF's */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
        bnxt_free_vf_resources(bp);

        return rc;
}

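/* Disable SR-IOV: destroy the VF representors, disable the PCI VFs unless
 * they are still assigned to VMs, release the firmware resources reserved
 * for them and restore the PF's resource limits.
 */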
void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        /* synchronize VF and VF-rep create and destroy */
        mutex_lock(&bp->sriov_lock);
        bnxt_vf_reps_destroy(bp);

        if (pci_vfs_assigned(bp->pdev)) {
                bnxt_hwrm_fwd_async_event_cmpl(
                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VF's */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
        mutex_unlock(&bp->sriov_lock);

        bnxt_free_vf_resources(bp);

        bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();

        bnxt_ulp_sriov_cfg(bp, 0);
}

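/* PCI sriov_configure hook: validate the request (MSI-X in use, interface
 * up, no VFs currently assigned to VMs), tear down any existing VFs and
 * enable the newly requested number of VFs.
 */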
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
                netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
                return 0;
        }

        rtnl_lock();
        if (!netif_running(dev)) {
                netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
                rtnl_unlock();
                return 0;
        }
        bp->sriov_cfg = true;
        rtnl_unlock();

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
                num_vfs = 0;
                goto sriov_cfg_exit;
        }

        /* Check if enabled VFs is same as requested */
        if (num_vfs && num_vfs == bp->pf.active_vfs)
                goto sriov_cfg_exit;

        /* if there are previous existing VFs, clean them up */
        bnxt_sriov_disable(bp);
        if (!num_vfs)
                goto sriov_cfg_exit;

        bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
        bp->sriov_cfg = false;
        wake_up(&bp->sriov_cfg_wait);

        return num_vfs;
}

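/* Forward a prepared response for a VF-originated HWRM request back to the
 * VF through the firmware (HWRM_FWD_RESP).
 */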
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                              void *encap_resp, __le64 encap_resp_addr,
                              __le16 encap_resp_cpr, u32 msg_size)
{
        int rc = 0;
        struct hwrm_fwd_resp_input req = {0};
        struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_len = cpu_to_le16(msg_size);
        req.encap_resp_addr = encap_resp_addr;
        req.encap_resp_cmpl_ring = encap_resp_cpr;
        memcpy(req.encap_resp, encap_resp, msg_size);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

        if (rc) {
                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
                goto fwd_resp_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
                           resp->error_code);
                rc = -1;
        }
fwd_resp_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                  u32 msg_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {0};
        struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
                goto fwd_err_resp_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
                           resp->error_code);
                rc = -1;
        }
fwd_err_resp_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {0};
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
                goto exec_fwd_resp_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
                           resp->error_code);
                rc = -1;
        }
exec_fwd_resp_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

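/* Handle a forwarded HWRM_FUNC_VF_CFG request: accept the VF's requested
 * default MAC address only if the VF is trusted or the PF has not assigned
 * one, otherwise reject the request.
 */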
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
        struct hwrm_func_vf_cfg_input *req =
                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

        /* Allow VF to set a valid MAC address, if trust is set to on or
         * if the PF assigned MAC address is zero
         */
        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
                if (is_valid_ether_addr(req->dflt_mac_addr) &&
                    ((vf->flags & BNXT_VF_TRUST) ||
                     (!is_valid_ether_addr(vf->mac_addr)))) {
                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
                }
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
        }
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

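/* Handle a forwarded HWRM_CFA_L2_FILTER_ALLOC request: reject invalid L2
 * addresses and otherwise decide whether the address the VF wants to use
 * is acceptable before forwarding the request to the firmware.
 */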
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
        struct hwrm_cfa_l2_filter_alloc_input *req =
                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
        bool mac_ok = false;

        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

        /* Allow VF to set a valid MAC address, if trust is set to on.
         * Or VF MAC address must first match MAC address in PF's context.
         * Otherwise, it must match the VF MAC address if firmware spec >=
         * 1.2.2
         */
        if (vf->flags & BNXT_VF_TRUST) {
                mac_ok = true;
        } else if (is_valid_ether_addr(vf->mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
                        mac_ok = true;
        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
                        mac_ok = true;
        } else if (bp->hwrm_spec_code < 0x10202) {
                mac_ok = true;
        } else {
                mac_ok = true;
        }
        if (mac_ok)
                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

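/* Handle a forwarded HWRM_PORT_PHY_QCFG request: pass the real link state
 * through when the VF link is in auto mode, otherwise synthesize a response
 * that reflects the forced link-up or link-down state set by the PF.
 */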
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;

        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
                /* real link */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
        } else {
                struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

                phy_qcfg_req =
                (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
                mutex_lock(&bp->hwrm_cmd_lock);
                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
                       sizeof(phy_qcfg_resp));
                mutex_unlock(&bp->hwrm_cmd_lock);
                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
                phy_qcfg_resp.valid = 1;

                if (vf->flags & BNXT_VF_LINK_UP) {
                        /* if physical link is down, force link up on VF */
                        if (phy_qcfg_resp.link !=
                            PORT_PHY_QCFG_RESP_LINK_LINK) {
                                phy_qcfg_resp.link =
                                        PORT_PHY_QCFG_RESP_LINK_LINK;
                                phy_qcfg_resp.link_speed = cpu_to_le16(
                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
                                phy_qcfg_resp.duplex_cfg =
                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
                                phy_qcfg_resp.duplex_state =
                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
                                phy_qcfg_resp.pause =
                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
                        }
                } else {
                        /* force link down */
                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
                        phy_qcfg_resp.link_speed = 0;
                        phy_qcfg_resp.duplex_state =
                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
                        phy_qcfg_resp.pause = 0;
                }
                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
                                        phy_qcfg_req->resp_addr,
                                        phy_qcfg_req->cmpl_ring,
                                        sizeof(phy_qcfg_resp));
        }
        return rc;
}

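/* Dispatch a forwarded VF request to the appropriate validation handler
 * based on the encapsulated HWRM request type.
 */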
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;
        struct input *encap_req = vf->hwrm_cmd_req_addr;
        u32 req_type = le16_to_cpu(encap_req->req_type);

        switch (req_type) {
        case HWRM_FUNC_VF_CFG:
                rc = bnxt_vf_configure_mac(bp, vf);
                break;
        case HWRM_CFA_L2_FILTER_ALLOC:
                rc = bnxt_vf_validate_set_mac(bp, vf);
                break;
        case HWRM_FUNC_CFG:
                /* TODO Validate if VF is allowed to change mac address,
                 * mtu, num of rings etc
                 */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_func_cfg_input));
                break;
        case HWRM_PORT_PHY_QCFG:
                rc = bnxt_vf_set_link(bp, vf);
                break;
        default:
                break;
        }
        return rc;
}

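/* Called on the PF to process all pending VF requests marked in the VF
 * event bitmap.
 */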
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        /* Scan through VF's and process commands */
        while (1) {
                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                if (vf_id >= active_vfs)
                        break;

                clear_bit(vf_id, bp->pf.vf_event_bmap);
                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                i = vf_id + 1;
        }
}

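/* Query HWRM_FUNC_QCAPS on the VF and sync the firmware-reported (PF
 * assigned) MAC address into the VF's netdev address.
 */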
void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);

        mutex_lock(&bp->hwrm_cmd_lock);
        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
                goto update_vf_mac_exit;

        /* Store MAC address from the firmware.  There are 2 cases:
         * 1. MAC address is valid.  It is assigned from the PF and we
         *    need to override the current VF MAC address with it.
         * 2. MAC address is zero.  The VF will use a random MAC address by
         *    default but the stored zero MAC will allow the VF user to change
         *    the random MAC address using ndo_set_mac_address() if he wants.
         */
        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

        /* overwrite netdev dev_addr with admin VF MAC */
        if (is_valid_ether_addr(bp->vf.mac_addr))
                memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
}

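/* Called on the VF to ask the PF (via firmware) to approve the MAC address
 * the VF wants to use; returns -EADDRNOTAVAIL if it is not approved.
 */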
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        if (bp->hwrm_spec_code < 0x10202) {
                if (is_valid_ether_addr(bp->vf.mac_addr))
                        rc = -EADDRNOTAVAIL;
                goto mac_done;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
        req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
        if (rc) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
        }
        return rc;
}

#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
        return 0;
}
#endif