qed_vf.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

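/* Post the prepared request to the PF over the hardware mailbox: the request's
 * DMA address is written to the USDM VF zone and the trigger bit is set, then
 * the VF polls the `done' byte in the reply buffer until the PF answers or the
 * roughly 2.5 second timeout expires. The channel mutex taken in
 * qed_vf_pf_prep() is released here.
 */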
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When the PF is done with the response, it will write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = -EBUSY;
		goto exit;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

exit:
	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));

	return rc;
}

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

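/* Negotiate resources with the PF. The VF first asks for the maximum it can
 * use; on PFVF_STATUS_NO_RESOURCE it retries with the amounts the PF
 * recommended (bounded by VF_ACQUIRE_THRESH attempts), and on a fastpath HSI
 * major-version mismatch it bails out so an incompatible VF driver is not
 * brought up.
 */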
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* start filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				DP_INFO(p_hwfn,
					"PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
				return -EINVAL;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);

			/* Clear response buffer */
			memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
		} else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
			   pfdev_info->major_fp_hsi &&
			   (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
			DP_NOTICE(p_hwfn,
				  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
				  pfdev_info->major_fp_hsi,
				  pfdev_info->minor_fp_hsi,
				  ETH_HSI_VER_MAJOR,
				  ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
			return -EINVAL;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			return -EAGAIN;
		}
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

	return 0;
}

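/* First stage of VF initialization: map the doorbell BAR, read the opaque and
 * concrete FIDs from the ME registers, allocate the DMA-coherent request,
 * reply and bulletin-board buffers, and then run the acquire handshake with
 * the PF.
 */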
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
			    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
		return -ENOMEM;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_qid,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	/* Learn the address of the producer from the response */
	if (pp_prod) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	if (pp_doorbell) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
			   tx_queue_id, *pp_doorbell, resp->offset);
	}

exit:
	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return rc;
}

static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

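/* Build a vport-update request as a chain of optional extended TLVs, one per
 * attribute the caller marked for update. Each TLV added also grows the
 * expected response size, since the PF answers with one default-response TLV
 * per request TLV.
 */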
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
		       sizeof(rss_params->rss_ind_table));
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

	return rc;
}

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EAGAIN;

	p_hwfn->b_int_enabled = 0;

	return 0;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

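/* Multicast filtering is carried as part of a vport-update message: each MAC
 * in the command is hashed into an approximation bin, and the resulting bin
 * bitmap is handed to the PF for configuration.
 */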
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EAGAIN;

	return 0;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

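/* Snapshot the PF-posted bulletin board. The copy is ignored if the version
 * has not changed, and rejected with -EAGAIN if its CRC does not match (the
 * PF may have been mid-update); otherwise it becomes the new shadow copy.
 */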
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}

bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
				    u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && is_mac_forced && cookie)
		ops->force_mac(cookie, mac);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}