/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"
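
/* VF -> PF channel, in broad strokes: the VF builds a TLV-chained request in
 * a DMA-coherent mailbox, rings a trigger register in its BAR, and polls for
 * the PF to write a reply into a second mailbox (see qed_send_msg2pf()).
 */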
static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_send_msg2pf().
	 * So, qed_vf_pf_prep() and qed_send_msg2pf()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "PF response: %d [Type %d]\n",
			   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}
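
/* Illustrative sketch of how the helpers above compose into one exchange
 * (CHANNEL_TLV_FOO is hypothetical; real callers such as qed_vf_pf_rxq_start()
 * below follow exactly this shape, with p_iov = p_hwfn->vf_iov_info):
 *
 *	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_FOO, sizeof(*req));
 *	req->some_field = ...;					// fill request
 *	qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));	// terminate list
 *	resp = &p_iov->pf2vf_reply->default_resp;
 *	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	...							// check resp->hdr.status
 *	qed_vf_pf_req_end(p_hwfn, rc);		// drops the channel mutex
 */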

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}
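
/* ACQUIRE negotiation: send the full resource request; on
 * PFVF_STATUS_NO_RESOURCE retry up to VF_ACQUIRE_THRESH times with the
 * PF-recommended (reduced) amounts, and on PFVF_STATUS_NOT_SUPPORTED fall
 * back to the legacy (pre-FP-HSI) flow where possible.
 */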
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* starting filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
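
/* One-time VF init: map doorbell/ME registers from the VF BAR, allocate the
 * DMA-coherent request/reply mailboxes and the bulletin board, then run the
 * ACQUIRE negotiation above.
 */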
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
			    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	if (!p_iov->bulletin.p_virt)
		goto free_pf2vf_reply;
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_pf2vf_reply:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union pfvf_tlvs),
			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}
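
/* Mstorm queue-zone base in the VF BAR; used to compute Rx producer addresses
 * locally when a legacy PF does not return an explicit producer offset.
 */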
#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START +	\
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_qid,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (pp_prod && p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->cdev) +
			   hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (pp_prod && !p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	if (pp_doorbell) {
		/* Modern PFs provide the actual offsets, while legacy
		 * provided only the queue id.
		 */
		if (!p_iov->b_pre_fp_hsi) {
			*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				       resp->offset;
		} else {
			u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
			u32 db_addr;

			db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY);
			*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				       db_addr;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
			   tx_queue_id, *pp_doorbell, resp->offset);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}
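
/* A vport-update request is a variable-length chain: one extended TLV per
 * feature being updated, with resp_size grown by one default response per
 * TLV so the PF can answer each feature individually.
 */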
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
		       sizeof(rss_params->rss_ind_table));
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
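
/* Note: "reset" is expressed over the channel as CHANNEL_TLV_CLOSE; on
 * success the VF marks its interrupts as disabled.
 */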
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}
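
/* Multicast is configured as an approximate filter: each MAC is hashed into
 * one of ETH_MULTICAST_MAC_BINS_IN_REGS bins via qed_mcast_bin_from_mac()
 * and the bin set is passed to the PF through a vport-update.
 */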
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
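
/* Bulletin board consumption: copy the PF-written bulletin into a local
 * shadow, skip if the version is unchanged, and validate it with a crc32
 * computed over everything past the leading crc field.
 */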
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;

	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && is_mac_forced && cookie)
		ops->force_mac(cookie, mac);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}