/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *	       Ariel Elior <ariel.elior@qlogic.com>
 */
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
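
/* The VF-PF channel in a nutshell: a request is built as a chain of TLVs
 * in the VF's DMA-mapped vf2pf mailbox, the mapping is handed to the PF
 * FW (see bnx2x_send_msg2pf() below), and the VF then polls for the
 * response the PF writes back into the same mailbox. A request buffer
 * looks like:
 *
 *   [ first_tlv + request body | optional extra TLVs ... | LIST_END tlv ]
 *
 * where every TLV starts with a struct channel_tlv {type, length} header.
 */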
  23. /* place a given tlv on the tlv buffer at a given offset */
  24. static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
  25. u16 offset, u16 type, u16 length)
  26. {
  27. struct channel_tlv *tl =
  28. (struct channel_tlv *)(tlvs_list + offset);
  29. tl->type = type;
  30. tl->length = length;
  31. }
  32. /* Clear the mailbox and init the header of the first tlv */
  33. static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
  34. u16 type, u16 length)
  35. {
  36. mutex_lock(&bp->vf2pf_mutex);
  37. DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
  38. type);
  39. /* Clear mailbox */
  40. memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
  41. /* init type and length */
  42. bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
  43. /* init first tlv header */
  44. first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
  45. }
  46. /* releases the mailbox */
  47. static void bnx2x_vfpf_finalize(struct bnx2x *bp,
  48. struct vfpf_first_tlv *first_tlv)
  49. {
  50. DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
  51. first_tlv->tl.type);
  52. mutex_unlock(&bp->vf2pf_mutex);
  53. }

/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
				   enum channel_tlvs req_tlv)
{
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	do {
		if (tlv->type == req_tlv)
			return tlv;

		if (!tlv->length) {
			BNX2X_ERR("Found TLV with length 0\n");
			return NULL;
		}

		tlvs_list += tlv->length;
		tlv = (struct channel_tlv *)tlvs_list;
	} while (tlv->type != CHANNEL_TLV_LIST_END);

	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);

	return NULL;
}

/* list the types and lengths of the tlvs on the buffer */
static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
	int i = 1;
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	while (tlv->type != CHANNEL_TLV_LIST_END) {
		/* output tlv */
		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
		   tlv->type, tlv->length);

		/* advance to next tlv */
		tlvs_list += tlv->length;

		/* cast general tlv list pointer to channel tlv header */
		tlv = (struct channel_tlv *)tlvs_list;

		i++;

		/* break condition for this loop */
		if (i > MAX_TLVS_IN_LIST) {
			WARN(true, "corrupt tlvs");
			return;
		}
	}

	/* output last tlv */
	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
	   tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}
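
/* map a kernel return code onto the status codes of the PF/VF channel */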
static inline int bnx2x_pfvf_status_codes(int rc)
{
	switch (rc) {
	case 0:
		return PFVF_STATUS_SUCCESS;
	case -ENOMEM:
		return PFVF_STATUS_NO_RESOURCE;
	default:
		return PFVF_STATUS_FAILURE;
	}
}
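
/* Post a prepared request to the PF: write the request's DMA address into
 * the CSDM zone for this VF, trigger the FW, then poll the caller-supplied
 * 'done' byte - set by the PF once the response has been written back -
 * for up to 10 seconds.
 */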
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 100, interval = 100; /* wait for 10 seconds */

	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* if PF indicated channel is down avoid sending message. Return success
	 * so calling flow can continue
	 */
	bnx2x_sample_bulletin(bp);
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
		*done = PFVF_STATUS_SUCCESS;
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}

static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
	u32 me_reg;
	int tout = 10, interval = 100; /* Wait for 1 sec */

	do {
		/* pxp traps vf read of doorbells and returns me reg value */
		me_reg = readl(bp->doorbells);
		if (GOOD_ME_REG(me_reg))
			break;

		msleep(interval);

		BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
			  me_reg);
	} while (tout-- > 0);

	if (!GOOD_ME_REG(me_reg)) {
		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

	return 0;
}
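
/* ACQUIRE is the first request a VF sends over the channel: it introduces
 * the VF to the PF and negotiates resources (queues, status blocks and
 * filters). If the PF answers NO_RESOURCE it also reports the amounts it
 * can offer, and the request is humbled and retried (see the loop below).
 */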
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
	struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;
	req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;

	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* Request physical port identifier */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));

	/* Bulletin support for bulletin board with length > legacy length */
	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req,
		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request */
			req->resc_request.num_txqs =
				min(req->resc_request.num_txqs,
				    bp->acquire_resp.resc.num_txqs);
			req->resc_request.num_rxqs =
				min(req->resc_request.num_rxqs,
				    bp->acquire_resp.resc.num_rxqs);
			req->resc_request.num_sbs =
				min(req->resc_request.num_sbs,
				    bp->acquire_resp.resc.num_sbs);
			req->resc_request.num_mac_filters =
				min(req->resc_request.num_mac_filters,
				    bp->acquire_resp.resc.num_mac_filters);
			req->resc_request.num_vlan_filters =
				min(req->resc_request.num_vlan_filters,
				    bp->acquire_resp.resc.num_vlan_filters);
			req->resc_request.num_mc_filters =
				min(req->resc_request.num_mc_filters,
				    bp->acquire_resp.resc.num_mc_filters);

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* Determine reason of PF failure of acquire process */
			fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
							    CHANNEL_TLV_FP_HSI_SUPPORT);
			if (fp_hsi_resp && !fp_hsi_resp->is_supported)
				BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
			else
				BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
					  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* Retrieve physical port id (if possible) */
	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
			 bnx2x_search_tlv_list(bp, resp,
					       CHANNEL_TLV_PHYS_PORT_ID);
	if (phys_port_resp) {
		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
		bp->flags |= HAS_PHYS_PORT_ID;
	}

	/* Old hypervisors might not even support the FP_HSI_SUPPORT TLV.
	 * If that's the case, we need to make certain required FW was
	 * supported by such a hypervisor [i.e., v0-v2].
	 */
	fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
					    CHANNEL_TLV_FP_HSI_SUPPORT);
	if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
		BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");

		/* Since acquire succeeded on the PF side, we need to send a
		 * release message in order to allow future probes.
		 * The mailbox is already finalized here (and re-taken and
		 * released by bnx2x_vfpf_release()), so return directly
		 * rather than unlocking it a second time via 'out'.
		 */
		bnx2x_vfpf_finalize(bp, &req->first_tlv);
		bnx2x_vfpf_release(bp);

		return -EINVAL;
	}

	/* get HW info */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}

int bnx2x_vfpf_release(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;	/* signed: may carry a negative errno */
	u32 vf_id;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send release request */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		/* PF timeout */
		goto out;

	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
		/* PF released us */
		DP(BNX2X_MSG_SP, "vf released\n");
	} else {
		/* PF reports error */
		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - the request only supports a single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	req->stats_stride = sizeof(struct per_queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no sense to
	 * continue with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}
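
/* initialize the leading queue's classification objects (mac, vlan, mcast
 * and rss configuration) on the PF side, on behalf of the VF
 */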
static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

	/* mac */
	bnx2x_init_mac_obj(bp, &q->mac_obj,
			   cl_id, q->cid, func_id,
			   bnx2x_vf_sp(bp, vf, mac_rdata),
			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &vf->filter_state,
			   BNX2X_OBJ_TYPE_RX_TX,
			   &bp->macs_pool);
	/* vlan */
	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
			    cl_id, q->cid, func_id,
			    bnx2x_vf_sp(bp, vf, vlan_rdata),
			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
			    BNX2X_FILTER_VLAN_PENDING,
			    &vf->filter_state,
			    BNX2X_OBJ_TYPE_RX_TX,
			    &bp->vlans_pool);
	/* mcast */
	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
			     q->cid, func_id, func_id,
			     bnx2x_vf_sp(bp, vf, mcast_rdata),
			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING,
			     &vf->filter_state,
			     BNX2X_OBJ_TYPE_RX_TX);
	/* rss */
	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
				  func_id, func_id,
				  bnx2x_vf_sp(bp, vf, rss_rdata),
				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING,
				  &vf->filter_state,
				  BNX2X_OBJ_TYPE_RX_TX);

	vf->leading_rss = cl_id;
	q->is_leading = true;
	q->sp_initialized = true;
}

/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool is_leading)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u8 fp_idx = fp->index;
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (!fp->disable_tpa) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	if (is_leading)
		flags |= VFPF_QUEUE_FLG_LEADING_RSS;

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
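
/* ask the pf to tear down a queue that was previously set up for the vf */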
static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		goto out;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		rc = -EINVAL;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = vf_qid;
	req->n_mac_vlan_filters = 1;

	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
	if (set)
		req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;

	/* sample bulletin board for new mac */
	bnx2x_sample_bulletin(bp);

	/* copy mac from device to request */
	memcpy(req->filters[0].mac, addr, ETH_ALEN);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	/* failure may mean PF was configured with a new mac for us */
	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
		DP(BNX2X_MSG_IOV,
		   "vfpf SET MAC failed. Check bulletin board for new posts\n");

		/* copy mac from bulletin to device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

		/* check if bulletin board was updated */
		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
			/* copy mac from device to request */
			memcpy(req->filters[0].mac, bp->dev->dev_addr,
			       ETH_ALEN);

			/* send message to pf */
			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
					       bp->vf2pf_mbox_mapping);
		} else {
			/* no new info in bulletin */
			break;
		}
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* request pf to config rss table for vf queues */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			  struct bnx2x_config_rss_params *params)
{
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
			sizeof(*req));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
	req->rss_key_size = T_ETH_RSS_KEY;
	req->rss_result_mask = params->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
		req->rss_flags |= VFPF_RSS_SET_SRCH;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
		req->rss_flags |= VFPF_RSS_IPV4;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
		req->rss_flags |= VFPF_RSS_IPV4_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
		req->rss_flags |= VFPF_RSS_IPV4_UDP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
		req->rss_flags |= VFPF_RSS_IPV6;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
		req->rss_flags |= VFPF_RSS_IPV6_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
		req->rss_flags |= VFPF_RSS_IPV6_UDP;

	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		/* Since older drivers don't support this feature (and VF has
		 * no way of knowing other than failing this), don't propagate
		 * an error in this case.
		 */
		DP(BNX2X_MSG_IOV,
		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
		   resp->hdr.status);
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc = 0, i = 0;
	struct netdev_hw_addr *ha;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return -EINVAL;
	}

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	/* Get Rx mode requested */
	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops; check
	 * before copying so the request array cannot overflow, and bail out
	 * via 'out' so the mutex taken in bnx2x_vfpf_prep() is released.
	 */
	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
		DP(NETIF_MSG_IFUP,
		   "VF supports not more than %d multicast MAC addresses\n",
		   PFVF_MAX_MULTICAST_PER_VF);
		rc = -EINVAL;
		goto out;
	}

	netdev_for_each_mc_addr(ha, dev) {
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));
		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
		i++;
	}

	req->n_multicast = i;
	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("Sending a message failed: %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
			  resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

	/* Ignore everything except MODE_NONE */
	if (mode == BNX2X_RX_MODE_NONE) {
		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
	} else {
		/* Current PF driver will not look at the specific flags,
		 * but they are required when working with older drivers on hv.
		 */
		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
	}

	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending a message failed: %d\n", rc);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}

static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
					 struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u16 length, type;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
}
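
/* DMAE the prepared response into the VF's mailbox. The body is copied
 * first; the leading u64 of the header - which holds the status field the
 * VF polls for completion - is copied last, after the FW has been acked.
 */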
static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       int vf_rc)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
	dma_addr_t pf_addr;
	u64 vf_addr;
	int rc;

	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* Copy the response buffer. The first u64 is written afterwards, as
	 * the vf is sensitive to the header being written
	 */
	vf_addr += sizeof(u64);
	pf_addr += sizeof(u64);
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
	if (rc) {
		BNX2X_ERR("Failed to copy response body to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	vf_addr -= sizeof(u64);
	pf_addr -= sizeof(u64);

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	return;

mbx_error:
	bnx2x_vf_release(bp, vf);
}

static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      int rc)
{
	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
	bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}

static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
					struct bnx2x_virtf *vf,
					void *buffer,
					u16 *offset)
{
	struct vfpf_port_phys_id_resp_tlv *port_id;

	if (!(bp->flags & HAS_PHYS_PORT_ID))
		return;

	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
		      sizeof(struct vfpf_port_phys_id_resp_tlv));

	port_id = (struct vfpf_port_phys_id_resp_tlv *)
		  (((u8 *)buffer) + *offset);
	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);

	/* Offset should continue representing the offset to the tail
	 * of TLV data (outside this function scope)
	 */
	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}

static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
					 struct bnx2x_virtf *vf,
					 void *buffer,
					 u16 *offset)
{
	struct vfpf_fp_hsi_resp_tlv *fp_hsi;

	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
		      sizeof(struct vfpf_fp_hsi_resp_tlv));

	fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
		 (((u8 *)buffer) + *offset);
	fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;

	/* Offset should continue representing the offset to the tail
	 * of TLV data (outside this function scope)
	 */
	*offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
}

static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);
	u16 length;

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = bp->db_size;
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   PFVF_CAP_TPA |
				   PFVF_CAP_TPA_UPDATE);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}

			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* prepare response */
	length = sizeof(struct pfvf_acquire_resp_tlv);
	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);

	/* Handle possible VF requests for physical port identifiers.
	 * 'length' should continue to indicate the offset of the first empty
	 * place in the buffer (i.e., where next TLV should be inserted)
	 */
	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
				  CHANNEL_TLV_PHYS_PORT_ID))
		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);

	/* `New' vfs will want to know if fastpath HSI is supported, since
	 * if that's not the case they could print into system log the fact
	 * the driver version must be updated.
	 */
	bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);

	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* send the response */
	bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}

static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
				       struct vfpf_acquire_tlv *acquire)
{
	/* Windows driver does one of three things:
	 * 1. Old driver doesn't have bulletin board address set.
	 * 2. 'Middle' driver sends mc_num == 32.
	 * 3. New driver sets the OS field.
	 */
	if (!acquire->bulletin_addr ||
	    acquire->resc_request.num_mc_filters == 32 ||
	    ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
	     VF_OS_WINDOWS))
		return true;

	return false;
}

static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
					 struct bnx2x_virtf *vf,
					 struct bnx2x_vf_mbx *mbx)
{
	/* Linux drivers which correctly set the doorbell size also
	 * send a physical port request
	 */
	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
				  CHANNEL_TLV_PHYS_PORT_ID))
		return 0;

	/* Issue does not exist in windows VMs */
	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
		return 0;

	return -EOPNOTSUPP;
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdev info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* Prevent VFs with old drivers from loading, since they calculate
	 * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover
	 * while being upgraded.
	 */
	rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
	if (rc) {
		DP(BNX2X_MSG_IOV,
		   "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
		   vf->abs_vfid);
		goto out;
	}

	/* Verify the VF fastpath HSI can be supported by the loaded FW.
	 * Linux vfs should be oblivious to changes between v0 and v2.
	 */
	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
		vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
	else
		vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
				   ETH_FP_HSI_VER_2);
	if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
		DP(BNX2X_MSG_IOV,
		   "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
		   vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
		   ETH_FP_HSI_VERSION);
		rc = -EINVAL;
		goto out;
	}

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* store address of vf's bulletin board */
	vf->bulletin_map = acquire->bulletin_addr;
	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
		DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
		   vf->abs_vfid);
		vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
	} else {
		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
	}

out:
	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;
	int rc;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->stats_stride = init->stats_stride;
	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* set VF multiqueue statistics collection mode */
	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
		vf->cfg_flags |= VF_CFG_STATS_COALESCE;

	/* Update VF's view of link state */
	if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
		bnx2x_iov_link_update_vf(bp, vf->index);

	/* response */
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
				     unsigned long *sp_q_flags)
{
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);

	/* outer vlan removal is set according to PF's multi function mode */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
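
/* handle a SETUP_Q request: validate the queue index and construct the
 * queue with the init/setup parameters supplied by the VF
 */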

static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vf_queue_construct_params qctor;
	int rc = 0;

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID | VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		if (bnx2x_vfq_is_leading(q))
			bnx2x_leading_vfq_init(bp, vf, q);

		/* re-init the VF operation context */
		memset(&qctor, 0,
		       sizeof(struct bnx2x_vf_queue_construct_params));
		setup_p = &qctor.prep_qsetup;
		init_p = &qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			/* rx setup - multicast engine */
			if (bnx2x_vfq_is_leading(q)) {
				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);

				rxq_params->mcast_engine_id = mcast_id;
				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
			}

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);

		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
		if (rc)
			goto response;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
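
/* Hedged sketch (VF-side perspective, illustration only): the minimal
 * RX-queue request that bnx2x_vf_mbx_setup_q() above would act on. The
 * DMA addresses are parameters because the real values depend on the
 * VF's ring allocations; all concrete values here are assumptions.
 */
static void __maybe_unused
bnx2x_example_fill_setup_rxq(struct vfpf_setup_q_tlv *req,
			     dma_addr_t rxq_addr, dma_addr_t rcq_addr,
			     dma_addr_t rcq_np_addr)
{
	req->vf_qid = 0;			/* leading queue */
	req->param_valid = VFPF_RXQ_VALID;	/* no TX params in this sketch */
	req->rxq.rxq_addr = rxq_addr;
	req->rxq.rcq_addr = rcq_addr;
	req->rxq.rcq_np_addr = rcq_np_addr;
	req->rxq.mtu = 1500;			/* assumed MTU */
	req->rxq.flags = VFPF_QUEUE_FLG_STATS | VFPF_QUEUE_FLG_VLAN;
}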

static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vf_mac_vlan_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vf_mac_vlan_filters *fl = NULL;
	size_t fsz;

	fsz = tlv->n_mac_vlan_filters *
	      sizeof(struct bnx2x_vf_mac_vlan_filter) +
	      sizeof(struct bnx2x_vf_mac_vlan_filters);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type = BNX2X_VF_FILTER_MAC;
		} else {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
		}
		fl->filters[j].add =
			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
			true : false;
		fl->count++;
		/* advance the output slot; without this every matching
		 * filter would overwrite filters[0] while count kept
		 * growing.
		 */
		j++;
	}
	if (!fl->count)
		kfree(fl);
	else
		*pfl = fl;

	return 0;
}
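
/* Note on the sizing above: fsz is the classic "header plus flexible
 * array" computation. On kernels that provide the overflow-checked
 * helper, an equivalent and safer spelling would be:
 *
 *	fsz = struct_size(fl, filters, tlv->n_mac_vlan_filters);
 */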

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	/* no newline here: the DP_CONT calls below continue this line and
	 * the final DP_CONT terminates it.
	 */
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
				      struct vfpf_set_q_filters_tlv *filters)
{
	int i;

	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
		for (i = 0; i < filters->n_mac_vlan_filters; i++)
			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
						 &filters->filters[i]);

	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
		for (i = 0; i < filters->n_multicast; i++)
			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID

static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	/* check for any mac/vlan changes */
	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
		/* build mac list */
		struct bnx2x_vf_mac_vlan_filters *fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build vlan list */
		fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
		unsigned long accept = 0;
		struct pf_vf_bulletin_content *bulletin =
			BP_VF_BULLETIN(bp, vf->index);

		/* Ignore VF requested mode; instead set a regular mode */
		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
		}

		/* A packet arriving at the VF's MAC should be accepted
		 * with any vlan, unless a vlan has already been
		 * configured.
		 */
		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

		/* set rx-mode */
		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
		if (rc)
			goto op_err;
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
		/* set mcasts */
		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
				    msg->n_multicast, false);
		if (rc)
			goto op_err;
	}
op_err:
	if (rc)
		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
			  vf->abs_vfid, msg->vf_qid, rc);
	return rc;
}
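
/* Hedged sketch (illustration only): what a VF-side rx-mode request for
 * the handler above looks like. Note the PF deliberately ignores which
 * accept bits were requested: anything other than ACCEPT_NONE yields the
 * same regular unicast/multicast/broadcast mode.
 */
static void __maybe_unused
bnx2x_example_fill_rx_mask(struct vfpf_set_q_filters_tlv *req)
{
	req->vf_qid = 0;
	req->flags = VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;	/* i.e. drop all */
}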

static int bnx2x_filters_validate_mac(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if a mac was already set for this VF via the set vf mac ndo, we
	 * only accept mac configurations of that mac. Why accept them at
	 * all? Because the PF may have been unable to configure the mac at
	 * the time, since the queue was not yet set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		/* once a mac was set by ndo can only accept a single mac... */
		if (filters->n_mac_vlan_filters > 1) {
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}

		/* ...and only the mac set by the ndo */
		if (filters->n_mac_vlan_filters == 1 &&
		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

response:
	return rc;
}
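
/* Readability sketch (hypothetical helper, not used above): the bulletin
 * board's valid_bitmap convention - bit MAC_ADDR_VALID set means the
 * hypervisor pinned a MAC via the set_vf_mac ndo.
 */
static inline bool __maybe_unused
bnx2x_example_mac_pinned(struct pf_vf_bulletin_content *bulletin)
{
	return !!(bulletin->valid_bitmap & (1 << MAC_ADDR_VALID));
}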

static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if a vlan was set by the hypervisor we don't allow the guest to
	 * configure one itself.
	 */
	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		int i;

		/* search for vlan filters */
		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
			if (filters->filters[i].flags &
			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
				BNX2X_ERR("VF[%d] attempted to configure a vlan but one was already set by the hypervisor. Aborting request\n",
					  vf->abs_vfid);
				rc = -EPERM;
				goto response;
			}
		}
	}

	/* verify vf_qid; valid queue ids are 0..vf_rxq_count(vf) - 1 */
	if (filters->vf_qid >= vf_rxq_count(vf)) {
		rc = -EPERM;
		goto response;
	}

response:
	return rc;
}

static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	int rc;

	rc = bnx2x_filters_validate_mac(bp, vf, filters);
	if (rc)
		goto response;

	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
	if (rc)
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	rc = bnx2x_vf_close(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	rc = bnx2x_vf_free(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_config_rss_params rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
	int rc = 0;

	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		rc = -EINVAL;
		goto mbx_resp;
	}

	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));

	/* set vfop params according to rss tlv */
	memcpy(rss.ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
	rss.rss_obj = &vf->rss_conf_obj;
	rss.rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	rss.rss_flags = 0;
	rss.ramrod_flags = 0;

	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);

	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		rc = -EINVAL;
		goto mbx_resp;
	}

	rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
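
/* Hedged sketch (illustration only): an RSS update that passes the checks
 * in the handler above - regular mode with TCP hashing for both IP
 * versions. The result mask is an assumption for a 128-entry indirection
 * table; requesting UDP hashing without the matching TCP flag would trip
 * the FW-assert guard above.
 */
static void __maybe_unused
bnx2x_example_fill_rss(struct vfpf_rss_tlv *req)
{
	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
	req->rss_key_size = T_ETH_RSS_KEY;
	req->rss_result_mask = 0x7f;	/* assumed: 128-entry table */
	req->rss_flags = VFPF_RSS_MODE_REGULAR |
			 VFPF_RSS_IPV4 | VFPF_RSS_IPV4_TCP |
			 VFPF_RSS_IPV6 | VFPF_RSS_IPV6_TCP;
}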

static int bnx2x_validate_tpa_params(struct bnx2x *bp,
				     struct vfpf_tpa_tlv *tpa_tlv)
{
	int rc = 0;

	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
	    U_ETH_MAX_SGES_FOR_PACKET) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_sges_for_packet,
			  U_ETH_MAX_SGES_FOR_PACKET);
	}

	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_tpa_queues,
			  MAX_AGG_QS(bp));
	}

	return rc;
}

static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_queue_update_tpa_params vf_op_params;
	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
	int rc = 0;

	memset(&vf_op_params, 0, sizeof(vf_op_params));

	/* propagate the validation result; otherwise a rejected request
	 * would be acknowledged with a success status.
	 */
	rc = bnx2x_validate_tpa_params(bp, tpa_tlv);
	if (rc)
		goto mbx_resp;

	vf_op_params.complete_on_both_clients =
		tpa_tlv->tpa_client_info.complete_on_both_clients;
	vf_op_params.dont_verify_thr =
		tpa_tlv->tpa_client_info.dont_verify_thr;
	vf_op_params.max_agg_sz =
		tpa_tlv->tpa_client_info.max_agg_size;
	vf_op_params.max_sges_pkt =
		tpa_tlv->tpa_client_info.max_sges_for_packet;
	vf_op_params.max_tpa_queues =
		tpa_tlv->tpa_client_info.max_tpa_queues;
	vf_op_params.sge_buff_sz =
		tpa_tlv->tpa_client_info.sge_buff_size;
	vf_op_params.sge_pause_thr_high =
		tpa_tlv->tpa_client_info.sge_pause_thr_high;
	vf_op_params.sge_pause_thr_low =
		tpa_tlv->tpa_client_info.sge_pause_thr_low;
	vf_op_params.tpa_mode =
		tpa_tlv->tpa_client_info.tpa_mode;
	vf_op_params.update_ipv4 =
		tpa_tlv->tpa_client_info.update_ipv4;
	vf_op_params.update_ipv6 =
		tpa_tlv->tpa_client_info.update_ipv6;

	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			return;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			return;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_TPA:
			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
			return;
		}
	} else {
		/* unknown TLV: either this belongs to a VF driver from the
		 * future - a version written after this PF driver, which
		 * supports features unknown to us as of yet - or a buggy VF
		 * driver is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
	}

	/* can we respond to VF (do we have an address for it?) */
	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
		/* notify the VF that we do not support this request */
		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
	} else {
		/* can't send a response since this VF is unknown to us
		 * just ack the FW to release the mailbox and unlock
		 * the channel.
		 */
		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		/* Firmware ack should be written before unlocking channel */
		mmiowb();
		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	}
}
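
/* Pattern note (CHANNEL_TLV_FOO is hypothetical): every handler reached
 * from the switch above must end in bnx2x_vf_mbx_resp(), since the
 * response path is what releases the channel lock taken before dispatch:
 *
 *	case CHANNEL_TLV_FOO:
 *		bnx2x_vf_mbx_foo(bp, vf, mbx);
 *		return;
 */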

void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
			   struct vf_pf_event_data *vfpf_event)
{
	u8 vf_idx;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);

	/* Sanity checks; consider removing later. */

	/* check if the vf_id is valid; relative ids run 0..NR_VIRTFN - 1 */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >=
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		return;
	}

	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);

	/* Update VFDB with current message and schedule its handling */
	mutex_lock(&BP_VFDB(bp)->event_mutex);
	BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
	BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
	mutex_unlock(&BP_VFDB(bp)->event_mutex);

	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}

/* handle new vf-pf messages */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
	u64 events;
	u8 vf_idx;
	int rc;

	if (!vfdb)
		return;

	mutex_lock(&vfdb->event_mutex);
	events = vfdb->event_occur;
	vfdb->event_occur = 0;
	mutex_unlock(&vfdb->event_mutex);

	for_each_vf(bp, vf_idx) {
		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* Handle VFs which have pending events */
		if (!(events & (1ULL << vf_idx)))
			continue;

		DP(BNX2X_MSG_IOV,
		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
		   mbx->first_tlv.resp_msg_offset);

		/* dmae to get the VF request */
		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
					  vf->abs_vfid, mbx->vf_addr_hi,
					  mbx->vf_addr_lo,
					  sizeof(union vfpf_tlvs) / 4);
		if (rc) {
			BNX2X_ERR("Failed to copy request VF %d\n",
				  vf->abs_vfid);
			bnx2x_vf_release(bp, vf);
			return;
		}

		/* process the VF message header */
		mbx->first_tlv = mbx->msg->req.first_tlv;

		/* Clean the response buffer so that stale data is not
		 * mistaken for a TLV chain.
		 */
		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));

		/* dispatch the request (will prepare the response) */
		bnx2x_vf_mbx_request(bp, vf, mbx);
	}
}

void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
				bool support_long)
{
	/* Older VFs contain a bug where they can't validate the CRC of
	 * bulletin boards larger than the legacy size.
	 */
	bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
					  BULLETIN_CONTENT_LEGACY_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
}

/* propagate local bulletin board to vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
			     vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bnx2x_vf_bulletin_finalize(bulletin,
				   (bnx2x_vf(bp, vf, cfg_flags) &
				    VF_CFG_EXT_BULLETIN) ? true : false);

	/* propagate bulletin board via dmae to vm memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}
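
/* Usage sketch (hypothetical caller): after changing a field in the local
 * bulletin copy, the PF must mark it valid and re-post, e.g. when pinning
 * a MAC for VF vf_idx:
 *
 *	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
 *	memcpy(bulletin->mac, mac, ETH_ALEN);
 *	rc = bnx2x_post_vf_bulletin(bp, vf_idx);
 */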