bnx2x_vfpf.c

/* bnx2x_vfpf.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *	       Ariel Elior <ariel.elior@qlogic.com>
 */
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);

/* place a given tlv on the tlv buffer at a given offset */
static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
			  u16 offset, u16 type, u16 length)
{
	struct channel_tlv *tl =
		(struct channel_tlv *)(tlvs_list + offset);

	tl->type = type;
	tl->length = length;
}
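
/* Note: a TLV chain here is a sequence of channel_tlv headers, each followed
 * by its payload; tl->length covers the whole TLV (header plus payload), so
 * advancing a cursor by tl->length lands on the next header. Chains are
 * terminated by a CHANNEL_TLV_LIST_END header (see bnx2x_search_tlv_list()).
 */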

/* Clear the mailbox and init the header of the first tlv */
static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
			    u16 type, u16 length)
{
	mutex_lock(&bp->vf2pf_mutex);

	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* releases the mailbox */
static void bnx2x_vfpf_finalize(struct bnx2x *bp,
				struct vfpf_first_tlv *first_tlv)
{
	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
	   first_tlv->tl.type);

	mutex_unlock(&bp->vf2pf_mutex);
}
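
/* Note: bnx2x_vfpf_prep() takes vf2pf_mutex and bnx2x_vfpf_finalize()
 * releases it, so the pair brackets every request/response exchange and
 * every path between them must eventually reach the finalize call - hence
 * the "goto out" pattern used by the callers throughout this file.
 */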

/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
				   enum channel_tlvs req_tlv)
{
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	do {
		if (tlv->type == req_tlv)
			return tlv;

		if (!tlv->length) {
			BNX2X_ERR("Found TLV with length 0\n");
			return NULL;
		}

		tlvs_list += tlv->length;
		tlv = (struct channel_tlv *)tlvs_list;
	} while (tlv->type != CHANNEL_TLV_LIST_END);

	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);

	return NULL;
}

/* list the types and lengths of the tlvs on the buffer */
static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
	int i = 1;
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	while (tlv->type != CHANNEL_TLV_LIST_END) {
		/* output tlv */
		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
		   tlv->type, tlv->length);

		/* advance to next tlv */
		tlvs_list += tlv->length;

		/* cast general tlv list pointer to channel tlv header */
		tlv = (struct channel_tlv *)tlvs_list;

		i++;

		/* break condition for this loop */
		if (i > MAX_TLVS_IN_LIST) {
			WARN(true, "corrupt tlvs");
			return;
		}
	}

	/* output last tlv */
	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
	   tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
	switch (rc) {
	case 0:
		return PFVF_STATUS_SUCCESS;
	case -ENOMEM:
		return PFVF_STATUS_NO_RESOURCE;
	default:
		return PFVF_STATUS_FAILURE;
	}
}

static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 100, interval = 100; /* wait for 10 seconds */

	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* if PF indicated channel is down avoid sending message. Return success
	 * so calling flow can continue
	 */
	bnx2x_sample_bulletin(bp);
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
		*done = PFVF_STATUS_SUCCESS;
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}
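
/* Note: the channel handshake above is deliberately ordered - the request's
 * DMA address is written to the non-trigger zone, wmb() makes it visible,
 * and only then is the trigger byte written; the PF's answer arrives via
 * DMAE into the response buffer, flipping the *done status byte we poll.
 */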

static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
	u32 me_reg;
	int tout = 10, interval = 100; /* Wait for 1 sec */

	do {
		/* pxp traps vf read of doorbells and returns me reg value */
		me_reg = readl(bp->doorbells);
		if (GOOD_ME_REG(me_reg))
			break;

		msleep(interval);

		BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
			  me_reg);
	} while (tout-- > 0);

	if (!GOOD_ME_REG(me_reg)) {
		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

	return 0;
}

int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
	struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;
	req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;

	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* Request physical port identifier */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));

	/* Bulletin support for bulletin board with length > legacy length */
	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
	/* vlan filtering is supported */
	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req,
		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request */
			req->resc_request.num_txqs =
				min(req->resc_request.num_txqs,
				    bp->acquire_resp.resc.num_txqs);
			req->resc_request.num_rxqs =
				min(req->resc_request.num_rxqs,
				    bp->acquire_resp.resc.num_rxqs);
			req->resc_request.num_sbs =
				min(req->resc_request.num_sbs,
				    bp->acquire_resp.resc.num_sbs);
			req->resc_request.num_mac_filters =
				min(req->resc_request.num_mac_filters,
				    bp->acquire_resp.resc.num_mac_filters);
			req->resc_request.num_vlan_filters =
				min(req->resc_request.num_vlan_filters,
				    bp->acquire_resp.resc.num_vlan_filters);
			req->resc_request.num_mc_filters =
				min(req->resc_request.num_mc_filters,
				    bp->acquire_resp.resc.num_mc_filters);

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* Determine reason of PF failure of acquire process */
			fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
							    CHANNEL_TLV_FP_HSI_SUPPORT);
			if (fp_hsi_resp && !fp_hsi_resp->is_supported)
				BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
			else
				BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
					  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* Retrieve physical port id (if possible) */
	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
			 bnx2x_search_tlv_list(bp, resp,
					       CHANNEL_TLV_PHYS_PORT_ID);
	if (phys_port_resp) {
		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
		bp->flags |= HAS_PHYS_PORT_ID;
	}

	/* Old Hypervisors might not even support the FP_HSI_SUPPORT TLV.
	 * If that's the case, we need to make certain required FW was
	 * supported by such a hypervisor [i.e., v0-v2].
	 */
	fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
					    CHANNEL_TLV_FP_HSI_SUPPORT);
	if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
		BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");

		/* Since acquire succeeded on the PF side, we need to send a
		 * release message in order to allow future probes.
		 */
		bnx2x_vfpf_finalize(bp, &req->first_tlv);
		bnx2x_vfpf_release(bp);

		rc = -EINVAL;
		goto out;
	}

	/* get HW info */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;

	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}
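
/* Note: the acquire loop above implements a simple negotiation - when the PF
 * answers PFVF_STATUS_NO_RESOURCE it also reports the maximum counts it can
 * grant, so the VF clamps each requested count with min() and retries, up to
 * VF_ACQUIRE_THRESH attempts, before giving up with -EAGAIN.
 */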

int bnx2x_vfpf_release(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u32 rc, vf_id;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send release request */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		/* PF timeout */
		goto out;

	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
		/* PF released us */
		DP(BNX2X_MSG_SP, "vf released\n");
	} else {
		/* PF reports error */
		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - the request only supports a single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	req->stats_stride = sizeof(struct per_queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no sense to
	 * continue with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}

static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

	/* mac */
	bnx2x_init_mac_obj(bp, &q->mac_obj,
			   cl_id, q->cid, func_id,
			   bnx2x_vf_sp(bp, vf, mac_rdata),
			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &vf->filter_state,
			   BNX2X_OBJ_TYPE_RX_TX,
			   &vf->vf_macs_pool);
	/* vlan */
	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
			    cl_id, q->cid, func_id,
			    bnx2x_vf_sp(bp, vf, vlan_rdata),
			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
			    BNX2X_FILTER_VLAN_PENDING,
			    &vf->filter_state,
			    BNX2X_OBJ_TYPE_RX_TX,
			    &vf->vf_vlans_pool);
	/* vlan-mac */
	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
				cl_id, q->cid, func_id,
				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
				BNX2X_FILTER_VLAN_MAC_PENDING,
				&vf->filter_state,
				BNX2X_OBJ_TYPE_RX_TX,
				&vf->vf_macs_pool,
				&vf->vf_vlans_pool);
	/* mcast */
	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
			     q->cid, func_id, func_id,
			     bnx2x_vf_sp(bp, vf, mcast_rdata),
			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING,
			     &vf->filter_state,
			     BNX2X_OBJ_TYPE_RX_TX);

	/* rss */
	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
				  func_id, func_id,
				  bnx2x_vf_sp(bp, vf, rss_rdata),
				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING,
				  &vf->filter_state,
				  BNX2X_OBJ_TYPE_RX_TX);

	vf->leading_rss = cl_id;
	q->is_leading = true;
	q->sp_initialized = true;
}
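
/* Note: the mac/vlan/vlan-mac objects above are per-queue, while the mcast
 * and rss configuration objects are per-VF and are anchored on the leading
 * queue here; its client id is recorded in vf->leading_rss for later RSS
 * updates.
 */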

/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool is_leading)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u8 fp_idx = fp->index;
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (fp->mode != TPA_MODE_DISABLED) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	if (is_leading)
		flags |= VFPF_QUEUE_FLG_LEADING_RSS;

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		goto out;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		rc = -EINVAL;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = vf_qid;
	req->n_mac_vlan_filters = 1;

	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
	if (set)
		req->filters[0].flags |= VFPF_Q_FILTER_SET;

	/* sample bulletin board for new mac */
	bnx2x_sample_bulletin(bp);

	/* copy mac from device to request */
	memcpy(req->filters[0].mac, addr, ETH_ALEN);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	/* failure may mean PF was configured with a new mac for us */
	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
		DP(BNX2X_MSG_IOV,
		   "vfpf SET MAC failed. Check bulletin board for new posts\n");

		/* copy mac from bulletin to device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

		/* check if bulletin board was updated */
		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
			/* copy mac from device to request */
			memcpy(req->filters[0].mac, bp->dev->dev_addr,
			       ETH_ALEN);

			/* send message to pf */
			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
					       bp->vf2pf_mbox_mapping);
		} else {
			/* no new info in bulletin */
			break;
		}
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
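
/* Note: a PFVF_STATUS_FAILURE answer to SET MAC can mean the hypervisor has
 * forced a different MAC through the pf-to-vf bulletin board; the retry loop
 * above adopts the bulletin MAC as the device address and resends the filter
 * request until the bulletin stops changing.
 */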

/* request pf to config rss table for vf queues */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			  struct bnx2x_config_rss_params *params)
{
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
			sizeof(*req));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
	req->rss_key_size = T_ETH_RSS_KEY;
	req->rss_result_mask = params->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
		req->rss_flags |= VFPF_RSS_SET_SRCH;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
		req->rss_flags |= VFPF_RSS_IPV4;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
		req->rss_flags |= VFPF_RSS_IPV4_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
		req->rss_flags |= VFPF_RSS_IPV4_UDP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
		req->rss_flags |= VFPF_RSS_IPV6;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
		req->rss_flags |= VFPF_RSS_IPV6_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
		req->rss_flags |= VFPF_RSS_IPV6_UDP;

	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		/* Since older drivers don't support this feature (and VF has
		 * no way of knowing other than failing this), don't propagate
		 * an error in this case.
		 */
		DP(BNX2X_MSG_IOV,
		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
		   resp->hdr.status);
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc = 0, i = 0;
	struct netdev_hw_addr *ha;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return -EINVAL;
	}

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	/* Get Rx mode requested */
	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
		DP(NETIF_MSG_IFUP,
		   "VF supports not more than %d multicast MAC addresses\n",
		   PFVF_MAX_MULTICAST_PER_VF);
		rc = -EINVAL;
		goto out;
	}

	netdev_for_each_mc_addr(ha, dev) {
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));
		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
		i++;
	}

	req->n_multicast = i;
	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("Sending a message failed: %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
			  resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* request pf to add a vlan for the vf */
int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc = 0;

	if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
		DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
		return 0;
	}

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = vf_qid;
	req->n_mac_vlan_filters = 1;

	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;

	if (add)
		req->filters[0].flags |= VFPF_Q_FILTER_SET;

	/* sample bulletin board for hypervisor vlan */
	bnx2x_sample_bulletin(bp);

	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
		BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
		rc = -EINVAL;
		goto out;
	}

	req->filters[0].vlan_tag = vid;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
			  vid);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

	/* Ignore everything except MODE_NONE */
	if (mode == BNX2X_RX_MODE_NONE) {
		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
	} else {
		/* Current PF driver will not look at the specific flags,
		 * but they are required when working with older drivers on hv.
		 */
		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		if (mode == BNX2X_RX_MODE_PROMISC)
			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
	}

	if (bp->accept_any_vlan)
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;

	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending a message failed: %d\n", rc);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
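
/* Note: from_vf selects the copy direction (VF memory -> PF buffer or the
 * reverse); VF addresses are passed as explicit hi/lo halves because they
 * come from the mailbox message, and dmae.len counts 32-bit words, which is
 * why callers divide byte sizes by 4.
 */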

static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
					 struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u16 length, type;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
}

static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       int vf_rc)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
	dma_addr_t pf_addr;
	u64 vf_addr;
	int rc;

	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* Copy the response buffer. The first u64 is written afterwards, as
	 * the vf is sensitive to the header being written
	 */
	vf_addr += sizeof(u64);
	pf_addr += sizeof(u64);
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
	if (rc) {
		BNX2X_ERR("Failed to copy response body to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	vf_addr -= sizeof(u64);
	pf_addr -= sizeof(u64);

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	return;

mbx_error:
	bnx2x_vf_release(bp, vf);
}
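
/* Note: the response is pushed to the VF in two DMAE passes - the body first
 * (skipping the leading u64), then the first u64 last, because that header
 * holds the status byte the VF polls in bnx2x_send_msg2pf(); writing it
 * early would let the VF read a half-copied response.
 */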

static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      int rc)
{
	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
	bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}

static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
					struct bnx2x_virtf *vf,
					void *buffer,
					u16 *offset)
{
	struct vfpf_port_phys_id_resp_tlv *port_id;

	if (!(bp->flags & HAS_PHYS_PORT_ID))
		return;

	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
		      sizeof(struct vfpf_port_phys_id_resp_tlv));

	port_id = (struct vfpf_port_phys_id_resp_tlv *)
		  (((u8 *)buffer) + *offset);
	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);

	/* Offset should continue representing the offset to the tail
	 * of TLV data (outside this function scope)
	 */
	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}

static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
					 struct bnx2x_virtf *vf,
					 void *buffer,
					 u16 *offset)
{
	struct vfpf_fp_hsi_resp_tlv *fp_hsi;

	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
		      sizeof(struct vfpf_fp_hsi_resp_tlv));

	fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
		 (((u8 *)buffer) + *offset);
	fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;

	/* Offset should continue representing the offset to the tail
	 * of TLV data (outside this function scope)
	 */
	*offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
}

static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);
	u16 length;

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = bp->db_size;
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   PFVF_CAP_TPA |
				   PFVF_CAP_TPA_UPDATE |
				   PFVF_CAP_VLAN_FILTER);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}

			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* prepare response */
	length = sizeof(struct pfvf_acquire_resp_tlv);
	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);

	/* Handle possible VF requests for physical port identifiers.
	 * 'length' should continue to indicate the offset of the first empty
	 * place in the buffer (i.e., where next TLV should be inserted)
	 */
	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
				  CHANNEL_TLV_PHYS_PORT_ID))
		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);

	/* `New' vfs will want to know if fastpath HSI is supported, since
	 * if that's not the case they could print into system log the fact
	 * the driver version must be updated.
	 */
	bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);

	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* send the response */
	bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}

static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
				       struct vfpf_acquire_tlv *acquire)
{
	/* Windows driver does one of three things:
	 * 1. Old driver doesn't have bulletin board address set.
	 * 2. 'Middle' driver sends mc_num == 32.
	 * 3. New driver sets the OS field.
	 */
	if (!acquire->bulletin_addr ||
	    acquire->resc_request.num_mc_filters == 32 ||
	    ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
	     VF_OS_WINDOWS))
		return true;

	return false;
}

static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
					 struct bnx2x_virtf *vf,
					 struct bnx2x_vf_mbx *mbx)
{
	/* Linux drivers which correctly set the doorbell size also
	 * send a physical port request
	 */
	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
				  CHANNEL_TLV_PHYS_PORT_ID))
		return 0;

	/* Issue does not exist in windows VMs */
	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
		return 0;

	return -EOPNOTSUPP;
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdef info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* Prevent VFs with old drivers from loading, since they calculate
	 * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover
	 * while being upgraded.
	 */
	rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
	if (rc) {
		DP(BNX2X_MSG_IOV,
		   "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
		   vf->abs_vfid);
		goto out;
	}

	/* Verify the VF fastpath HSI can be supported by the loaded FW.
	 * Linux vfs should be oblivious to changes between v0 and v2.
	 */
	if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
		vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
	else
		vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
				   ETH_FP_HSI_VER_2);
	if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
		DP(BNX2X_MSG_IOV,
		   "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
		   vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
		   ETH_FP_HSI_VERSION);
		rc = -EINVAL;
		goto out;
	}

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* store address of vf's bulletin board */
	vf->bulletin_map = acquire->bulletin_addr;
	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
		DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
		   vf->abs_vfid);
		vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
	} else {
		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
	}

	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
		DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
		   vf->abs_vfid);
		vf->cfg_flags |= VF_CFG_VLAN_FILTER;
	} else {
		vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
	}

out:
	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;
	int rc;

	/* record ghost addresses from vf message */
	vf->fw_stat_map = init->stats_addr;
	vf->stats_stride = init->stats_stride;
	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* set VF multiqueue statistics collection mode */
	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
		vf->cfg_flags |= VF_CFG_STATS_COALESCE;

	/* Update VF's view of link state */
	if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
		bnx2x_iov_link_update_vf(bp, vf->index);

	/* response */
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
				     unsigned long *sp_q_flags)
{
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);

	/* outer vlan removal is set according to PF's multi function mode */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
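
/* Handle CHANNEL_TLV_SETUP_Q: build queue init/setup ramrod parameters from
 * the VF's request and construct the queue. TX and RX parameters may arrive
 * in the same request; they share a single status block.
 */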
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vf_queue_construct_params qctor;
	int rc = 0;

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID | VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		if (bnx2x_vfq_is_leading(q))
			bnx2x_leading_vfq_init(bp, vf, q);

		/* re-init the VF operation context */
		memset(&qctor, 0,
		       sizeof(struct bnx2x_vf_queue_construct_params));
		setup_p = &qctor.prep_qsetup;
		init_p = &qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;

			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			/* rx setup - multicast engine */
			if (bnx2x_vfq_is_leading(q)) {
				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);

				rxq_params->mcast_engine_id = mcast_id;
				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
			}

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);

		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
		if (rc)
			goto response;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
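
/* Build a PF-side filter list from the mac/vlan filters in the tlv whose
 * flags match type_flag exactly. On success *pfl points at the new list;
 * if no filter matched, the list is freed and *pfl is left untouched.
 */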
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vf_mac_vlan_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vf_mac_vlan_filters *fl = NULL;
	size_t fsz;

	fsz = tlv->n_mac_vlan_filters *
	      sizeof(struct bnx2x_vf_mac_vlan_filter) +
	      sizeof(struct bnx2x_vf_mac_vlan_filters);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
		}
		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
		}
		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
		fl->count++;
		j++;
	}
	if (!fl->count)
		kfree(fl);
	else
		*pfl = fl;

	return 0;
}
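
/* count the filters in the tlv whose flags contain all bits in 'flags' */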
static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
				    u32 flags)
{
	int i, cnt = 0;

	for (i = 0; i < filters->n_mac_vlan_filters; i++)
		if ((filters->filters[i].flags & flags) == flags)
			cnt++;

	return cnt;
}

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	/* keep the continuation prints on the same log line; the trailing
	 * newline is emitted last
	 */
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
				      struct vfpf_set_q_filters_tlv *filters)
{
	int i;

	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
		for (i = 0; i < filters->n_mac_vlan_filters; i++)
			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
						 &filters->filters[i]);

	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
		for (i = 0; i < filters->n_multicast; i++)
			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
#define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
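
/* Apply the mac/vlan filters, rx-mode and multicast list carried in the
 * VF's SET_Q_FILTERS tlv. Filters are configured in three passes:
 * vlan-mac pairs, then macs, then vlans.
 */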
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0;
	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	/* check for any mac/vlan changes */
	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
		struct bnx2x_vf_mac_vlan_filters *fl = NULL;

		/* build vlan-mac list */
		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan-mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build mac list */
		fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build vlan list */
		fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
		unsigned long accept = 0;
		struct pf_vf_bulletin_content *bulletin =
			BP_VF_BULLETIN(bp, vf->index);

		/* Ignore VF requested mode; instead set a regular mode */
		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
		}

		/* any_vlan is not configured if HV is forcing VLAN
		 * any_vlan is configured if
		 * 1. VF does not support vlan filtering
		 * OR
		 * 2. VF supports vlan filtering and explicitly requested it
		 */
		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

		/* set rx-mode */
		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
		if (rc)
			goto op_err;
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
		/* set mcasts */
		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
				    msg->n_multicast, false);
		if (rc)
			goto op_err;
	}
op_err:
	if (rc)
		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
			  vf->abs_vfid, msg->vf_qid, rc);
	return rc;
}
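
/* make sure a VF whose mac was forced by the set_vf_mac ndo only
 * configures that mac
 */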
static int bnx2x_filters_validate_mac(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if a mac was already set for this VF via the set vf mac ndo, we only
	 * accept mac configurations of that mac. Why accept them at all?
	 * because PF may have been unable to configure the mac at the time
	 * since queue was not set up.
	 */
	if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) {
		struct vfpf_q_mac_vlan_filter *filter = NULL;
		int i;

		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
			if (!(filters->filters[i].flags &
			      VFPF_Q_FILTER_DEST_MAC_VALID))
				continue;

			/* once a mac was set by ndo can only accept
			 * a single mac...
			 */
			if (filter) {
				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
					  vf->abs_vfid,
					  filters->n_mac_vlan_filters);
				rc = -EPERM;
				goto response;
			}

			filter = &filters->filters[i];
		}

		/* ...and only the mac set by the ndo */
		if (filter &&
		    !ether_addr_equal(filter->mac, bulletin->mac)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

response:
	return rc;
}
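
/* reject vlan configuration if the hypervisor forced a vlan; also bounds
 * check the requested vf_qid
 */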
static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if vlan was set by hypervisor we don't allow guest to config vlan */
	if (bulletin->valid_bitmap & (1 << VLAN_VALID)) {
		/* search for vlan filters */
		if (bnx2x_vf_filters_contain(filters,
					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf)) {
		rc = -EPERM;
		goto response;
	}

response:
	return rc;
}
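
/* Handle CHANNEL_TLV_SET_Q_FILTERS: validate the request against the
 * bulletin board and then apply it.
 */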
static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	int rc;

	rc = bnx2x_filters_validate_mac(bp, vf, filters);
	if (rc)
		goto response;

	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
	if (rc)
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	rc = bnx2x_vf_close(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	rc = bnx2x_vf_free(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
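
/* Handle CHANNEL_TLV_UPDATE_RSS: translate the VF's rss tlv into SP rss
 * parameters and apply them. Flags are copied bit by bit to stay
 * backward/forward compatible.
 */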
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_config_rss_params rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
	int rc = 0;

	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		rc = -EINVAL;
		goto mbx_resp;
	}

	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));

	/* set vfop params according to rss tlv */
	memcpy(rss.ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
	rss.rss_obj = &vf->rss_conf_obj;
	rss.rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	rss.rss_flags = 0;
	rss.ramrod_flags = 0;

	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);

	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		rc = -EINVAL;
		goto mbx_resp;
	}

	rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
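
/* reject TPA parameters that exceed what the PF hardware/FW can support */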
static int bnx2x_validate_tpa_params(struct bnx2x *bp,
				     struct vfpf_tpa_tlv *tpa_tlv)
{
	int rc = 0;

	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
	    U_ETH_MAX_SGES_FOR_PACKET) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_sges_for_packet,
			  U_ETH_MAX_SGES_FOR_PACKET);
	}

	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_tpa_queues,
			  MAX_AGG_QS(bp));
	}

	return rc;
}
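
/* Handle CHANNEL_TLV_UPDATE_TPA: copy the VF's TPA parameters into a
 * queue-update ramrod and execute it.
 */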
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_queue_update_tpa_params vf_op_params;
	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
	int rc;

	memset(&vf_op_params, 0, sizeof(vf_op_params));

	/* propagate a validation failure to the VF instead of silently
	 * responding with success
	 */
	rc = bnx2x_validate_tpa_params(bp, tpa_tlv);
	if (rc)
		goto mbx_resp;

	vf_op_params.complete_on_both_clients =
		tpa_tlv->tpa_client_info.complete_on_both_clients;
	vf_op_params.dont_verify_thr =
		tpa_tlv->tpa_client_info.dont_verify_thr;
	vf_op_params.max_agg_sz =
		tpa_tlv->tpa_client_info.max_agg_size;
	vf_op_params.max_sges_pkt =
		tpa_tlv->tpa_client_info.max_sges_for_packet;
	vf_op_params.max_tpa_queues =
		tpa_tlv->tpa_client_info.max_tpa_queues;
	vf_op_params.sge_buff_sz =
		tpa_tlv->tpa_client_info.sge_buff_size;
	vf_op_params.sge_pause_thr_high =
		tpa_tlv->tpa_client_info.sge_pause_thr_high;
	vf_op_params.sge_pause_thr_low =
		tpa_tlv->tpa_client_info.sge_pause_thr_low;
	vf_op_params.tpa_mode =
		tpa_tlv->tpa_client_info.tpa_mode;
	vf_op_params.update_ipv4 =
		tpa_tlv->tpa_client_info.update_ipv4;
	vf_op_params.update_ipv6 =
		tpa_tlv->tpa_client_info.update_ipv6;

	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			return;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			return;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_TPA:
			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
			return;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
	}

	/* can we respond to VF (do we have an address for it?) */
	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
		/* notify the VF that we do not support this request */
		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
	} else {
		/* can't send a response since this VF is unknown to us
		 * just ack the FW to release the mailbox and unlock
		 * the channel.
		 */
		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		/* Firmware ack should be written before unlocking channel */
		mmiowb();
		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	}
}
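
/* Slowpath entry for a VF->PF mailbox event: sanity check the vf id,
 * record the request address in the VF database and defer the actual
 * handling to the IOV task.
 */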
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
			   struct vf_pf_event_data *vfpf_event)
{
	u8 vf_idx;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);

	/* Sanity checks - consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		return;
	}

	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);

	/* Update VFDB with current message and schedule its handling */
	mutex_lock(&BP_VFDB(bp)->event_mutex);
	BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
		le32_to_cpu(vfpf_event->msg_addr_hi);
	BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
		le32_to_cpu(vfpf_event->msg_addr_lo);
	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
	mutex_unlock(&BP_VFDB(bp)->event_mutex);

	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}

/* handle new vf-pf messages */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
	u64 events;
	u8 vf_idx;
	int rc;

	if (!vfdb)
		return;

	mutex_lock(&vfdb->event_mutex);
	events = vfdb->event_occur;
	vfdb->event_occur = 0;
	mutex_unlock(&vfdb->event_mutex);

	for_each_vf(bp, vf_idx) {
		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* Handle VFs which have pending events */
		if (!(events & (1ULL << vf_idx)))
			continue;

		DP(BNX2X_MSG_IOV,
		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
		   mbx->first_tlv.resp_msg_offset);

		/* dmae to get the VF request */
		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
					  vf->abs_vfid, mbx->vf_addr_hi,
					  mbx->vf_addr_lo,
					  sizeof(union vfpf_tlvs) / 4);
		if (rc) {
			BNX2X_ERR("Failed to copy request VF %d\n",
				  vf->abs_vfid);
			bnx2x_vf_release(bp, vf);
			return;
		}

		/* process the VF message header */
		mbx->first_tlv = mbx->msg->req.first_tlv;

		/* Clean response buffer to refrain from falsely
		 * seeing chains.
		 */
		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));

		/* dispatch the request (will prepare the response) */
		bnx2x_vf_mbx_request(bp, vf, mbx);
	}
}
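
/* Set the bulletin board length according to the receiving VF's
 * capabilities and recompute its CRC.
 */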
void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
				bool support_long)
{
	/* Older VFs contain a bug where they can't check CRC for bulletin
	 * boards of length greater than legacy size.
	 */
	bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
					  BULLETIN_CONTENT_LEGACY_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
}

/* propagate local bulletin board to vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
			     vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bnx2x_vf_bulletin_finalize(bulletin,
				   (bnx2x_vf(bp, vf, cfg_flags) &
				    VF_CFG_EXT_BULLETIN) ? true : false);

	/* propagate bulletin board via dmae to vm memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}