/* bnx2x_vfpf.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *             Ariel Elior <ariel.elior@qlogic.com>
 */
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);

/* place a given tlv on the tlv buffer at a given offset */
static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
                          u16 offset, u16 type, u16 length)
{
        struct channel_tlv *tl =
                (struct channel_tlv *)(tlvs_list + offset);

        tl->type = type;
        tl->length = length;
}

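/* Editor's note (illustrative sketch, not driver code): requests are built
 * as a chain of TLVs laid head-to-tail in the mailbox buffer. A caller
 * appends payload TLVs at the running offset and closes the chain with
 * CHANNEL_TLV_LIST_END, mirroring what bnx2x_vfpf_acquire() does below:
 *
 *      u16 off = req->first_tlv.tl.length;
 *
 *      bnx2x_add_tlv(bp, req, off, CHANNEL_TLV_PHYS_PORT_ID,
 *                    sizeof(struct channel_tlv));
 *      off += sizeof(struct channel_tlv);
 *      bnx2x_add_tlv(bp, req, off, CHANNEL_TLV_LIST_END,
 *                    sizeof(struct channel_list_end_tlv));
 *
 * bnx2x_search_tlv_list() walks the same chain by adding each tlv->length
 * to its cursor until it reaches CHANNEL_TLV_LIST_END.
 */
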
/* Clear the mailbox and init the header of the first tlv */
static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
                            u16 type, u16 length)
{
        mutex_lock(&bp->vf2pf_mutex);

        DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
           type);

        /* Clear mailbox */
        memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

        /* init type and length */
        bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

        /* init first tlv header */
        first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* releases the mailbox */
static void bnx2x_vfpf_finalize(struct bnx2x *bp,
                                struct vfpf_first_tlv *first_tlv)
{
        DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
           first_tlv->tl.type);

        mutex_unlock(&bp->vf2pf_mutex);
}

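/* Editor's note: every request helper below follows the same bracketed
 * pattern. bnx2x_vfpf_prep() takes bp->vf2pf_mutex and zeroes the mailbox;
 * bnx2x_vfpf_finalize() drops the lock, so it is held across the whole
 * request/response exchange. A minimal caller looks like:
 *
 *      bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_FOO, sizeof(*req));
 *      ... fill request, append CHANNEL_TLV_LIST_END ...
 *      rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
 *                             bp->vf2pf_mbox_mapping);
 *      bnx2x_vfpf_finalize(bp, &req->first_tlv);
 *
 * (CHANNEL_TLV_FOO is a placeholder, not a real TLV type.)
 */
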
/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
                                   enum channel_tlvs req_tlv)
{
        struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

        do {
                if (tlv->type == req_tlv)
                        return tlv;

                if (!tlv->length) {
                        BNX2X_ERR("Found TLV with length 0\n");
                        return NULL;
                }

                tlvs_list += tlv->length;
                tlv = (struct channel_tlv *)tlvs_list;
        } while (tlv->type != CHANNEL_TLV_LIST_END);

        DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);

        return NULL;
}

/* list the types and lengths of the tlvs on the buffer */
static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
        int i = 1;
        struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

        while (tlv->type != CHANNEL_TLV_LIST_END) {
                /* output tlv */
                DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
                   tlv->type, tlv->length);

                /* advance to next tlv */
                tlvs_list += tlv->length;

                /* cast general tlv list pointer to channel tlv header */
                tlv = (struct channel_tlv *)tlvs_list;

                i++;

                /* break condition for this loop */
                if (i > MAX_TLVS_IN_LIST) {
                        WARN(true, "corrupt tlvs");
                        return;
                }
        }

        /* output last tlv */
        DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
           tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
        return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
        switch (rc) {
        case 0:
                return PFVF_STATUS_SUCCESS;
        case -ENOMEM:
                return PFVF_STATUS_NO_RESOURCE;
        default:
                return PFVF_STATUS_FAILURE;
        }
}

static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
        struct cstorm_vf_zone_data __iomem *zone_data =
                REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
        int tout = 100, interval = 100; /* wait for 10 seconds */

        if (*done) {
                BNX2X_ERR("done was non zero before message to pf was sent\n");
                WARN_ON(true);
                return -EINVAL;
        }

        /* if PF indicated channel is down avoid sending message. Return success
         * so calling flow can continue
         */
        bnx2x_sample_bulletin(bp);
        if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
                DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
                *done = PFVF_STATUS_SUCCESS;
                return -EINVAL;
        }

        /* Write message address */
        writel(U64_LO(msg_mapping),
               &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
        writel(U64_HI(msg_mapping),
               &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

        /* make sure the address is written before FW accesses it */
        wmb();

        /* Trigger the PF FW */
        writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
        mmiowb();

        /* Wait for PF to complete */
        while ((tout >= 0) && (!*done)) {
                msleep(interval);
                tout -= 1;

                /* progress indicator - HV can take its own sweet time in
                 * answering VFs...
                 */
                DP_CONT(BNX2X_MSG_IOV, ".");
        }

        if (!*done) {
                BNX2X_ERR("PF response has timed out\n");
                return -EAGAIN;
        }

        DP(BNX2X_MSG_SP, "Got a response from PF\n");
        return 0;
}

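/* Editor's note: the VF-to-PF handshake above is two MMIO writes of the
 * request's DMA address followed by a one-byte doorbell; the PF DMAes its
 * answer back into the response buffer, and *done (the response status
 * byte) flipping non-zero is the completion signal. The poll budget works
 * out to tout * interval = 100 * 100 ms = 10 s before -EAGAIN is returned.
 */
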
static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
        u32 me_reg;
        int tout = 10, interval = 100; /* Wait for 1 sec */

        do {
                /* pxp traps vf read of doorbells and returns me reg value */
                me_reg = readl(bp->doorbells);
                if (GOOD_ME_REG(me_reg))
                        break;

                msleep(interval);

                BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
                          me_reg);
        } while (tout-- > 0);

        if (!GOOD_ME_REG(me_reg)) {
                BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
                return -EINVAL;
        }

        DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

        *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
        return 0;
}

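/* Editor's note: the "ME register" trick above relies on the PXP block
 * trapping a VF's read of its own doorbell BAR and substituting the ME
 * register value; once a valid value is seen, the VF number is simply
 * extracted with ME_REG_VF_NUM_MASK / ME_REG_VF_NUM_SHIFT. The retry loop
 * gives the PF driver up to tout * interval = 10 * 100 ms = 1 s to come up.
 */
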
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
        int rc = 0, attempts = 0;
        struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
        struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
        struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
        struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp;
        u32 vf_id;
        bool resources_acquired = false;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

        if (bnx2x_get_vf_id(bp, &vf_id)) {
                rc = -EAGAIN;
                goto out;
        }

        req->vfdev_info.vf_id = vf_id;
        req->vfdev_info.vf_os = 0;
        req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION;

        req->resc_request.num_rxqs = rx_count;
        req->resc_request.num_txqs = tx_count;
        req->resc_request.num_sbs = bp->igu_sb_cnt;
        req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
        req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
        req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;

        /* pf 2 vf bulletin board address */
        req->bulletin_addr = bp->pf2vf_bulletin_mapping;

        /* Request physical port identifier */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
                      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));

        /* Bulletin support for bulletin board with length > legacy length */
        req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
        /* vlan filtering is supported */
        req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req,
                      req->first_tlv.tl.length + sizeof(struct channel_tlv),
                      CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        while (!resources_acquired) {
                DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

                /* send acquire request */
                rc = bnx2x_send_msg2pf(bp,
                                       &resp->hdr.status,
                                       bp->vf2pf_mbox_mapping);

                /* PF timeout */
                if (rc)
                        goto out;

                /* copy acquire response from buffer to bp */
                memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

                attempts++;

                /* test whether the PF accepted our request. If not, humble
                 * the request and try again.
                 */
                if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
                        DP(BNX2X_MSG_SP, "resources acquired\n");
                        resources_acquired = true;
                } else if (bp->acquire_resp.hdr.status ==
                           PFVF_STATUS_NO_RESOURCE &&
                           attempts < VF_ACQUIRE_THRESH) {
                        DP(BNX2X_MSG_SP,
                           "PF unwilling to fulfill resource request. Try PF recommended amount\n");

                        /* humble our request */
                        req->resc_request.num_txqs =
                                min(req->resc_request.num_txqs,
                                    bp->acquire_resp.resc.num_txqs);
                        req->resc_request.num_rxqs =
                                min(req->resc_request.num_rxqs,
                                    bp->acquire_resp.resc.num_rxqs);
                        req->resc_request.num_sbs =
                                min(req->resc_request.num_sbs,
                                    bp->acquire_resp.resc.num_sbs);
                        req->resc_request.num_mac_filters =
                                min(req->resc_request.num_mac_filters,
                                    bp->acquire_resp.resc.num_mac_filters);
                        req->resc_request.num_vlan_filters =
                                min(req->resc_request.num_vlan_filters,
                                    bp->acquire_resp.resc.num_vlan_filters);
                        req->resc_request.num_mc_filters =
                                min(req->resc_request.num_mc_filters,
                                    bp->acquire_resp.resc.num_mc_filters);

                        /* Clear response buffer */
                        memset(&bp->vf2pf_mbox->resp, 0,
                               sizeof(union pfvf_tlvs));
                } else {
                        /* Determine reason of PF failure of acquire process */
                        fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
                                                            CHANNEL_TLV_FP_HSI_SUPPORT);
                        if (fp_hsi_resp && !fp_hsi_resp->is_supported)
                                BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n");
                        else
                                BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
                                          bp->acquire_resp.hdr.status);
                        rc = -EAGAIN;
                        goto out;
                }
        }
        /* Retrieve physical port id (if possible) */
        phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
                         bnx2x_search_tlv_list(bp, resp,
                                               CHANNEL_TLV_PHYS_PORT_ID);
        if (phys_port_resp) {
                memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
                bp->flags |= HAS_PHYS_PORT_ID;
        }

        /* Old Hypervisors might not even support the FP_HSI_SUPPORT TLV.
         * If that's the case, we need to make certain required FW was
         * supported by such a hypervisor [i.e., v0-v2].
         */
        fp_hsi_resp = bnx2x_search_tlv_list(bp, resp,
                                            CHANNEL_TLV_FP_HSI_SUPPORT);
        if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) {
                BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n");

                /* Since acquire succeeded on the PF side, we need to send a
                 * release message in order to allow future probes.
                 */
                bnx2x_vfpf_finalize(bp, &req->first_tlv);
                bnx2x_vfpf_release(bp);

                rc = -EINVAL;
                goto out;
        }
        /* get HW info */
        bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
        bp->link_params.chip_id = bp->common.chip_id;
        bp->db_size = bp->acquire_resp.pfdev_info.db_size;
        bp->common.int_block = INT_BLOCK_IGU;
        bp->common.chip_port_mode = CHIP_2_PORT_MODE;
        bp->igu_dsb_id = -1;
        bp->mf_ov = 0;
        bp->mf_mode = 0;
        bp->common.flash_size = 0;
        bp->flags |=
                NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
        bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
        bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
        bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;

        strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
                sizeof(bp->fw_ver));

        if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
                memcpy(bp->dev->dev_addr,
                       bp->acquire_resp.resc.current_mac_addr,
                       ETH_ALEN);

out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
        return rc;
}

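/* Editor's note: the acquire negotiation above is a simple back-off loop;
 * when the PF answers PFVF_STATUS_NO_RESOURCE it also reports, in
 * resp->resc, the maximum it is willing to grant, so the VF clamps each
 * requested count with min() and retries, up to VF_ACQUIRE_THRESH
 * attempts, before giving up with -EAGAIN.
 */
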
int bnx2x_vfpf_release(struct bnx2x *bp)
{
        struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc; /* signed, since it may hold a negative errno */
        u32 vf_id;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

        if (bnx2x_get_vf_id(bp, &vf_id)) {
                rc = -EAGAIN;
                goto out;
        }

        req->vf_id = vf_id;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        /* send release request */
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

        if (rc)
                /* PF timeout */
                goto out;

        if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
                /* PF released us */
                DP(BNX2X_MSG_SP, "vf released\n");
        } else {
                /* PF reports error */
                BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
                          resp->hdr.status);
                rc = -EAGAIN;
                goto out;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
        struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc, i;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

        /* status blocks */
        for_each_eth_queue(bp, i)
                req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
                                                       status_blk_mapping);

        /* statistics - the stats request only supports a single queue for now */
        req->stats_addr = bp->fw_stats_data_mapping +
                          offsetof(struct bnx2x_fw_stats_data, queue_stats);

        req->stats_stride = sizeof(struct per_queue_stats);

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc)
                goto out;

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
                          resp->hdr.status);
                rc = -EAGAIN;
                goto out;
        }

        DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
        struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int i, rc;
        u32 vf_id;

        /* If we haven't got a valid VF id, there is no sense to
         * continue with sending messages
         */
        if (bnx2x_get_vf_id(bp, &vf_id))
                goto free_irq;

        /* Close the queues */
        for_each_queue(bp, i)
                bnx2x_vfpf_teardown_queue(bp, i);

        /* remove mac */
        bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

        req->vf_id = vf_id;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc)
                BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
        else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
                BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
                          resp->hdr.status);

        bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 0);
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);

        /* Release IRQs */
        bnx2x_free_irq(bp);
}

static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                   struct bnx2x_vf_queue *q)
{
        u8 cl_id = vfq_cl_id(vf, q);
        u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

        /* mac */
        bnx2x_init_mac_obj(bp, &q->mac_obj,
                           cl_id, q->cid, func_id,
                           bnx2x_vf_sp(bp, vf, mac_rdata),
                           bnx2x_vf_sp_map(bp, vf, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &vf->filter_state,
                           BNX2X_OBJ_TYPE_RX_TX,
                           &vf->vf_macs_pool);
        /* vlan */
        bnx2x_init_vlan_obj(bp, &q->vlan_obj,
                            cl_id, q->cid, func_id,
                            bnx2x_vf_sp(bp, vf, vlan_rdata),
                            bnx2x_vf_sp_map(bp, vf, vlan_rdata),
                            BNX2X_FILTER_VLAN_PENDING,
                            &vf->filter_state,
                            BNX2X_OBJ_TYPE_RX_TX,
                            &vf->vf_vlans_pool);
        /* vlan-mac */
        bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
                                cl_id, q->cid, func_id,
                                bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
                                bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
                                BNX2X_FILTER_VLAN_MAC_PENDING,
                                &vf->filter_state,
                                BNX2X_OBJ_TYPE_RX_TX,
                                &vf->vf_macs_pool,
                                &vf->vf_vlans_pool);
        /* mcast */
        bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
                             q->cid, func_id, func_id,
                             bnx2x_vf_sp(bp, vf, mcast_rdata),
                             bnx2x_vf_sp_map(bp, vf, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING,
                             &vf->filter_state,
                             BNX2X_OBJ_TYPE_RX_TX);
        /* rss */
        bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
                                  func_id, func_id,
                                  bnx2x_vf_sp(bp, vf, rss_rdata),
                                  bnx2x_vf_sp_map(bp, vf, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING,
                                  &vf->filter_state,
                                  BNX2X_OBJ_TYPE_RX_TX);

        vf->leading_rss = cl_id;
        q->is_leading = true;
        q->sp_initialized = true;
}

/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       bool is_leading)
{
        struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        u8 fp_idx = fp->index;
        u16 tpa_agg_size = 0, flags = 0;
        int rc;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

        /* select tpa mode to request */
        if (fp->mode != TPA_MODE_DISABLED) {
                flags |= VFPF_QUEUE_FLG_TPA;
                flags |= VFPF_QUEUE_FLG_TPA_IPV6;
                if (fp->mode == TPA_MODE_GRO)
                        flags |= VFPF_QUEUE_FLG_TPA_GRO;
                tpa_agg_size = TPA_AGG_SIZE;
        }

        if (is_leading)
                flags |= VFPF_QUEUE_FLG_LEADING_RSS;

        /* calculate queue flags */
        flags |= VFPF_QUEUE_FLG_STATS;
        flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
        flags |= VFPF_QUEUE_FLG_VLAN;

        /* Common */
        req->vf_qid = fp_idx;
        req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

        /* Rx */
        req->rxq.rcq_addr = fp->rx_comp_mapping;
        req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
        req->rxq.rxq_addr = fp->rx_desc_mapping;
        req->rxq.sge_addr = fp->rx_sge_mapping;
        req->rxq.vf_sb = fp_idx;
        req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
        req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
        req->rxq.mtu = bp->dev->mtu;
        req->rxq.buf_sz = fp->rx_buf_size;
        req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
        req->rxq.tpa_agg_sz = tpa_agg_size;
        req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
        req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
                                (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
        req->rxq.flags = flags;
        req->rxq.drop_flags = 0;
        req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
        req->rxq.stat_id = -1; /* No stats at the moment */

        /* Tx */
        req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
        req->txq.vf_sb = fp_idx;
        req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
        req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
        req->txq.flags = flags;
        req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc)
                BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
                          fp_idx);

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
                          fp_idx, resp->hdr.status);
                rc = -EINVAL;
        }

        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

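/* Editor's note (assuming tick units of microseconds, as used by ethtool
 * coalescing): the hc_rate fields above convert the configured interrupt
 * coalescing interval into an events-per-second rate for the PF, e.g.
 * rx_ticks = 50 yields hc_rate = 1000000 / 50 = 20000, while a zero tick
 * value requests a rate of 0, i.e. coalescing disabled.
 */
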
static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
        struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
                        sizeof(*req));

        req->vf_qid = qidx;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
                          rc);
                goto out;
        }

        /* PF failed the transaction */
        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
                          resp->hdr.status);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
        int rc = 0;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                        sizeof(*req));

        req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
        req->vf_qid = vf_qid;
        req->n_mac_vlan_filters = 1;

        req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
        if (set)
                req->filters[0].flags |= VFPF_Q_FILTER_SET;

        /* sample bulletin board for new mac */
        bnx2x_sample_bulletin(bp);

        /* copy mac from device to request */
        memcpy(req->filters[0].mac, addr, ETH_ALEN);

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        /* send message to pf */
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
                goto out;
        }

        /* failure may mean PF was configured with a new mac for us */
        while (resp->hdr.status == PFVF_STATUS_FAILURE) {
                DP(BNX2X_MSG_IOV,
                   "vfpf SET MAC failed. Check bulletin board for new posts\n");

                /* copy mac from bulletin to device */
                memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

                /* check if bulletin board was updated */
                if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
                        /* copy mac from device to request */
                        memcpy(req->filters[0].mac, bp->dev->dev_addr,
                               ETH_ALEN);

                        /* send message to pf */
                        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
                                               bp->vf2pf_mbox_mapping);
                } else {
                        /* no new info in bulletin */
                        break;
                }
        }

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

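/* Editor's note: PFVF_STATUS_FAILURE on SET MAC typically means the
 * hypervisor forced a different MAC via the bulletin board while this
 * request was in flight; the retry loop above re-samples the bulletin,
 * adopts the PF-posted address and resends until the PF accepts or the
 * bulletin stops changing.
 */
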
/* request pf to config rss table for vf queues */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
                          struct bnx2x_config_rss_params *params)
{
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
        int rc = 0;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
                        sizeof(*req));

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
        memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
        req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
        req->rss_key_size = T_ETH_RSS_KEY;
        req->rss_result_mask = params->rss_result_mask;

        /* flags handled individually for backward/forward compatibility */
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
                req->rss_flags |= VFPF_RSS_MODE_DISABLED;
        if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
                req->rss_flags |= VFPF_RSS_MODE_REGULAR;
        if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
                req->rss_flags |= VFPF_RSS_SET_SRCH;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
                req->rss_flags |= VFPF_RSS_IPV4;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
                req->rss_flags |= VFPF_RSS_IPV4_TCP;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
                req->rss_flags |= VFPF_RSS_IPV4_UDP;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
                req->rss_flags |= VFPF_RSS_IPV6;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
                req->rss_flags |= VFPF_RSS_IPV6_TCP;
        if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
                req->rss_flags |= VFPF_RSS_IPV6_UDP;

        DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        /* send message to pf */
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
                goto out;
        }

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                /* Since older drivers don't support this feature (and VF has
                 * no way of knowing other than failing this), don't propagate
                 * an error in this case.
                 */
                DP(BNX2X_MSG_IOV,
                   "Failed to send rss message to PF over VF-PF channel [%d]\n",
                   resp->hdr.status);
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc = 0, i = 0;
        struct netdev_hw_addr *ha;

        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
                return -EINVAL;
        }

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                        sizeof(*req));

        /* Get Rx mode requested */
        DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

        /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
        if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
                DP(NETIF_MSG_IFUP,
                   "VF supports no more than %d multicast MAC addresses\n",
                   PFVF_MAX_MULTICAST_PER_VF);
                rc = -EINVAL;
                goto out;
        }

        netdev_for_each_mc_addr(ha, dev) {
                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                   bnx2x_mc_addr(ha));
                memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
                i++;
        }

        req->n_multicast = i;
        req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
        req->vf_qid = 0;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("Sending a message failed: %d\n", rc);
                goto out;
        }

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
                          resp->hdr.status);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* request pf to add a vlan for the vf */
int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
{
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc = 0;

        if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
                DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
                return 0;
        }

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                        sizeof(*req));

        req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
        req->vf_qid = vf_qid;
        req->n_mac_vlan_filters = 1;

        req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;

        if (add)
                req->filters[0].flags |= VFPF_Q_FILTER_SET;

        /* sample bulletin board for hypervisor vlan */
        bnx2x_sample_bulletin(bp);

        if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
                BNX2X_ERR("Hypervisor will decline the request; aborting\n");
                rc = -EINVAL;
                goto out;
        }

        req->filters[0].vlan_tag = vid;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        /* send message to pf */
        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc) {
                BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
                goto out;
        }

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
                          vid);
                rc = -EINVAL;
        }
out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
        int mode = bp->rx_mode;
        struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
        struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
        int rc;

        /* clear mailbox and prep first tlv */
        bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                        sizeof(*req));

        DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

        /* Ignore everything except MODE_NONE */
        if (mode == BNX2X_RX_MODE_NONE) {
                req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
        } else {
                /* Current PF driver will not look at the specific flags,
                 * but they are required when working with older drivers on hv.
                 */
                req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
                if (mode == BNX2X_RX_MODE_PROMISC)
                        req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
        }

        if (bp->accept_any_vlan)
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;

        req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
        req->vf_qid = 0;

        /* add list termination tlv */
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* output tlvs list */
        bnx2x_dp_tlv_list(bp, req);

        rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
        if (rc)
                BNX2X_ERR("Sending a message failed: %d\n", rc);

        if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
                BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
                rc = -EINVAL;
        }

        bnx2x_vfpf_finalize(bp, &req->first_tlv);

        return rc;
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

        REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

        REG_WR8(bp, addr, 1);
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
        bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

        /* enable the mailbox in the FW */
        storm_memset_vf_mbx_ack(bp, abs_vfid);
        storm_memset_vf_mbx_valid(bp, abs_vfid);

        /* enable the VF access to the mailbox */
        bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
                                dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
                                u32 vf_addr_lo, u32 len32)
{
        struct dmae_command dmae;

        if (CHIP_IS_E1x(bp)) {
                BNX2X_ERR("Chip revision does not support VFs\n");
                return DMAE_NOT_RDY;
        }

        if (!bp->dmae_ready) {
                BNX2X_ERR("DMAE is not ready, can not copy\n");
                return DMAE_NOT_RDY;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

        if (from_vf) {
                dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
                        (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
                        (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

                dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

                dmae.src_addr_lo = vf_addr_lo;
                dmae.src_addr_hi = vf_addr_hi;
                dmae.dst_addr_lo = U64_LO(pf_addr);
                dmae.dst_addr_hi = U64_HI(pf_addr);
        } else {
                dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
                        (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
                        (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

                dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

                dmae.src_addr_lo = U64_LO(pf_addr);
                dmae.src_addr_hi = U64_HI(pf_addr);
                dmae.dst_addr_lo = vf_addr_lo;
                dmae.dst_addr_hi = vf_addr_hi;
        }
        dmae.len = len32;

        /* issue the command and wait for completion */
        return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}

static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
                                         struct bnx2x_virtf *vf)
{
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
        u16 length, type;

        /* prepare response */
        type = mbx->first_tlv.tl.type;
        length = type == CHANNEL_TLV_ACQUIRE ?
                sizeof(struct pfvf_acquire_resp_tlv) :
                sizeof(struct pfvf_general_resp_tlv);
        bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
        bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
}

static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
                                       struct bnx2x_virtf *vf,
                                       int vf_rc)
{
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
        struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
        dma_addr_t pf_addr;
        u64 vf_addr;
        int rc;

        bnx2x_dp_tlv_list(bp, resp);
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

        resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);

        /* send response */
        vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
                  mbx->first_tlv.resp_msg_offset;
        pf_addr = mbx->msg_mapping +
                  offsetof(struct bnx2x_vf_mbx_msg, resp);

        /* Copy the response buffer. The first u64 is written afterwards, as
         * the vf is sensitive to the header being written
         */
        vf_addr += sizeof(u64);
        pf_addr += sizeof(u64);
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                                  U64_HI(vf_addr),
                                  U64_LO(vf_addr),
                                  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
        if (rc) {
                BNX2X_ERR("Failed to copy response body to VF %d\n",
                          vf->abs_vfid);
                goto mbx_error;
        }
        vf_addr -= sizeof(u64);
        pf_addr -= sizeof(u64);

        /* ack the FW */
        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
        mmiowb();

        /* copy the response header including status-done field,
         * must be last dmae, must be after FW is acked
         */
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                                  U64_HI(vf_addr),
                                  U64_LO(vf_addr),
                                  sizeof(u64)/4);

        /* unlock channel mutex */
        bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

        if (rc) {
                BNX2X_ERR("Failed to copy response status to VF %d\n",
                          vf->abs_vfid);
                goto mbx_error;
        }
        return;

mbx_error:
        bnx2x_vf_release(bp, vf);
}

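/* Editor's note: the response is deliberately DMAed in two steps; the body
 * first (everything past the first u64), then, only after the FW is acked,
 * the header u64 that carries the status/done field the VF polls in
 * bnx2x_send_msg2pf(). Writing the header last guarantees the VF never
 * observes "done" while the rest of the response is still in flight.
 */
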
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
                              struct bnx2x_virtf *vf,
                              int rc)
{
        bnx2x_vf_mbx_resp_single_tlv(bp, vf);
        bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}

static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
                                        struct bnx2x_virtf *vf,
                                        void *buffer,
                                        u16 *offset)
{
        struct vfpf_port_phys_id_resp_tlv *port_id;

        if (!(bp->flags & HAS_PHYS_PORT_ID))
                return;

        bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
                      sizeof(struct vfpf_port_phys_id_resp_tlv));

        port_id = (struct vfpf_port_phys_id_resp_tlv *)
                  (((u8 *)buffer) + *offset);
        memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);

        /* Offset should continue representing the offset to the tail
         * of TLV data (outside this function scope)
         */
        *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}

static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp,
                                         struct bnx2x_virtf *vf,
                                         void *buffer,
                                         u16 *offset)
{
        struct vfpf_fp_hsi_resp_tlv *fp_hsi;

        bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT,
                      sizeof(struct vfpf_fp_hsi_resp_tlv));

        fp_hsi = (struct vfpf_fp_hsi_resp_tlv *)
                 (((u8 *)buffer) + *offset);
        fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 0 : 1;

        /* Offset should continue representing the offset to the tail
         * of TLV data (outside this function scope)
         */
        *offset += sizeof(struct vfpf_fp_hsi_resp_tlv);
}

static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
        int i;
        struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
        struct pf_vf_resc *resc = &resp->resc;
        u8 status = bnx2x_pfvf_status_codes(vfop_status);
        u16 length;

        memset(resp, 0, sizeof(*resp));

        /* fill in pfdev info */
        resp->pfdev_info.chip_num = bp->common.chip_id;
        resp->pfdev_info.db_size = bp->db_size;
        resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
        resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                                   PFVF_CAP_TPA |
                                   PFVF_CAP_TPA_UPDATE |
                                   PFVF_CAP_VLAN_FILTER);
        bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                          sizeof(resp->pfdev_info.fw_ver));

        if (status == PFVF_STATUS_NO_RESOURCE ||
            status == PFVF_STATUS_SUCCESS) {
                /* set resources numbers, if status equals NO_RESOURCE these
                 * are max possible numbers
                 */
                resc->num_rxqs = vf_rxq_count(vf) ? :
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_txqs = vf_txq_count(vf) ? :
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_sbs = vf_sb_count(vf);
                resc->num_mac_filters = vf_mac_rules_cnt(vf);
                resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
                resc->num_mc_filters = 0;

                if (status == PFVF_STATUS_SUCCESS) {
                        /* fill in the allocated resources */
                        struct pf_vf_bulletin_content *bulletin =
                                BP_VF_BULLETIN(bp, vf->index);

                        for_each_vfq(vf, i)
                                resc->hw_qid[i] =
                                        vfq_qzone_id(vf, vfq_get(vf, i));

                        for_each_vf_sb(vf, i) {
                                resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
                                resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
                        }

                        /* if a mac has been set for this vf, supply it */
                        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
                                memcpy(resc->current_mac_addr, bulletin->mac,
                                       ETH_ALEN);
                        }
                }
        }

        DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
           "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
           vf->abs_vfid,
           resp->pfdev_info.chip_num,
           resp->pfdev_info.db_size,
           resp->pfdev_info.indices_per_sb,
           resp->pfdev_info.pf_cap,
           resc->num_rxqs,
           resc->num_txqs,
           resc->num_sbs,
           resc->num_mac_filters,
           resc->num_vlan_filters,
           resc->num_mc_filters,
           resp->pfdev_info.fw_ver);

        DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
        for (i = 0; i < vf_rxq_count(vf); i++)
                DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
        DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
        for (i = 0; i < vf_sb_count(vf); i++)
                DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
                        resc->hw_sbs[i].hw_sb_id,
                        resc->hw_sbs[i].sb_qid);
        DP_CONT(BNX2X_MSG_IOV, "]\n");

        /* prepare response */
        length = sizeof(struct pfvf_acquire_resp_tlv);
        bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);

        /* Handle possible VF requests for physical port identifiers.
         * 'length' should continue to indicate the offset of the first empty
         * place in the buffer (i.e., where next TLV should be inserted)
         */
        if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
                                  CHANNEL_TLV_PHYS_PORT_ID))
                bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);

        /* `New' vfs will want to know if fastpath HSI is supported, since
         * if that's not the case they could print into system log the fact
         * the driver version must be updated.
         */
        bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);

        bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        /* send the response */
        bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}

static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
                                       struct vfpf_acquire_tlv *acquire)
{
        /* Windows driver does one of three things:
         * 1. Old driver doesn't have bulletin board address set.
         * 2. 'Middle' driver sends mc_num == 32.
         * 3. New driver sets the OS field.
         */
        if (!acquire->bulletin_addr ||
            acquire->resc_request.num_mc_filters == 32 ||
            ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
             VF_OS_WINDOWS))
                return true;

        return false;
}

static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
                                         struct bnx2x_virtf *vf,
                                         struct bnx2x_vf_mbx *mbx)
{
        /* Linux drivers which correctly set the doorbell size also
         * send a physical port request
         */
        if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
                                  CHANNEL_TLV_PHYS_PORT_ID))
                return 0;

        /* Issue does not exist in windows VMs */
        if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
                return 0;

        return -EOPNOTSUPP;
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        int rc;
        struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

        /* log vfdev info */
        DP(BNX2X_MSG_IOV,
           "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
           vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
           acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
           acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
           acquire->resc_request.num_vlan_filters,
           acquire->resc_request.num_mc_filters);

        /* Prevent VFs with old drivers from loading, since they calculate
         * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover
         * while being upgraded.
         */
        rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
        if (rc) {
                DP(BNX2X_MSG_IOV,
                   "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
                   vf->abs_vfid);
                goto out;
        }

        /* Verify the VF fastpath HSI can be supported by the loaded FW.
         * Linux vfs should be oblivious to changes between v0 and v2.
         */
        if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
                vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver;
        else
                vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver,
                                   ETH_FP_HSI_VER_2);
        if (vf->fp_hsi > ETH_FP_HSI_VERSION) {
                DP(BNX2X_MSG_IOV,
                   "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n",
                   vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver,
                   ETH_FP_HSI_VERSION);
                rc = -EINVAL;
                goto out;
        }

        /* acquire the resources */
        rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

        /* store address of vf's bulletin board */
        vf->bulletin_map = acquire->bulletin_addr;
        if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
                DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
                   vf->abs_vfid);
                vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
        } else {
                vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
        }

        if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
                DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
                   vf->abs_vfid);
                vf->cfg_flags |= VF_CFG_VLAN_FILTER;
        } else {
                vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
        }

out:
        /* response */
        bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
        struct vfpf_init_tlv *init = &mbx->msg->req.init;
        int rc;

        /* record ghost addresses from vf message */
        vf->fw_stat_map = init->stats_addr;
        vf->stats_stride = init->stats_stride;
        rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

        /* set VF multiqueue statistics collection mode */
        if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
                vf->cfg_flags |= VF_CFG_STATS_COALESCE;

        /* Update VF's view of link state */
        if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
                bnx2x_iov_link_update_vf(bp, vf->index);

        /* response */
        bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
				     unsigned long *sp_q_flags)
{
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);

	/* outer vlan removal is set according to PF's multi function mode */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}

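/* SETUP_Q constructs the slow-path init/setup parameters for a single VF
 * queue pair. Rx and Tx share one status block, and the leading queue
 * also hosts the VF's multicast engine.
 */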
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vf_queue_construct_params qctor;
	int rc = 0;

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		rc = -EINVAL;
		goto response;
	}

	/* tx queues must be set up alongside rx queues, thus if the rx
	 * queue is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID | VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		if (bnx2x_vfq_is_leading(q))
			bnx2x_leading_vfq_init(bp, vf, q);

		/* re-init the VF operation context */
		memset(&qctor, 0,
		       sizeof(struct bnx2x_vf_queue_construct_params));
		setup_p = &qctor.prep_qsetup;
		init_p = &qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			/* rx setup - multicast engine */
			if (bnx2x_vfq_is_leading(q)) {
				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);

				rxq_params->mcast_engine_id = mcast_id;
				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
			}

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);

		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
		if (rc)
			goto response;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

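/* Collect the TLV filters whose flags match type_flag into a newly
 * allocated list. *pfl is only set when at least one filter matched;
 * otherwise the allocation is freed and the caller's pointer stays NULL.
 */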
static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vf_mac_vlan_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vf_mac_vlan_filters *fl = NULL;
	size_t fsz;

	fsz = tlv->n_mac_vlan_filters *
	      sizeof(struct bnx2x_vf_mac_vlan_filter) +
	      sizeof(struct bnx2x_vf_mac_vlan_filters);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
		}
		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
		}
		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
		fl->count++;
		j++;
	}
	if (!fl->count)
		kfree(fl);
	else
		*pfl = fl;

	return 0;
}

static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
				    u32 flags)
{
	int i, cnt = 0;

	for (i = 0; i < filters->n_mac_vlan_filters; i++)
		if ((filters->filters[i].flags & flags) == flags)
			cnt++;

	return cnt;
}

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
				      struct vfpf_set_q_filters_tlv *filters)
{
	int i;

	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
		for (i = 0; i < filters->n_mac_vlan_filters; i++)
			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
						 &filters->filters[i]);

	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
		for (i = 0; i < filters->n_multicast; i++)
			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
#define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)

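/* The VF sends its filters in one flat array; they are applied in three
 * passes using the masks above: vlan-mac pairs first, then MAC-only,
 * then vlan-only filters.
 */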
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	/* check for any mac/vlan changes */
	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
		struct bnx2x_vf_mac_vlan_filters *fl = NULL;

		/* build vlan-mac list */
		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan-mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build mac list */
		fl = NULL;
		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build vlan list */
		fl = NULL;
		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
		unsigned long accept = 0;
		struct pf_vf_bulletin_content *bulletin =
			BP_VF_BULLETIN(bp, vf->index);

		/* Ignore VF requested mode; instead set a regular mode */
		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
		}

		/* any_vlan is not configured if HV is forcing VLAN
		 * any_vlan is configured if
		 *   1. VF does not support vlan filtering
		 *   OR
		 *   2. VF supports vlan filtering and explicitly requested it
		 */
		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

		/* set rx-mode */
		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
		if (rc)
			goto op_err;
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
		/* set mcasts */
		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
				    msg->n_multicast, false);
		if (rc)
			goto op_err;
	}
op_err:
	if (rc)
		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
			  vf->abs_vfid, msg->vf_qid, rc);
	return rc;
}

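/* When the hypervisor pinned a MAC for this VF via the set-vf-mac ndo,
 * the bulletin carries MAC_ADDR_VALID and the VF may only configure that
 * exact address.
 */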
static int bnx2x_filters_validate_mac(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if a mac was already set for this VF via the set vf mac ndo, we
	 * only accept mac configurations of that mac. Why accept them at
	 * all? Because the PF may have been unable to configure the mac
	 * at the time, since the queue was not yet set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		struct vfpf_q_mac_vlan_filter *filter = NULL;
		int i;

		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
			if (!(filters->filters[i].flags &
			      VFPF_Q_FILTER_DEST_MAC_VALID))
				continue;

			/* once a mac was set by ndo can only accept
			 * a single mac...
			 */
			if (filter) {
				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
					  vf->abs_vfid,
					  filters->n_mac_vlan_filters);
				rc = -EPERM;
				goto response;
			}

			filter = &filters->filters[i];
		}

		/* ...and only the mac set by the ndo */
		if (filter &&
		    !ether_addr_equal(filter->mac, bulletin->mac)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

response:
	return rc;
}

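/* Similarly, a hypervisor-enforced VLAN (VLAN_VALID in the bulletin)
 * makes any vlan filter coming from the VF illegal.
 */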
static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if vlan was set by hypervisor we don't allow guest to config vlan */
	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		/* search for vlan filters */
		if (bnx2x_vf_filters_contain(filters,
					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf)) {
		rc = -EPERM;
		goto response;
	}

response:
	return rc;
}

static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	int rc;

	rc = bnx2x_filters_validate_mac(bp, vf, filters);
	if (rc)
		goto response;

	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
	if (rc)
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	rc = bnx2x_vf_mbx_qfilters(bp, vf);

response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	rc = bnx2x_vf_close(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	rc = bnx2x_vf_free(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

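/* UPDATE_RSS: size-check the VF-provided indirection table and key, then
 * translate each channel RSS flag into the matching slow-path object bit.
 * Flags are handled one by one for backward/forward compatibility.
 */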
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_config_rss_params rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
	int rc = 0;

	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		rc = -EINVAL;
		goto mbx_resp;
	}

	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));

	/* set vfop params according to rss tlv */
	memcpy(rss.ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
	rss.rss_obj = &vf->rss_conf_obj;
	rss.rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	rss.rss_flags = 0;
	rss.ramrod_flags = 0;

	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
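
	/* FW cannot do UDP RSS for an IP version unless the matching TCP
	 * flag is set as well; reject such requests before firmware asserts.
	 */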
	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		rc = -EINVAL;
		goto mbx_resp;
	}

	rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static int bnx2x_validate_tpa_params(struct bnx2x *bp,
				     struct vfpf_tpa_tlv *tpa_tlv)
{
	int rc = 0;

	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
	    U_ETH_MAX_SGES_FOR_PACKET) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_sges_for_packet,
			  U_ETH_MAX_SGES_FOR_PACKET);
	}

	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_tpa_queues,
			  MAX_AGG_QS(bp));
	}

	return rc;
}

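/* UPDATE_TPA: bounds-check the VF's aggregation parameters against the
 * hardware limits, then copy them field by field into the queue
 * update-TPA ramrod parameters.
 */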
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_queue_update_tpa_params vf_op_params;
	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
	int rc = 0;

	memset(&vf_op_params, 0, sizeof(vf_op_params));

	/* propagate a validation failure to the VF instead of silently
	 * responding with success
	 */
	rc = bnx2x_validate_tpa_params(bp, tpa_tlv);
	if (rc)
		goto mbx_resp;

	vf_op_params.complete_on_both_clients =
		tpa_tlv->tpa_client_info.complete_on_both_clients;
	vf_op_params.dont_verify_thr =
		tpa_tlv->tpa_client_info.dont_verify_thr;
	vf_op_params.max_agg_sz =
		tpa_tlv->tpa_client_info.max_agg_size;
	vf_op_params.max_sges_pkt =
		tpa_tlv->tpa_client_info.max_sges_for_packet;
	vf_op_params.max_tpa_queues =
		tpa_tlv->tpa_client_info.max_tpa_queues;
	vf_op_params.sge_buff_sz =
		tpa_tlv->tpa_client_info.sge_buff_size;
	vf_op_params.sge_pause_thr_high =
		tpa_tlv->tpa_client_info.sge_pause_thr_high;
	vf_op_params.sge_pause_thr_low =
		tpa_tlv->tpa_client_info.sge_pause_thr_low;
	vf_op_params.tpa_mode =
		tpa_tlv->tpa_client_info.tpa_mode;
	vf_op_params.update_ipv4 =
		tpa_tlv->tpa_client_info.update_ipv4;
	vf_op_params.update_ipv6 =
		tpa_tlv->tpa_client_info.update_ipv6;

	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			return;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			return;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_TPA:
			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
			return;
		}
	} else {
		/* Unknown TLV - it may belong to a VF driver from the
		 * future, one written after this PF driver and supporting
		 * features we don't know of yet, or a broken VF driver may
		 * simply be sending garbage over the channel. Either way,
		 * we cannot service the request.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
	}

	/* can we respond to VF (do we have an address for it?) */
	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
		/* notify the VF that we do not support this request */
		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
	} else {
		/* can't send a response since this VF is unknown to us
		 * just ack the FW to release the mailbox and unlock
		 * the channel.
		 */
		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		/* Firmware ack should be written before unlocking channel */
		mmiowb();
		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	}
}

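/* Mailbox handling is two-staged: bnx2x_vf_mbx_schedule() below runs in
 * the event-queue path and merely latches the request address under the
 * event mutex, while the actual TLV processing above happens later from
 * the IOV task via bnx2x_vf_mbx().
 */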
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
			   struct vf_pf_event_data *vfpf_event)
{
	u8 vf_idx;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
	/* Sanity checks; consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		return;
	}

	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);

	/* Update VFDB with current message and schedule its handling */
	mutex_lock(&BP_VFDB(bp)->event_mutex);
	BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
		le32_to_cpu(vfpf_event->msg_addr_hi);
	BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
		le32_to_cpu(vfpf_event->msg_addr_lo);
	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
	mutex_unlock(&BP_VFDB(bp)->event_mutex);

	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}

/* handle new vf-pf messages */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
	u64 events;
	u8 vf_idx;
	int rc;

	if (!vfdb)
		return;

	mutex_lock(&vfdb->event_mutex);
	events = vfdb->event_occur;
	vfdb->event_occur = 0;
	mutex_unlock(&vfdb->event_mutex);

	for_each_vf(bp, vf_idx) {
		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* Handle VFs which have pending events */
		if (!(events & (1ULL << vf_idx)))
			continue;

		DP(BNX2X_MSG_IOV,
		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
		   mbx->first_tlv.resp_msg_offset);

		/* dmae to get the VF request */
		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
					  vf->abs_vfid, mbx->vf_addr_hi,
					  mbx->vf_addr_lo,
					  sizeof(union vfpf_tlvs) / 4);
		if (rc) {
			BNX2X_ERR("Failed to copy request VF %d\n",
				  vf->abs_vfid);
			bnx2x_vf_release(bp, vf);
			return;
		}

		/* process the VF message header */
		mbx->first_tlv = mbx->msg->req.first_tlv;

		/* Clean response buffer to refrain from falsely
		 * seeing chains.
		 */
		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));

		/* dispatch the request (will prepare the response) */
		bnx2x_vf_mbx_request(bp, vf, mbx);
	}
}

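/* The CRC is computed over the board as trimmed to bulletin->length, so
 * the length must be fixed before the checksum.
 */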
void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
				bool support_long)
{
	/* Older VFs contain a bug where they can't check CRC for bulletin
	 * boards of length greater than legacy size.
	 */
	bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
					  BULLETIN_CONTENT_LEGACY_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
}

/* propagate local bulletin board to vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
			     vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bnx2x_vf_bulletin_finalize(bulletin,
				   (bnx2x_vf(bp, vf, cfg_flags) &
				    VF_CFG_EXT_BULLETIN) ? true : false);

	/* propagate bulletin board via dmae to vm memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}