bnx2x_vfpf.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043
  1. /* bnx2x_vfpf.c: Broadcom Everest network driver.
  2. *
  3. * Copyright 2009-2013 Broadcom Corporation
  4. *
  5. * Unless you and Broadcom execute a separate written software license
  6. * agreement governing use of this software, this software is licensed to you
  7. * under the terms of the GNU General Public License version 2, available
  8. * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  9. *
  10. * Notwithstanding the above, under no circumstances may you combine this
  11. * software in any way with any other Broadcom software provided under a
  12. * license other than the GPL, without Broadcom's express prior written
  13. * consent.
  14. *
  15. * Maintained by: Eilon Greenstein <eilong@broadcom.com>
  16. * Written by: Shmulik Ravid <shmulikr@broadcom.com>
  17. * Ariel Elior <ariele@broadcom.com>
  18. */
  19. #include "bnx2x.h"
  20. #include "bnx2x_cmn.h"
  21. #include <linux/crc32.h>
  22. static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
  23. /* place a given tlv on the tlv buffer at a given offset */
  24. static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
  25. u16 offset, u16 type, u16 length)
  26. {
  27. struct channel_tlv *tl =
  28. (struct channel_tlv *)(tlvs_list + offset);
  29. tl->type = type;
  30. tl->length = length;
  31. }
/* Clear the mailbox and init the header of the first tlv.
 *
 * Acquires bp->vf2pf_mutex, which serializes all use of the single
 * VF->PF mailbox; the matching release is bnx2x_vfpf_finalize(), so
 * every caller must pair this with a finalize on all exit paths.
 */
static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
			    u16 type, u16 length)
{
	mutex_lock(&bp->vf2pf_mutex);

	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header - tells the PF where, relative to the
	 * request, the response buffer begins
	 */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
/* Releases the mailbox.
 *
 * Counterpart of bnx2x_vfpf_prep(); drops bp->vf2pf_mutex so the next
 * VF->PF exchange can use the mailbox.
 */
static void bnx2x_vfpf_finalize(struct bnx2x *bp,
				struct vfpf_first_tlv *first_tlv)
{
	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
	   first_tlv->tl.type);

	mutex_unlock(&bp->vf2pf_mutex);
}
  54. /* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
  55. static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
  56. enum channel_tlvs req_tlv)
  57. {
  58. struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
  59. do {
  60. if (tlv->type == req_tlv)
  61. return tlv;
  62. if (!tlv->length) {
  63. BNX2X_ERR("Found TLV with length 0\n");
  64. return NULL;
  65. }
  66. tlvs_list += tlv->length;
  67. tlv = (struct channel_tlv *)tlvs_list;
  68. } while (tlv->type != CHANNEL_TLV_LIST_END);
  69. DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
  70. return NULL;
  71. }
  72. /* list the types and lengths of the tlvs on the buffer */
  73. static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
  74. {
  75. int i = 1;
  76. struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
  77. while (tlv->type != CHANNEL_TLV_LIST_END) {
  78. /* output tlv */
  79. DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
  80. tlv->type, tlv->length);
  81. /* advance to next tlv */
  82. tlvs_list += tlv->length;
  83. /* cast general tlv list pointer to channel tlv header*/
  84. tlv = (struct channel_tlv *)tlvs_list;
  85. i++;
  86. /* break condition for this loop */
  87. if (i > MAX_TLVS_IN_LIST) {
  88. WARN(true, "corrupt tlvs");
  89. return;
  90. }
  91. }
  92. /* output last tlv */
  93. DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
  94. tlv->type, tlv->length);
  95. }
  96. /* test whether we support a tlv type */
  97. bool bnx2x_tlv_supported(u16 tlvtype)
  98. {
  99. return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
  100. }
  101. static inline int bnx2x_pfvf_status_codes(int rc)
  102. {
  103. switch (rc) {
  104. case 0:
  105. return PFVF_STATUS_SUCCESS;
  106. case -ENOMEM:
  107. return PFVF_STATUS_NO_RESOURCE;
  108. default:
  109. return PFVF_STATUS_FAILURE;
  110. }
  111. }
/* Send the request in the mailbox to the PF and wait for its response.
 *
 * @done:        byte inside the response buffer that the PF sets non-zero
 *               when it has written its reply; must be zero on entry.
 * @msg_mapping: DMA address of the request buffer, handed to the PF FW
 *               through the CSDM zone registers.
 *
 * Returns 0 when a response arrived, -EINVAL on a bad precondition or a
 * down channel, -EAGAIN on timeout. Caller must hold the vf2pf mutex
 * (taken in bnx2x_vfpf_prep()).
 */
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 100, interval = 100; /* wait for 10 seconds */

	/* a non-zero 'done' here means a previous exchange never cleaned up */
	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* if PF indicated channel is down avoid sending message. Return success
	 * so calling flow can continue
	 */
	bnx2x_sample_bulletin(bp);
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
		*done = PFVF_STATUS_SUCCESS;
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete (it flips *done via DMA when finished) */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}
  156. static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
  157. {
  158. u32 me_reg;
  159. int tout = 10, interval = 100; /* Wait for 1 sec */
  160. do {
  161. /* pxp traps vf read of doorbells and returns me reg value */
  162. me_reg = readl(bp->doorbells);
  163. if (GOOD_ME_REG(me_reg))
  164. break;
  165. msleep(interval);
  166. BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
  167. me_reg);
  168. } while (tout-- > 0);
  169. if (!GOOD_ME_REG(me_reg)) {
  170. BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
  171. return -EINVAL;
  172. }
  173. DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
  174. *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
  175. return 0;
  176. }
/* Negotiate resources (queues, SBs, filters) with the PF over the
 * VF-PF channel.
 *
 * Sends an ACQUIRE request and, if the PF answers NO_RESOURCE, retries
 * with the PF's recommended (smaller) amounts up to VF_ACQUIRE_THRESH
 * times. On success the response is cached in bp->acquire_resp and the
 * relevant bp/HW-info fields are populated from it.
 *
 * Returns 0 on success, -EAGAIN on VF-id read failure, PF timeout or PF
 * refusal.
 */
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv (takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;

	/* resource request - what this VF would like to get */
	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* Request physical port identifier */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req,
		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request - never ask for more than the
			 * PF said it can grant
			 */
			req->resc_request.num_txqs =
				min(req->resc_request.num_txqs,
				    bp->acquire_resp.resc.num_txqs);
			req->resc_request.num_rxqs =
				min(req->resc_request.num_rxqs,
				    bp->acquire_resp.resc.num_rxqs);
			req->resc_request.num_sbs =
				min(req->resc_request.num_sbs,
				    bp->acquire_resp.resc.num_sbs);
			req->resc_request.num_mac_filters =
				min(req->resc_request.num_mac_filters,
				    bp->acquire_resp.resc.num_mac_filters);
			req->resc_request.num_vlan_filters =
				min(req->resc_request.num_vlan_filters,
				    bp->acquire_resp.resc.num_vlan_filters);
			req->resc_request.num_mc_filters =
				min(req->resc_request.num_mc_filters,
				    bp->acquire_resp.resc.num_mc_filters);

			/* Clear response buffer before the next round-trip */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* PF reports error */
			BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
				  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* Retrieve physical port id (if possible) */
	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
			 bnx2x_search_tlv_list(bp, resp,
					       CHANNEL_TLV_PHYS_PORT_ID);
	if (phys_port_resp) {
		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
		bp->flags |= HAS_PHYS_PORT_ID;
	}

	/* get HW info - VFs run in a fixed configuration, so most fields
	 * are hard-wired here rather than probed from HW
	 */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	/* adopt the MAC the PF assigned us, if it handed out a valid one */
	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}
  295. int bnx2x_vfpf_release(struct bnx2x *bp)
  296. {
  297. struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
  298. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  299. u32 rc, vf_id;
  300. /* clear mailbox and prep first tlv */
  301. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
  302. if (bnx2x_get_vf_id(bp, &vf_id)) {
  303. rc = -EAGAIN;
  304. goto out;
  305. }
  306. req->vf_id = vf_id;
  307. /* add list termination tlv */
  308. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  309. sizeof(struct channel_list_end_tlv));
  310. /* output tlvs list */
  311. bnx2x_dp_tlv_list(bp, req);
  312. /* send release request */
  313. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  314. if (rc)
  315. /* PF timeout */
  316. goto out;
  317. if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
  318. /* PF released us */
  319. DP(BNX2X_MSG_SP, "vf released\n");
  320. } else {
  321. /* PF reports error */
  322. BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
  323. resp->hdr.status);
  324. rc = -EAGAIN;
  325. goto out;
  326. }
  327. out:
  328. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  329. return rc;
  330. }
/* Tell PF about SB addresses.
 *
 * Sends the INIT request carrying the DMA addresses of the per-queue
 * status blocks and the statistics buffer, so the PF can program the FW
 * on the VF's behalf.
 *
 * Returns 0 on success, a negative error code on send failure or PF
 * rejection.
 */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv (takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks - one DMA address per ethernet queue */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - requests only supports single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	req->stats_stride = sizeof(struct per_queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
/* CLOSE VF - opposite to INIT_VF.
 *
 * Tears down the queues and MAC via the PF, sends the CLOSE request,
 * then shuts down interrupts/NAPI/IRQs locally. The local shutdown at
 * 'free_irq' happens even when no valid VF id is available, so the
 * netdev side is always quiesced. Errors from the PF are logged but
 * not propagated - close must proceed regardless.
 */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no sense to
	 * continue with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv (takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);

	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}
/* Initialize the classification objects (mac, vlan, mcast, rss) that
 * hang off a VF's leading queue.
 *
 * All objects share the leading queue's client id/cid and the VF's
 * filter_state; their ramrod data buffers live in the VF's slow-path
 * area (bnx2x_vf_sp*). Marks the queue as leading on exit.
 */
static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

	/* mac */
	bnx2x_init_mac_obj(bp, &q->mac_obj,
			   cl_id, q->cid, func_id,
			   bnx2x_vf_sp(bp, vf, mac_rdata),
			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &vf->filter_state,
			   BNX2X_OBJ_TYPE_RX_TX,
			   &bp->macs_pool);
	/* vlan */
	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
			    cl_id, q->cid, func_id,
			    bnx2x_vf_sp(bp, vf, vlan_rdata),
			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
			    BNX2X_FILTER_VLAN_PENDING,
			    &vf->filter_state,
			    BNX2X_OBJ_TYPE_RX_TX,
			    &bp->vlans_pool);

	/* mcast */
	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
			     q->cid, func_id, func_id,
			     bnx2x_vf_sp(bp, vf, mcast_rdata),
			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING,
			     &vf->filter_state,
			     BNX2X_OBJ_TYPE_RX_TX);

	/* rss */
	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
				  func_id, func_id,
				  bnx2x_vf_sp(bp, vf, rss_rdata),
				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING,
				  &vf->filter_state,
				  BNX2X_OBJ_TYPE_RX_TX);

	vf->leading_rss = cl_id;
	q->is_leading = true;
}
/* ask the pf to open a queue for the vf
 *
 * Builds a SETUP_Q request describing both the Rx and Tx sides of
 * fastpath queue fp (ring DMA addresses, SB binding, coalescing rates,
 * TPA settings) and sends it over the channel.
 *
 * Returns 0 on success, a negative error code on send failure or a
 * non-success PF status.
 */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool is_leading)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u8 fp_idx = fp->index;
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv (takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (!fp->disable_tpa) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	if (is_leading)
		flags |= VFPF_QUEUE_FLG_LEADING_RSS;

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* hc_rate is interrupts/sec derived from the usec coalescing tick;
	 * 0 disables coalescing
	 */
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	/* max SGEs per packet: MTU rounded up to SGE pages, then rounded
	 * up again to a whole number of PAGES_PER_SGE groups
	 */
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	/* NOTE(review): status is checked even when the send itself failed;
	 * the mailbox was zeroed in prep, so a stale-looking status here
	 * just re-flags the error - presumably intentional. Confirm
	 * PFVF_STATUS_SUCCESS != 0 if relying on that.
	 */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
/* Ask the PF to tear down VF queue 'qidx'.
 *
 * Returns 0 on success, a negative error code on send failure or a
 * non-success PF status.
 */
static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv (takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		goto out;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		rc = -EINVAL;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
  550. /* request pf to add a mac for the vf */
  551. int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
  552. {
  553. struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
  554. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  555. struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
  556. int rc = 0;
  557. /* clear mailbox and prep first tlv */
  558. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
  559. sizeof(*req));
  560. req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
  561. req->vf_qid = vf_qid;
  562. req->n_mac_vlan_filters = 1;
  563. req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
  564. if (set)
  565. req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
  566. /* sample bulletin board for new mac */
  567. bnx2x_sample_bulletin(bp);
  568. /* copy mac from device to request */
  569. memcpy(req->filters[0].mac, addr, ETH_ALEN);
  570. /* add list termination tlv */
  571. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  572. sizeof(struct channel_list_end_tlv));
  573. /* output tlvs list */
  574. bnx2x_dp_tlv_list(bp, req);
  575. /* send message to pf */
  576. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  577. if (rc) {
  578. BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
  579. goto out;
  580. }
  581. /* failure may mean PF was configured with a new mac for us */
  582. while (resp->hdr.status == PFVF_STATUS_FAILURE) {
  583. DP(BNX2X_MSG_IOV,
  584. "vfpf SET MAC failed. Check bulletin board for new posts\n");
  585. /* copy mac from bulletin to device */
  586. memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
  587. /* check if bulletin board was updated */
  588. if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
  589. /* copy mac from device to request */
  590. memcpy(req->filters[0].mac, bp->dev->dev_addr,
  591. ETH_ALEN);
  592. /* send message to pf */
  593. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
  594. bp->vf2pf_mbox_mapping);
  595. } else {
  596. /* no new info in bulletin */
  597. break;
  598. }
  599. }
  600. if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
  601. BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
  602. rc = -EINVAL;
  603. }
  604. out:
  605. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  606. return 0;
  607. }
/* request pf to config rss table for vf queues
 *
 * Copies the indirection table, key and mode/hash flags from 'params'
 * into an UPDATE_RSS request. A non-success PF status is deliberately
 * NOT treated as an error - older PF drivers don't know this TLV.
 *
 * Returns 0 on success (including PF-side rejection, see above), a
 * negative error code only if the message could not be sent.
 */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			  struct bnx2x_config_rss_params *params)
{
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
	int rc = 0;

	/* clear mailbox and prep first tlv (takes the vf2pf mutex) */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
			sizeof(*req));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
	req->rss_key_size = T_ETH_RSS_KEY;
	req->rss_result_mask = params->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
		req->rss_flags |= VFPF_RSS_SET_SRCH;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
		req->rss_flags |= VFPF_RSS_IPV4;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
		req->rss_flags |= VFPF_RSS_IPV4_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
		req->rss_flags |= VFPF_RSS_IPV4_UDP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
		req->rss_flags |= VFPF_RSS_IPV6;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
		req->rss_flags |= VFPF_RSS_IPV6_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
		req->rss_flags |= VFPF_RSS_IPV6_UDP;

	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		/* Since older drivers don't support this feature (and VF has
		 * no way of knowing other than failing this), don't propagate
		 * an error in this case.
		 */
		DP(BNX2X_MSG_IOV,
		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
		   resp->hdr.status);
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}
  667. int bnx2x_vfpf_set_mcast(struct net_device *dev)
  668. {
  669. struct bnx2x *bp = netdev_priv(dev);
  670. struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
  671. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  672. int rc, i = 0;
  673. struct netdev_hw_addr *ha;
  674. if (bp->state != BNX2X_STATE_OPEN) {
  675. DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
  676. return -EINVAL;
  677. }
  678. /* clear mailbox and prep first tlv */
  679. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
  680. sizeof(*req));
  681. /* Get Rx mode requested */
  682. DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
  683. netdev_for_each_mc_addr(ha, dev) {
  684. DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
  685. bnx2x_mc_addr(ha));
  686. memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
  687. i++;
  688. }
  689. /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
  690. * addresses tops
  691. */
  692. if (i >= PFVF_MAX_MULTICAST_PER_VF) {
  693. DP(NETIF_MSG_IFUP,
  694. "VF supports not more than %d multicast MAC addresses\n",
  695. PFVF_MAX_MULTICAST_PER_VF);
  696. return -EINVAL;
  697. }
  698. req->n_multicast = i;
  699. req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
  700. req->vf_qid = 0;
  701. /* add list termination tlv */
  702. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  703. sizeof(struct channel_list_end_tlv));
  704. /* output tlvs list */
  705. bnx2x_dp_tlv_list(bp, req);
  706. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  707. if (rc) {
  708. BNX2X_ERR("Sending a message failed: %d\n", rc);
  709. goto out;
  710. }
  711. if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
  712. BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
  713. resp->hdr.status);
  714. rc = -EINVAL;
  715. }
  716. out:
  717. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  718. return 0;
  719. }
  720. int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
  721. {
  722. int mode = bp->rx_mode;
  723. struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
  724. struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
  725. int rc;
  726. /* clear mailbox and prep first tlv */
  727. bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
  728. sizeof(*req));
  729. DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
  730. switch (mode) {
  731. case BNX2X_RX_MODE_NONE: /* no Rx */
  732. req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
  733. break;
  734. case BNX2X_RX_MODE_NORMAL:
  735. req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
  736. req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
  737. req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
  738. break;
  739. case BNX2X_RX_MODE_ALLMULTI:
  740. req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
  741. req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
  742. req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
  743. break;
  744. case BNX2X_RX_MODE_PROMISC:
  745. req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
  746. req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
  747. req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
  748. break;
  749. default:
  750. BNX2X_ERR("BAD rx mode (%d)\n", mode);
  751. rc = -EINVAL;
  752. goto out;
  753. }
  754. req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
  755. req->vf_qid = 0;
  756. /* add list termination tlv */
  757. bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
  758. sizeof(struct channel_list_end_tlv));
  759. /* output tlvs list */
  760. bnx2x_dp_tlv_list(bp, req);
  761. rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
  762. if (rc)
  763. BNX2X_ERR("Sending a message failed: %d\n", rc);
  764. if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
  765. BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
  766. rc = -EINVAL;
  767. }
  768. out:
  769. bnx2x_vfpf_finalize(bp, &req->first_tlv);
  770. return rc;
  771. }
  772. /* General service functions */
  773. static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
  774. {
  775. u32 addr = BAR_CSTRORM_INTMEM +
  776. CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
  777. REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
  778. }
  779. static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
  780. {
  781. u32 addr = BAR_CSTRORM_INTMEM +
  782. CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
  783. REG_WR8(bp, addr, 1);
  784. }
  785. static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
  786. {
  787. int i;
  788. for_each_vf(bp, i)
  789. storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
  790. }
/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	/* finish any pending FLR cleanup for this VF before re-enabling it */
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}
/* this works only on !E1h */
/* DMAE-copy @len32 dwords between a PF buffer (@pf_addr) and a VF's memory
 * (@vf_addr_hi/@vf_addr_lo); @from_vf selects the direction (VF->PF when
 * true, PF->VF otherwise).  Returns 0 on success or a DMAE error code.
 */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	/* VFs are only supported on non-E1x chip revisions */
	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		/* VF memory is the source, the PF buffer is the destination */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		/* PF buffer is the source, the VF memory is the destination */
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
  840. static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
  841. struct bnx2x_virtf *vf)
  842. {
  843. struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
  844. u16 length, type;
  845. /* prepare response */
  846. type = mbx->first_tlv.tl.type;
  847. length = type == CHANNEL_TLV_ACQUIRE ?
  848. sizeof(struct pfvf_acquire_resp_tlv) :
  849. sizeof(struct pfvf_general_resp_tlv);
  850. bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
  851. bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
  852. sizeof(struct channel_list_end_tlv));
  853. }
/* DMA the response prepared in the PF-side mailbox buffer back into the
 * VF's mailbox memory and ack the FW.  The response body is copied first
 * and the first u64 (the header carrying the status/done indication) last,
 * because the VF polls the header to detect completion.
 */
static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
				       struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
	dma_addr_t pf_addr;
	u64 vf_addr;
	int rc;

	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	/* translate the op result into a channel status code for the VF */
	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* Copy the response buffer. The first u64 is written afterwards, as
	 * the vf is sensitive to the header being written
	 */
	vf_addr += sizeof(u64);
	pf_addr += sizeof(u64);
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
	if (rc) {
		BNX2X_ERR("Failed to copy response body to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	/* rewind both addresses to the header for the final copy */
	vf_addr -= sizeof(u64);
	pf_addr -= sizeof(u64);

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* initiate dmae to send the response */
	mbx->flags &= ~VF_MSG_INPROCESS;

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	return;

mbx_error:
	/* the channel to this VF is broken - tear the VF down */
	bnx2x_vf_release(bp, vf, false); /* non blocking */
}
/* build a single-TLV response (status derived from vf->op_rc) and send it
 * back to the VF over the channel
 */
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
			      struct bnx2x_virtf *vf)
{
	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
	bnx2x_vf_mbx_resp_send_msg(bp, vf);
}
  916. static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
  917. struct bnx2x_virtf *vf,
  918. void *buffer,
  919. u16 *offset)
  920. {
  921. struct vfpf_port_phys_id_resp_tlv *port_id;
  922. if (!(bp->flags & HAS_PHYS_PORT_ID))
  923. return;
  924. bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
  925. sizeof(struct vfpf_port_phys_id_resp_tlv));
  926. port_id = (struct vfpf_port_phys_id_resp_tlv *)
  927. (((u8 *)buffer) + *offset);
  928. memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
  929. /* Offset should continue representing the offset to the tail
  930. * of TLV data (outside this function scope)
  931. */
  932. *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
  933. }
/* build and send the response to an ACQUIRE request: PF/chip information
 * plus the resource counts granted to the VF, or - when the request could
 * not be fully satisfied - the maximum counts available so the VF can
 * retry with a smaller request.
 */
static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);
	u16 length;

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = bp->db_size;
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			/* report the hw queue-zone id of each granted queue */
			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			/* report the IGU sb id / hc qzone of each granted sb */
			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}

			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* prepare response */
	length = sizeof(struct pfvf_acquire_resp_tlv);
	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);

	/* Handle possible VF requests for physical port identifiers.
	 * 'length' should continue to indicate the offset of the first empty
	 * place in the buffer (i.e., where next TLV should be inserted)
	 */
	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
				  CHANNEL_TLV_PHYS_PORT_ID))
		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);

	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* send the response */
	vf->op_rc = vfop_status;
	bnx2x_vf_mbx_resp_send_msg(bp, vf);
}
/* handle an ACQUIRE request: try to reserve the resources the VF asked
 * for, remember where the VF's bulletin board lives, then send the result
 */
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdef info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* store address of vf's bulletin board */
	vf->bulletin_map = acquire->bulletin_addr;

	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}
/* handle an INIT request: record the DMA addresses supplied by the VF
 * (slow-path queue, statistics, status blocks), run function-level init,
 * and respond with the result
 */
static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->stats_stride = init->stats_stride;
	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* set VF multiqueue statistics collection mode */
	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
		vf->cfg_flags |= VF_CFG_STATS_COALESCE;

	/* response */
	bnx2x_vf_mbx_resp(bp, vf);
}
  1056. /* convert MBX queue-flags to standard SP queue-flags */
  1057. static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
  1058. unsigned long *sp_q_flags)
  1059. {
  1060. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
  1061. __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
  1062. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
  1063. __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
  1064. if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
  1065. __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
  1066. if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
  1067. __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
  1068. if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
  1069. __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
  1070. if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
  1071. __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
  1072. if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
  1073. __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
  1074. if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
  1075. __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
  1076. if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
  1077. __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
  1078. /* outer vlan removal is set according to PF's multi function mode */
  1079. if (IS_MF_SD(bp))
  1080. __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
  1081. }
/* handle a SETUP_Q request: translate the VF's queue-setup TLV into the
 * standard slow-path queue constructor parameters and launch the
 * (asynchronous) queue setup; on success the response is sent from the
 * cmd.done callback, on any early failure it is sent here
 */
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		vf->op_rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		/* the leading queue gets extra init (RSS/mcast duties) */
		if (bnx2x_vfq_is_leading(q))
			bnx2x_leading_vfq_init(bp, vf, q);

		/* re-init the VF operation context */
		memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
		setup_p = &vf->op_params.qctor.prep_qsetup;
		init_p = &vf->op_params.qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;

			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx: DMA addresses and ring geometry
			 * come straight from the VF's request
			 */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			/* rx setup - multicast engine */
			if (bnx2x_vfq_is_leading(q)) {
				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);

				rxq_params->mcast_engine_id = mcast_id;
				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
			}

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);

		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
		if (vf->op_rc)
			goto response;
		return;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf);
}
/* ordered states of the SET_Q_FILTERS handling state machine
 * (bnx2x_vfop_mbx_qfilters): MAC filters, then VLAN filters, then the
 * rx-mode mask, then the multicast list, then completion
 */
enum bnx2x_vfop_filters_state {
	BNX2X_VFOP_MBX_Q_FILTERS_MACS,
	BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
	BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
	BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
	BNX2X_VFOP_MBX_Q_FILTERS_DONE
};
  1192. static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
  1193. struct bnx2x_virtf *vf,
  1194. struct vfpf_set_q_filters_tlv *tlv,
  1195. struct bnx2x_vfop_filters **pfl,
  1196. u32 type_flag)
  1197. {
  1198. int i, j;
  1199. struct bnx2x_vfop_filters *fl = NULL;
  1200. size_t fsz;
  1201. fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
  1202. sizeof(struct bnx2x_vfop_filters);
  1203. fl = kzalloc(fsz, GFP_KERNEL);
  1204. if (!fl)
  1205. return -ENOMEM;
  1206. INIT_LIST_HEAD(&fl->head);
  1207. for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
  1208. struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
  1209. if ((msg_filter->flags & type_flag) != type_flag)
  1210. continue;
  1211. if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
  1212. fl->filters[j].mac = msg_filter->mac;
  1213. fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
  1214. } else {
  1215. fl->filters[j].vid = msg_filter->vlan_tag;
  1216. fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
  1217. }
  1218. fl->filters[j].add =
  1219. (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
  1220. true : false;
  1221. list_add_tail(&fl->filters[j++].link, &fl->head);
  1222. }
  1223. if (list_empty(&fl->head))
  1224. kfree(fl);
  1225. else
  1226. *pfl = fl;
  1227. return 0;
  1228. }
  1229. static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
  1230. struct vfpf_q_mac_vlan_filter *filter)
  1231. {
  1232. DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
  1233. if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
  1234. DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
  1235. if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
  1236. DP_CONT(msglvl, ", MAC=%pM", filter->mac);
  1237. DP_CONT(msglvl, "\n");
  1238. }
  1239. static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
  1240. struct vfpf_set_q_filters_tlv *filters)
  1241. {
  1242. int i;
  1243. if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
  1244. for (i = 0; i < filters->n_mac_vlan_filters; i++)
  1245. bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
  1246. &filters->filters[i]);
  1247. if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
  1248. DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
  1249. if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
  1250. for (i = 0; i < filters->n_multicast; i++)
  1251. DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
  1252. }
  1253. #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
  1254. #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
/* State machine applying a VF's SET_Q_FILTERS request one category at a
 * time: MAC filters -> VLAN filters -> rx-mode mask -> multicast list.
 * Each asynchronous sub-command re-enters this function as its completion
 * callback (cmd.done) with vfop->state already advanced to the next
 * category; any failure jumps to op_err, which logs and terminates the op.
 */
static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;
	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_filters_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_mbx_qfilters,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "STATE: %d\n", state);

	/* a previous sub-command failed - abort the whole operation */
	if (vfop->rc < 0)
		goto op_err;

	switch (state) {
	case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build mac list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_MAC_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set mac list */
				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
							     msg->vf_qid,
							     false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				/* async - cmd.done re-enters in VLANS state */
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;

		/* check for any vlan/mac changes */
		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
			/* build vlan list */
			struct bnx2x_vfop_filters *fl = NULL;

			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
							     VFPF_VLAN_FILTER);
			if (vfop->rc)
				goto op_err;
			if (fl) {
				/* set vlan list */
				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
							      msg->vf_qid,
							      false);
				if (rc) {
					vfop->rc = rc;
					goto op_err;
				}
				/* async - cmd.done re-enters in RXMODE state */
				return;
			}
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;

		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
			unsigned long accept = 0;
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			/* covert VF-PF if mask to bnx2x accept flags */
			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
				__set_bit(BNX2X_ACCEPT_UNICAST, &accept);

			if (msg->rx_mask &
			    VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

			/* A packet arriving the vf's mac should be accepted
			 * with any vlan, unless a vlan has already been
			 * configured.
			 */
			if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
				__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

			/* set rx-mode */
			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
						   msg->vf_qid, accept);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			/* async - cmd.done re-enters in MCAST state */
			return;
		}
		/* fall through */

	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
		/* next state */
		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
			/* set mcasts */
			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
						  msg->n_multicast, false);
			if (rc) {
				vfop->rc = rc;
				goto op_err;
			}
			/* async - cmd.done re-enters in DONE state */
			return;
		}
		/* fall through */
op_done:
	case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
op_err:
	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
		  vf->abs_vfid, msg->vf_qid, vfop->rc);
	goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
}
  1379. static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
  1380. struct bnx2x_virtf *vf,
  1381. struct bnx2x_vfop_cmd *cmd)
  1382. {
  1383. struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
  1384. if (vfop) {
  1385. bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
  1386. bnx2x_vfop_mbx_qfilters, cmd->done);
  1387. return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
  1388. cmd->block);
  1389. }
  1390. return -ENOMEM;
  1391. }
/* handle a SET_Q_FILTERS request: sanity-check it against the
 * hypervisor-enforced MAC/VLAN settings published in the bulletin board,
 * then launch the qfilters state machine; on rejection respond at once
 */
static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vf_mbx_resp,
		.block = false,
	};

	/* if a mac was already set for this VF via the set vf mac ndo, we only
	 * accept mac configurations of that mac. Why accept them at all?
	 * because PF may have been unable to configure the mac at the time
	 * since queue was not set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		/* once a mac was set by ndo can only accept a single mac... */
		if (filters->n_mac_vlan_filters > 1) {
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
			goto response;
		}

		/* ...and only the mac set by the ndo */
		if (filters->n_mac_vlan_filters == 1 &&
		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			vf->op_rc = -EPERM;
			goto response;
		}
	}

	/* if vlan was set by hypervisor we don't allow guest to config vlan */
	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		int i;

		/* search for vlan filters */
		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
			if (filters->filters[i].flags &
			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
					  vf->abs_vfid);
				vf->op_rc = -EPERM;
				goto response;
			}
		}
	}

	/* verify vf_qid */
	/* NOTE(review): '>' lets vf_qid == vf_rxq_count(vf) through, while
	 * the sibling setup_q handler rejects with '>='; this path also
	 * responds without setting vf->op_rc -- confirm both are intended
	 */
	if (filters->vf_qid > vf_rxq_count(vf))
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
	if (vf->op_rc)
		goto response;
	return;

response:
	bnx2x_vf_mbx_resp(bp, vf);
}
  1453. static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
  1454. struct bnx2x_vf_mbx *mbx)
  1455. {
  1456. int qid = mbx->msg->req.q_op.vf_qid;
  1457. struct bnx2x_vfop_cmd cmd = {
  1458. .done = bnx2x_vf_mbx_resp,
  1459. .block = false,
  1460. };
  1461. DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
  1462. vf->abs_vfid, qid);
  1463. vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
  1464. if (vf->op_rc)
  1465. bnx2x_vf_mbx_resp(bp, vf);
  1466. }
  1467. static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
  1468. struct bnx2x_vf_mbx *mbx)
  1469. {
  1470. struct bnx2x_vfop_cmd cmd = {
  1471. .done = bnx2x_vf_mbx_resp,
  1472. .block = false,
  1473. };
  1474. DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
  1475. vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
  1476. if (vf->op_rc)
  1477. bnx2x_vf_mbx_resp(bp, vf);
  1478. }
  1479. static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
  1480. struct bnx2x_vf_mbx *mbx)
  1481. {
  1482. struct bnx2x_vfop_cmd cmd = {
  1483. .done = bnx2x_vf_mbx_resp,
  1484. .block = false,
  1485. };
  1486. DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
  1487. vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
  1488. if (vf->op_rc)
  1489. bnx2x_vf_mbx_resp(bp, vf);
  1490. }
  1491. static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
  1492. struct bnx2x_vf_mbx *mbx)
  1493. {
  1494. struct bnx2x_vfop_cmd cmd = {
  1495. .done = bnx2x_vf_mbx_resp,
  1496. .block = false,
  1497. };
  1498. struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
  1499. struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
  1500. if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
  1501. rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
  1502. BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
  1503. vf->index);
  1504. vf->op_rc = -EINVAL;
  1505. goto mbx_resp;
  1506. }
  1507. /* set vfop params according to rss tlv */
  1508. memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
  1509. T_ETH_INDIRECTION_TABLE_SIZE);
  1510. memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
  1511. sizeof(rss_tlv->rss_key));
  1512. vf_op_params->rss_obj = &vf->rss_conf_obj;
  1513. vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
  1514. /* flags handled individually for backward/forward compatability */
  1515. vf_op_params->rss_flags = 0;
  1516. vf_op_params->ramrod_flags = 0;
  1517. if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
  1518. __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
  1519. if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
  1520. __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
  1521. if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
  1522. __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
  1523. if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
  1524. __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
  1525. if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
  1526. __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
  1527. if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
  1528. __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
  1529. if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
  1530. __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
  1531. if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
  1532. __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
  1533. if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
  1534. __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
  1535. if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
  1536. rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
  1537. (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
  1538. rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
  1539. BNX2X_ERR("about to hit a FW assert. aborting...\n");
  1540. vf->op_rc = -EINVAL;
  1541. goto mbx_resp;
  1542. }
  1543. vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
  1544. mbx_resp:
  1545. if (vf->op_rc)
  1546. bnx2x_vf_mbx_resp(bp, vf);
  1547. }
/* dispatch request */
/* Dispatch a VF mailbox request to the matching handler by TLV type.
 *
 * For supported TLVs, the per-VF channel mutex is taken here and released
 * later by the response path; each handler therefore returns directly.
 * For unsupported TLVs (or an unhandled switch case) control falls through
 * to the not-supported response / FW ack below.
 */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			return;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			return;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
		}

	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		/* dump the first 20 bytes of the request for diagnosis */
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
	}

	/* can we respond to VF (do we have an address for it?) */
	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
		/* mbx_resp uses the op_rc of the VF */
		/* NOTE(review): op_rc normally carries a -errno but here it is
		 * assigned a PFVF_STATUS_* code - presumably the response path
		 * forwards it verbatim as the channel status; confirm.
		 */
		vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
		/* notify the VF that we do not support this request */
		bnx2x_vf_mbx_resp(bp, vf);
	} else {
		/* can't send a response since this VF is unknown to us
		 * just ack the FW to release the mailbox and unlock
		 * the channel.
		 */
		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		/* Firmware ack should be written before unlocking channel */
		mmiowb();
		/* NOTE(review): on the unsupported-TLV path the channel was
		 * never locked above, yet it is unlocked here - verify
		 * bnx2x_unlock_vf_pf_channel tolerates an unmatched unlock.
		 */
		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	}
}
  1617. /* handle new vf-pf message */
  1618. void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
  1619. {
  1620. struct bnx2x_virtf *vf;
  1621. struct bnx2x_vf_mbx *mbx;
  1622. u8 vf_idx;
  1623. int rc;
  1624. DP(BNX2X_MSG_IOV,
  1625. "vf pf event received: vfid %d, address_hi %x, address lo %x",
  1626. vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
  1627. /* Sanity checks consider removing later */
  1628. /* check if the vf_id is valid */
  1629. if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
  1630. BNX2X_NR_VIRTFN(bp)) {
  1631. BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
  1632. vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
  1633. goto mbx_done;
  1634. }
  1635. vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
  1636. mbx = BP_VF_MBX(bp, vf_idx);
  1637. /* verify an event is not currently being processed -
  1638. * debug failsafe only
  1639. */
  1640. if (mbx->flags & VF_MSG_INPROCESS) {
  1641. BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
  1642. vfpf_event->vf_id);
  1643. goto mbx_done;
  1644. }
  1645. vf = BP_VF(bp, vf_idx);
  1646. /* save the VF message address */
  1647. mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
  1648. mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
  1649. DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
  1650. mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
  1651. /* dmae to get the VF request */
  1652. rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
  1653. mbx->vf_addr_hi, mbx->vf_addr_lo,
  1654. sizeof(union vfpf_tlvs)/4);
  1655. if (rc) {
  1656. BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
  1657. goto mbx_error;
  1658. }
  1659. /* process the VF message header */
  1660. mbx->first_tlv = mbx->msg->req.first_tlv;
  1661. /* Clean response buffer to refrain from falsely seeing chains */
  1662. memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
  1663. /* dispatch the request (will prepare the response) */
  1664. bnx2x_vf_mbx_request(bp, vf, mbx);
  1665. goto mbx_done;
  1666. mbx_error:
  1667. bnx2x_vf_release(bp, vf, false); /* non blocking */
  1668. mbx_done:
  1669. return;
  1670. }
  1671. /* propagate local bulletin board to vf */
  1672. int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
  1673. {
  1674. struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
  1675. dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
  1676. vf * BULLETIN_CONTENT_SIZE;
  1677. dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
  1678. int rc;
  1679. /* can only update vf after init took place */
  1680. if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
  1681. bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
  1682. return 0;
  1683. /* increment bulletin board version and compute crc */
  1684. bulletin->version++;
  1685. bulletin->length = BULLETIN_CONTENT_SIZE;
  1686. bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
  1687. /* propagate bulletin board via dmae to vm memory */
  1688. rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
  1689. bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
  1690. U64_LO(vf_addr), bulletin->length / 4);
  1691. return rc;
  1692. }