bnx2x_vfpf.c

/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *             Ariel Elior <ariel.elior@qlogic.com>
 */
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);

/* place a given tlv on the tlv buffer at a given offset */
static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
                          u16 offset, u16 type, u16 length)
{
    struct channel_tlv *tl =
        (struct channel_tlv *)(tlvs_list + offset);

    tl->type = type;
    tl->length = length;
}
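
/* Illustration only (not part of the driver): once the request helpers below
 * have chained their TLVs, the request half of the mailbox looks roughly like
 * this -- each header's 'length' is the distance to the next header, and the
 * chain is closed by a CHANNEL_TLV_LIST_END entry:
 *
 *    offset 0                      first TLV (e.g. CHANNEL_TLV_ACQUIRE)
 *    offset first_tlv.tl.length    optional TLVs (e.g. CHANNEL_TLV_PHYS_PORT_ID)
 *    ...                           CHANNEL_TLV_LIST_END terminator
 */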
/* Clear the mailbox and init the header of the first tlv */
static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
                            u16 type, u16 length)
{
    mutex_lock(&bp->vf2pf_mutex);

    DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
       type);

    /* Clear mailbox */
    memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

    /* init type and length */
    bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

    /* init first tlv header */
    first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* releases the mailbox */
static void bnx2x_vfpf_finalize(struct bnx2x *bp,
                                struct vfpf_first_tlv *first_tlv)
{
    DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
       first_tlv->tl.type);

    mutex_unlock(&bp->vf2pf_mutex);
}
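
/* Usage sketch (illustrative only; CHANNEL_TLV_FOO is a placeholder, not a
 * real TLV type): every request helper below brackets its work with this
 * prep/finalize pair, which also serializes access to the single mailbox:
 *
 *    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_FOO, sizeof(*req));
 *    ... build additional TLVs, send, check resp->hdr.status ...
 *    bnx2x_vfpf_finalize(bp, &req->first_tlv);
 */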
/* Finds a TLV by type in a TLV buffer; if found, returns pointer to the TLV */
static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
                                   enum channel_tlvs req_tlv)
{
    struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

    do {
        if (tlv->type == req_tlv)
            return tlv;

        if (!tlv->length) {
            BNX2X_ERR("Found TLV with length 0\n");
            return NULL;
        }

        tlvs_list += tlv->length;
        tlv = (struct channel_tlv *)tlvs_list;
    } while (tlv->type != CHANNEL_TLV_LIST_END);

    DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);

    return NULL;
}

/* list the types and lengths of the tlvs on the buffer */
static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
    int i = 1;
    struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

    while (tlv->type != CHANNEL_TLV_LIST_END) {
        /* output tlv */
        DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
           tlv->type, tlv->length);

        /* advance to next tlv */
        tlvs_list += tlv->length;

        /* cast general tlv list pointer to channel tlv header */
        tlv = (struct channel_tlv *)tlvs_list;

        i++;

        /* break condition for this loop */
        if (i > MAX_TLVS_IN_LIST) {
            WARN(true, "corrupt tlvs");
            return;
        }
    }

    /* output last tlv */
    DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
       tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
    return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
    switch (rc) {
    case 0:
        return PFVF_STATUS_SUCCESS;
    case -ENOMEM:
        return PFVF_STATUS_NO_RESOURCE;
    default:
        return PFVF_STATUS_FAILURE;
    }
}

static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
    struct cstorm_vf_zone_data __iomem *zone_data =
        REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
    int tout = 100, interval = 100; /* wait for 10 seconds */

    if (*done) {
        BNX2X_ERR("done was non zero before message to pf was sent\n");
        WARN_ON(true);
        return -EINVAL;
    }

    /* if PF indicated channel is down avoid sending message. Return success
     * so calling flow can continue
     */
    bnx2x_sample_bulletin(bp);
    if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
        DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
        *done = PFVF_STATUS_SUCCESS;
        return -EINVAL;
    }

    /* Write message address */
    writel(U64_LO(msg_mapping),
           &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
    writel(U64_HI(msg_mapping),
           &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

    /* make sure the address is written before FW accesses it */
    wmb();

    /* Trigger the PF FW */
    writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

    /* Wait for PF to complete */
    while ((tout >= 0) && (!*done)) {
        msleep(interval);
        tout -= 1;

        /* progress indicator - HV can take its own sweet time in
         * answering VFs...
         */
        DP_CONT(BNX2X_MSG_IOV, ".");
    }

    if (!*done) {
        BNX2X_ERR("PF response has timed out\n");
        return -EAGAIN;
    }

    DP(BNX2X_MSG_SP, "Got a response from PF\n");
    return 0;
}
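
/* Handshake summary (comment only): the VF cannot reach PF memory directly,
 * so it (1) writes the DMA address of its mailbox into the cstorm zone
 * registers, (2) issues a write barrier so the address lands before step 3,
 * and (3) writes the trigger byte. Firmware then notifies the PF, and the PF
 * answers by DMA-ing back a response whose final write flips the status/done
 * byte this loop polls, for up to ~10 s (100 iterations x 100 ms).
 */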
static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
    u32 me_reg;
    int tout = 10, interval = 100; /* Wait for 1 sec */

    do {
        /* pxp traps vf read of doorbells and returns me reg value */
        me_reg = readl(bp->doorbells);
        if (GOOD_ME_REG(me_reg))
            break;

        msleep(interval);

        BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
                  me_reg);
    } while (tout-- > 0);

    if (!GOOD_ME_REG(me_reg)) {
        BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
        return -EINVAL;
    }

    DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

    *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
    return 0;
}
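
/* Note (comment only): the VF has no architected register that tells it its
 * own index. Instead, a VF read of its doorbell BAR is trapped by the PXP
 * block, which substitutes the "ME" register value; GOOD_ME_REG() validates
 * the pattern and the VF number is then extracted with
 * ME_REG_VF_NUM_MASK/ME_REG_VF_NUM_SHIFT.
 */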
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
    int rc = 0, attempts = 0;
    struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
    struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
    struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
    u32 vf_id;
    bool resources_acquired = false;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

    if (bnx2x_get_vf_id(bp, &vf_id)) {
        rc = -EAGAIN;
        goto out;
    }

    req->vfdev_info.vf_id = vf_id;
    req->vfdev_info.vf_os = 0;

    req->resc_request.num_rxqs = rx_count;
    req->resc_request.num_txqs = tx_count;
    req->resc_request.num_sbs = bp->igu_sb_cnt;
    req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
    req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

    /* pf 2 vf bulletin board address */
    req->bulletin_addr = bp->pf2vf_bulletin_mapping;

    /* Request physical port identifier */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
                  CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req,
                  req->first_tlv.tl.length + sizeof(struct channel_tlv),
                  CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    while (!resources_acquired) {
        DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

        /* send acquire request */
        rc = bnx2x_send_msg2pf(bp,
                               &resp->hdr.status,
                               bp->vf2pf_mbox_mapping);

        /* PF timeout */
        if (rc)
            goto out;

        /* copy acquire response from buffer to bp */
        memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

        attempts++;

        /* test whether the PF accepted our request. If not, humble
         * the request and try again.
         */
        if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
            DP(BNX2X_MSG_SP, "resources acquired\n");
            resources_acquired = true;
        } else if (bp->acquire_resp.hdr.status ==
                   PFVF_STATUS_NO_RESOURCE &&
                   attempts < VF_ACQUIRE_THRESH) {
            DP(BNX2X_MSG_SP,
               "PF unwilling to fulfill resource request. Try PF recommended amount\n");

            /* humble our request */
            req->resc_request.num_txqs =
                min(req->resc_request.num_txqs,
                    bp->acquire_resp.resc.num_txqs);
            req->resc_request.num_rxqs =
                min(req->resc_request.num_rxqs,
                    bp->acquire_resp.resc.num_rxqs);
            req->resc_request.num_sbs =
                min(req->resc_request.num_sbs,
                    bp->acquire_resp.resc.num_sbs);
            req->resc_request.num_mac_filters =
                min(req->resc_request.num_mac_filters,
                    bp->acquire_resp.resc.num_mac_filters);
            req->resc_request.num_vlan_filters =
                min(req->resc_request.num_vlan_filters,
                    bp->acquire_resp.resc.num_vlan_filters);
            req->resc_request.num_mc_filters =
                min(req->resc_request.num_mc_filters,
                    bp->acquire_resp.resc.num_mc_filters);

            /* Clear response buffer */
            memset(&bp->vf2pf_mbox->resp, 0,
                   sizeof(union pfvf_tlvs));
        } else {
            /* PF reports error */
            BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
                      bp->acquire_resp.hdr.status);
            rc = -EAGAIN;
            goto out;
        }
    }

    /* Retrieve physical port id (if possible) */
    phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
                     bnx2x_search_tlv_list(bp, resp,
                                           CHANNEL_TLV_PHYS_PORT_ID);
    if (phys_port_resp) {
        memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
        bp->flags |= HAS_PHYS_PORT_ID;
    }

    /* get HW info */
    bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
    bp->link_params.chip_id = bp->common.chip_id;
    bp->db_size = bp->acquire_resp.pfdev_info.db_size;
    bp->common.int_block = INT_BLOCK_IGU;
    bp->common.chip_port_mode = CHIP_2_PORT_MODE;
    bp->igu_dsb_id = -1;
    bp->mf_ov = 0;
    bp->mf_mode = 0;
    bp->common.flash_size = 0;
    bp->flags |=
        NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
    bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
    bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
    strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
            sizeof(bp->fw_ver));

    if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
        memcpy(bp->dev->dev_addr,
               bp->acquire_resp.resc.current_mac_addr,
               ETH_ALEN);

out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);
    return rc;
}
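
/* Negotiation sketch (comment only): ACQUIRE is effectively a bidding loop.
 * The VF asks for rx/tx queues, SBs and filters; on PFVF_STATUS_NO_RESOURCE
 * the PF's response carries the counts it can grant, the VF clamps each field
 * with min() and retries, up to VF_ACQUIRE_THRESH attempts. Example (numbers
 * are made up): request num_rxqs=4, PF answers NO_RESOURCE with num_rxqs=2,
 * so the next request asks for 2.
 */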
int bnx2x_vfpf_release(struct bnx2x *bp)
{
    struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc;
    u32 vf_id;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

    if (bnx2x_get_vf_id(bp, &vf_id)) {
        rc = -EAGAIN;
        goto out;
    }

    req->vf_id = vf_id;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    /* send release request */
    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc)
        /* PF timeout */
        goto out;

    if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
        /* PF released us */
        DP(BNX2X_MSG_SP, "vf released\n");
    } else {
        /* PF reports error */
        BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
                  resp->hdr.status);
        rc = -EAGAIN;
        goto out;
    }
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
    struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc, i;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

    /* status blocks */
    for_each_eth_queue(bp, i)
        req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
                                               status_blk_mapping);

    /* statistics - the request only supports a single queue for now */
    req->stats_addr = bp->fw_stats_data_mapping +
                      offsetof(struct bnx2x_fw_stats_data, queue_stats);

    req->stats_stride = sizeof(struct per_queue_stats);

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc)
        goto out;

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
                  resp->hdr.status);
        rc = -EAGAIN;
        goto out;
    }

    DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
    struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int i, rc;
    u32 vf_id;

    /* If we haven't got a valid VF id, there is no point in continuing
     * to send messages
     */
    if (bnx2x_get_vf_id(bp, &vf_id))
        goto free_irq;

    /* Close the queues */
    for_each_queue(bp, i)
        bnx2x_vfpf_teardown_queue(bp, i);

    /* remove mac */
    bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

    req->vf_id = vf_id;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc)
        BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
    else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
        BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
                  resp->hdr.status);

    bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
    /* Disable HW interrupts, NAPI */
    bnx2x_netif_stop(bp, 0);
    /* Delete all NAPI objects */
    bnx2x_del_all_napi(bp);

    /* Release IRQs */
    bnx2x_free_irq(bp);
}

static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                   struct bnx2x_vf_queue *q)
{
    u8 cl_id = vfq_cl_id(vf, q);
    u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

    /* mac */
    bnx2x_init_mac_obj(bp, &q->mac_obj,
                       cl_id, q->cid, func_id,
                       bnx2x_vf_sp(bp, vf, mac_rdata),
                       bnx2x_vf_sp_map(bp, vf, mac_rdata),
                       BNX2X_FILTER_MAC_PENDING,
                       &vf->filter_state,
                       BNX2X_OBJ_TYPE_RX_TX,
                       &bp->macs_pool);
    /* vlan */
    bnx2x_init_vlan_obj(bp, &q->vlan_obj,
                        cl_id, q->cid, func_id,
                        bnx2x_vf_sp(bp, vf, vlan_rdata),
                        bnx2x_vf_sp_map(bp, vf, vlan_rdata),
                        BNX2X_FILTER_VLAN_PENDING,
                        &vf->filter_state,
                        BNX2X_OBJ_TYPE_RX_TX,
                        &bp->vlans_pool);
    /* mcast */
    bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
                         q->cid, func_id, func_id,
                         bnx2x_vf_sp(bp, vf, mcast_rdata),
                         bnx2x_vf_sp_map(bp, vf, mcast_rdata),
                         BNX2X_FILTER_MCAST_PENDING,
                         &vf->filter_state,
                         BNX2X_OBJ_TYPE_RX_TX);
    /* rss */
    bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
                              func_id, func_id,
                              bnx2x_vf_sp(bp, vf, rss_rdata),
                              bnx2x_vf_sp_map(bp, vf, rss_rdata),
                              BNX2X_FILTER_RSS_CONF_PENDING,
                              &vf->filter_state,
                              BNX2X_OBJ_TYPE_RX_TX);

    vf->leading_rss = cl_id;
    q->is_leading = true;
    q->sp_initialized = true;
}

/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                       bool is_leading)
{
    struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    u8 fp_idx = fp->index;
    u16 tpa_agg_size = 0, flags = 0;
    int rc;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

    /* select tpa mode to request */
    if (!fp->disable_tpa) {
        flags |= VFPF_QUEUE_FLG_TPA;
        flags |= VFPF_QUEUE_FLG_TPA_IPV6;
        if (fp->mode == TPA_MODE_GRO)
            flags |= VFPF_QUEUE_FLG_TPA_GRO;
        tpa_agg_size = TPA_AGG_SIZE;
    }

    if (is_leading)
        flags |= VFPF_QUEUE_FLG_LEADING_RSS;

    /* calculate queue flags */
    flags |= VFPF_QUEUE_FLG_STATS;
    flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
    flags |= VFPF_QUEUE_FLG_VLAN;
    DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

    /* Common */
    req->vf_qid = fp_idx;
    req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

    /* Rx */
    req->rxq.rcq_addr = fp->rx_comp_mapping;
    req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
    req->rxq.rxq_addr = fp->rx_desc_mapping;
    req->rxq.sge_addr = fp->rx_sge_mapping;
    req->rxq.vf_sb = fp_idx;
    req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
    req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
    req->rxq.mtu = bp->dev->mtu;
    req->rxq.buf_sz = fp->rx_buf_size;
    req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
    req->rxq.tpa_agg_sz = tpa_agg_size;
    req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
    req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
                            (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
    req->rxq.flags = flags;
    req->rxq.drop_flags = 0;
    req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
    req->rxq.stat_id = -1; /* No stats at the moment */

    /* Tx */
    req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
    req->txq.vf_sb = fp_idx;
    req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
    req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
    req->txq.flags = flags;
    req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc)
        BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
                  fp_idx);

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
                  fp_idx, resp->hdr.status);
        rc = -EINVAL;
    }

    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}
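
/* Worked example for the max_sge_pkt math above (comment only; the constants
 * are assumed for illustration, not taken from the headers): with mtu = 9000,
 * 4K SGE pages (SGE_PAGE_SHIFT = 12), PAGES_PER_SGE = 2 and
 * PAGES_PER_SGE_SHIFT = 1: SGE_PAGE_ALIGN(9000) >> 12 = 3 pages; the second
 * line rounds 3 up to a whole group of 2, giving 4, and the final shift
 * yields 2 SGEs per aggregated packet.
 */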
static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
    struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
                    sizeof(*req));

    req->vf_qid = qidx;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

    if (rc) {
        BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
                  rc);
        goto out;
    }

    /* PF failed the transaction */
    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
                  resp->hdr.status);
        rc = -EINVAL;
    }

out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
    struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
    int rc = 0;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                    sizeof(*req));

    req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
    req->vf_qid = vf_qid;
    req->n_mac_vlan_filters = 1;

    req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
    if (set)
        req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;

    /* sample bulletin board for new mac */
    bnx2x_sample_bulletin(bp);

    /* copy mac from device to request */
    memcpy(req->filters[0].mac, addr, ETH_ALEN);

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    /* send message to pf */
    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc) {
        BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
        goto out;
    }

    /* failure may mean PF was configured with a new mac for us */
    while (resp->hdr.status == PFVF_STATUS_FAILURE) {
        DP(BNX2X_MSG_IOV,
           "vfpf SET MAC failed. Check bulletin board for new posts\n");

        /* copy mac from bulletin to device */
        memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

        /* check if bulletin board was updated */
        if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
            /* copy mac from device to request */
            memcpy(req->filters[0].mac, bp->dev->dev_addr,
                   ETH_ALEN);

            /* send message to pf */
            rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
                                   bp->vf2pf_mbox_mapping);
        } else {
            /* no new info in bulletin */
            break;
        }
    }

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
        rc = -EINVAL;
    }
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}
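
/* Race note (comment only): a PFVF_STATUS_FAILURE on SET MAC may simply mean
 * the hypervisor administrator assigned the VF a new MAC via the PF while
 * this request was in flight. The loop above therefore re-samples the
 * bulletin board; if it was updated, the VF adopts the posted MAC and
 * retries, and it only gives up when the bulletin carries no new information.
 */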
/* request pf to config rss table for vf queues */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
                          struct bnx2x_config_rss_params *params)
{
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
    int rc = 0;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
                    sizeof(*req));

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
    memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
    req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
    req->rss_key_size = T_ETH_RSS_KEY;
    req->rss_result_mask = params->rss_result_mask;

    /* flags handled individually for backward/forward compatibility */
    if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
        req->rss_flags |= VFPF_RSS_MODE_DISABLED;
    if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
        req->rss_flags |= VFPF_RSS_MODE_REGULAR;
    if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
        req->rss_flags |= VFPF_RSS_SET_SRCH;
    if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
        req->rss_flags |= VFPF_RSS_IPV4;
    if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
        req->rss_flags |= VFPF_RSS_IPV4_TCP;
    if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
        req->rss_flags |= VFPF_RSS_IPV4_UDP;
    if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
        req->rss_flags |= VFPF_RSS_IPV6;
    if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
        req->rss_flags |= VFPF_RSS_IPV6_TCP;
    if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
        req->rss_flags |= VFPF_RSS_IPV6_UDP;

    DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    /* send message to pf */
    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc) {
        BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
        goto out;
    }

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        /* Since older drivers don't support this feature (and VF has
         * no way of knowing other than failing this), don't propagate
         * an error in this case.
         */
        DP(BNX2X_MSG_IOV,
           "Failed to send rss message to PF over VF-PF channel [%d]\n",
           resp->hdr.status);
    }
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}
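
/* Compatibility note (comment only): the BNX2X_RSS_* bits are translated one
 * by one into VFPF_RSS_* bits rather than copied wholesale, so either side of
 * the channel can run an older driver that simply ignores flags it does not
 * know. A PF that predates UPDATE_RSS fails the request entirely, which is
 * why the status check above only logs instead of propagating an error.
 */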
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);
    struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc, i = 0;
    struct netdev_hw_addr *ha;

    if (bp->state != BNX2X_STATE_OPEN) {
        DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
        return -EINVAL;
    }

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                    sizeof(*req));

    /* Get Rx mode requested */
    DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

    /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops; check
     * before copying so req->multicast[] cannot overflow, and bail out
     * through 'out' so the mailbox mutex is released.
     */
    if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
        DP(NETIF_MSG_IFUP,
           "VF supports not more than %d multicast MAC addresses\n",
           PFVF_MAX_MULTICAST_PER_VF);
        rc = -EINVAL;
        goto out;
    }

    netdev_for_each_mc_addr(ha, dev) {
        DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
           bnx2x_mc_addr(ha));
        memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
        i++;
    }

    req->n_multicast = i;
    req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
    req->vf_qid = 0;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);
    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc) {
        BNX2X_ERR("Sending a message failed: %d\n", rc);
        goto out;
    }

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
                  resp->hdr.status);
        rc = -EINVAL;
    }
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
    int mode = bp->rx_mode;
    struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc;

    /* clear mailbox and prep first tlv */
    bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
                    sizeof(*req));

    DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

    /* Ignore everything except MODE_NONE */
    if (mode == BNX2X_RX_MODE_NONE) {
        req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
    } else {
        /* Current PF driver will not look at the specific flags,
         * but they are required when working with older drivers on hv.
         */
        req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
        req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
        req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
    }

    req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
    req->vf_qid = 0;

    /* add list termination tlv */
    bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* output tlvs list */
    bnx2x_dp_tlv_list(bp, req);

    rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
    if (rc)
        BNX2X_ERR("Sending a message failed: %d\n", rc);

    if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
        BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
        rc = -EINVAL;
    }

    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
    u32 addr = BAR_CSTRORM_INTMEM +
               CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

    REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
    u32 addr = BAR_CSTRORM_INTMEM +
               CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

    REG_WR8(bp, addr, 1);
}

static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
    int i;

    for_each_vf(bp, i)
        storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
    bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

    /* enable the mailbox in the FW */
    storm_memset_vf_mbx_ack(bp, abs_vfid);
    storm_memset_vf_mbx_valid(bp, abs_vfid);

    /* enable the VF access to the mailbox */
    bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
                                dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
                                u32 vf_addr_lo, u32 len32)
{
    struct dmae_command dmae;

    if (CHIP_IS_E1x(bp)) {
        BNX2X_ERR("Chip revision does not support VFs\n");
        return DMAE_NOT_RDY;
    }

    if (!bp->dmae_ready) {
        BNX2X_ERR("DMAE is not ready, can not copy\n");
        return DMAE_NOT_RDY;
    }

    /* set opcode and fixed command fields */
    bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

    if (from_vf) {
        dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
                          (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
                          (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

        dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

        dmae.src_addr_lo = vf_addr_lo;
        dmae.src_addr_hi = vf_addr_hi;
        dmae.dst_addr_lo = U64_LO(pf_addr);
        dmae.dst_addr_hi = U64_HI(pf_addr);
    } else {
        dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
                          (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
                          (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

        dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

        dmae.src_addr_lo = U64_LO(pf_addr);
        dmae.src_addr_hi = U64_HI(pf_addr);
        dmae.dst_addr_lo = vf_addr_lo;
        dmae.dst_addr_hi = vf_addr_hi;
    }
    dmae.len = len32;

    /* issue the command and wait for completion */
    return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}
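
/* Direction note (comment only): the same DMAE engine services both copy
 * directions, with 'from_vf' selecting whether the VFID decorates the source
 * or the destination side of the opcode. In the response path below the PF
 * pushes PF->VF (from_vf == false); the from_vf direction is used when a VF's
 * request buffer is pulled into PF memory before parsing. len32 is counted in
 * 32-bit words.
 */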
static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
                                         struct bnx2x_virtf *vf)
{
    struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
    u16 length, type;

    /* prepare response */
    type = mbx->first_tlv.tl.type;
    length = type == CHANNEL_TLV_ACQUIRE ?
             sizeof(struct pfvf_acquire_resp_tlv) :
             sizeof(struct pfvf_general_resp_tlv);
    bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
    bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));
}

static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
                                       struct bnx2x_virtf *vf,
                                       int vf_rc)
{
    struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
    struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
    dma_addr_t pf_addr;
    u64 vf_addr;
    int rc;

    bnx2x_dp_tlv_list(bp, resp);
    DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
       mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

    resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);

    /* send response */
    vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
              mbx->first_tlv.resp_msg_offset;
    pf_addr = mbx->msg_mapping +
              offsetof(struct bnx2x_vf_mbx_msg, resp);

    /* Copy the response buffer. The first u64 is written afterwards, as
     * the vf is sensitive to the header being written
     */
    vf_addr += sizeof(u64);
    pf_addr += sizeof(u64);
    rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                              U64_HI(vf_addr),
                              U64_LO(vf_addr),
                              (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
    if (rc) {
        BNX2X_ERR("Failed to copy response body to VF %d\n",
                  vf->abs_vfid);
        goto mbx_error;
    }
    vf_addr -= sizeof(u64);
    pf_addr -= sizeof(u64);

    /* ack the FW */
    storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
    mmiowb();

    /* copy the response header including status-done field,
     * must be last dmae, must be after FW is acked
     */
    rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
                              U64_HI(vf_addr),
                              U64_LO(vf_addr),
                              sizeof(u64)/4);

    /* unlock channel mutex */
    bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

    if (rc) {
        BNX2X_ERR("Failed to copy response status to VF %d\n",
                  vf->abs_vfid);
        goto mbx_error;
    }
    return;

mbx_error:
    bnx2x_vf_release(bp, vf);
}
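
/* Ordering note (comment only): the response is deliberately copied in two
 * DMAE passes. The body (everything past the first u64) goes first; only
 * after the FW is acked is the leading u64 - which contains the status/done
 * field the VF polls in bnx2x_send_msg2pf() - written, so the VF can never
 * observe a completed header in front of a half-written body.
 */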
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
                              struct bnx2x_virtf *vf,
                              int rc)
{
    bnx2x_vf_mbx_resp_single_tlv(bp, vf);
    bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}

static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
                                        struct bnx2x_virtf *vf,
                                        void *buffer,
                                        u16 *offset)
{
    struct vfpf_port_phys_id_resp_tlv *port_id;

    if (!(bp->flags & HAS_PHYS_PORT_ID))
        return;

    bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
                  sizeof(struct vfpf_port_phys_id_resp_tlv));

    port_id = (struct vfpf_port_phys_id_resp_tlv *)
              (((u8 *)buffer) + *offset);
    memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);

    /* Offset should continue representing the offset to the tail
     * of TLV data (outside this function scope)
     */
    *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}

static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
    int i;
    struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
    struct pf_vf_resc *resc = &resp->resc;
    u8 status = bnx2x_pfvf_status_codes(vfop_status);
    u16 length;

    memset(resp, 0, sizeof(*resp));

    /* fill in pfdev info */
    resp->pfdev_info.chip_num = bp->common.chip_id;
    resp->pfdev_info.db_size = bp->db_size;
    resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
    resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                               PFVF_CAP_TPA |
                               PFVF_CAP_TPA_UPDATE);
    bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                      sizeof(resp->pfdev_info.fw_ver));

    if (status == PFVF_STATUS_NO_RESOURCE ||
        status == PFVF_STATUS_SUCCESS) {
        /* set resources numbers, if status equals NO_RESOURCE these
         * are max possible numbers
         */
        resc->num_rxqs = vf_rxq_count(vf) ? :
                         bnx2x_vf_max_queue_cnt(bp, vf);
        resc->num_txqs = vf_txq_count(vf) ? :
                         bnx2x_vf_max_queue_cnt(bp, vf);
        resc->num_sbs = vf_sb_count(vf);
        resc->num_mac_filters = vf_mac_rules_cnt(vf);
        resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
        resc->num_mc_filters = 0;

        if (status == PFVF_STATUS_SUCCESS) {
            /* fill in the allocated resources */
            struct pf_vf_bulletin_content *bulletin =
                BP_VF_BULLETIN(bp, vf->index);

            for_each_vfq(vf, i)
                resc->hw_qid[i] =
                    vfq_qzone_id(vf, vfq_get(vf, i));

            for_each_vf_sb(vf, i) {
                resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
                resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
            }

            /* if a mac has been set for this vf, supply it */
            if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
                memcpy(resc->current_mac_addr, bulletin->mac,
                       ETH_ALEN);
            }
        }
    }

    DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
       "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
       vf->abs_vfid,
       resp->pfdev_info.chip_num,
       resp->pfdev_info.db_size,
       resp->pfdev_info.indices_per_sb,
       resp->pfdev_info.pf_cap,
       resc->num_rxqs,
       resc->num_txqs,
       resc->num_sbs,
       resc->num_mac_filters,
       resc->num_vlan_filters,
       resc->num_mc_filters,
       resp->pfdev_info.fw_ver);

    DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
    for (i = 0; i < vf_rxq_count(vf); i++)
        DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
    DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
    for (i = 0; i < vf_sb_count(vf); i++)
        DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
                resc->hw_sbs[i].hw_sb_id,
                resc->hw_sbs[i].sb_qid);
    DP_CONT(BNX2X_MSG_IOV, "]\n");

    /* prepare response */
    length = sizeof(struct pfvf_acquire_resp_tlv);
    bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);

    /* Handle possible VF requests for physical port identifiers.
     * 'length' should continue to indicate the offset of the first empty
     * place in the buffer (i.e., where next TLV should be inserted)
     */
    if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
                              CHANNEL_TLV_PHYS_PORT_ID))
        bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);

    bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
                  sizeof(struct channel_list_end_tlv));

    /* send the response */
    bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}
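
/* Layout note (comment only): 'length' doubles as the running insert offset
 * for the response buffer. It starts at sizeof(struct pfvf_acquire_resp_tlv),
 * bnx2x_vf_mbx_resp_phys_port() advances it by the size of any TLV it
 * appends, and the CHANNEL_TLV_LIST_END terminator is always placed at the
 * final offset, mirroring the request-side chain built by the VF.
 */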
static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
    int rc;
    struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

    /* log vfdef info */
    DP(BNX2X_MSG_IOV,
       "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
       vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
       acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
       acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
       acquire->resc_request.num_vlan_filters,
       acquire->resc_request.num_mc_filters);

    /* acquire the resources */
    rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

    /* store address of vf's bulletin board */
    vf->bulletin_map = acquire->bulletin_addr;

    /* response */
    bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
    struct vfpf_init_tlv *init = &mbx->msg->req.init;
    int rc;

    /* record ghost addresses from vf message */
    vf->spq_map = init->spq_addr;
    vf->fw_stat_map = init->stats_addr;
    vf->stats_stride = init->stats_stride;
    rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

    /* set VF multiqueue statistics collection mode */
    if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
        vf->cfg_flags |= VF_CFG_STATS_COALESCE;

    /* response */
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
                                     unsigned long *sp_q_flags)
{
    if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
        __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
        __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
        __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
        __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
        __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
        __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
        __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
        __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
    if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
        __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);

    /* outer vlan removal is set according to PF's multi function mode */
    if (IS_MF_SD(bp))
        __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}

static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
{
    struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
    struct bnx2x_vf_queue_construct_params qctor;
    int rc = 0;

    /* verify vf_qid */
    if (setup_q->vf_qid >= vf_rxq_count(vf)) {
        BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
                  setup_q->vf_qid, vf_rxq_count(vf));
        rc = -EINVAL;
        goto response;
    }

    /* tx queues must be set up alongside rx queues, so if neither the rx
     * nor the tx queue is marked valid there's nothing to do.
     */
    if (setup_q->param_valid & (VFPF_RXQ_VALID | VFPF_TXQ_VALID)) {
        struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
        unsigned long q_type = 0;

        struct bnx2x_queue_init_params *init_p;
        struct bnx2x_queue_setup_params *setup_p;

        if (bnx2x_vfq_is_leading(q))
            bnx2x_leading_vfq_init(bp, vf, q);

        /* re-init the VF operation context */
        memset(&qctor, 0,
               sizeof(struct bnx2x_vf_queue_construct_params));
        setup_p = &qctor.prep_qsetup;
        init_p = &qctor.qstate.params.init;

        /* activate immediately */
        __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

        if (setup_q->param_valid & VFPF_TXQ_VALID) {
            struct bnx2x_txq_setup_params *txq_params =
                &setup_p->txq_params;

            __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

            /* save sb resource index */
            q->sb_idx = setup_q->txq.vf_sb;

            /* tx init */
            init_p->tx.hc_rate = setup_q->txq.hc_rate;
            init_p->tx.sb_cq_index = setup_q->txq.sb_index;
            bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
                                     &init_p->tx.flags);

            /* tx setup - flags */
            bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
                                     &setup_p->flags);

            /* tx setup - general, nothing */

            /* tx setup - tx */
            txq_params->dscr_map = setup_q->txq.txq_addr;
            txq_params->sb_cq_index = setup_q->txq.sb_index;
            txq_params->traffic_type = setup_q->txq.traffic_type;

            bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
                                     q->index, q->sb_idx);
        }

        if (setup_q->param_valid & VFPF_RXQ_VALID) {
            struct bnx2x_rxq_setup_params *rxq_params =
                &setup_p->rxq_params;

            __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

            /* Note: there is no support for different SBs
             * for TX and RX
             */
            q->sb_idx = setup_q->rxq.vf_sb;

            /* rx init */
            init_p->rx.hc_rate = setup_q->rxq.hc_rate;
            init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
            bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
                                     &init_p->rx.flags);

            /* rx setup - flags */
            bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
                                     &setup_p->flags);

            /* rx setup - general */
            setup_p->gen_params.mtu = setup_q->rxq.mtu;

            /* rx setup - rx */
            rxq_params->drop_flags = setup_q->rxq.drop_flags;
            rxq_params->dscr_map = setup_q->rxq.rxq_addr;
            rxq_params->sge_map = setup_q->rxq.sge_addr;
            rxq_params->rcq_map = setup_q->rxq.rcq_addr;
            rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
            rxq_params->buf_sz = setup_q->rxq.buf_sz;
            rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
            rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
            rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
            rxq_params->cache_line_log =
                setup_q->rxq.cache_line_log;
            rxq_params->sb_cq_index = setup_q->rxq.sb_index;

            /* rx setup - multicast engine */
            if (bnx2x_vfq_is_leading(q)) {
                u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);

                rxq_params->mcast_engine_id = mcast_id;
                __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
            }

            bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
                                     q->index, q->sb_idx);
        }

        /* complete the preparations */
        bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);

        rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
        if (rc)
            goto response;
    }
response:
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
                                     struct bnx2x_virtf *vf,
                                     struct vfpf_set_q_filters_tlv *tlv,
                                     struct bnx2x_vf_mac_vlan_filters **pfl,
                                     u32 type_flag)
{
    int i, j;
    struct bnx2x_vf_mac_vlan_filters *fl = NULL;
    size_t fsz;

    fsz = tlv->n_mac_vlan_filters *
          sizeof(struct bnx2x_vf_mac_vlan_filter) +
          sizeof(struct bnx2x_vf_mac_vlan_filters);

    fl = kzalloc(fsz, GFP_KERNEL);
    if (!fl)
        return -ENOMEM;

    for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
        struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

        if ((msg_filter->flags & type_flag) != type_flag)
            continue;

        if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
            fl->filters[j].mac = msg_filter->mac;
            fl->filters[j].type = BNX2X_VF_FILTER_MAC;
        } else {
            fl->filters[j].vid = msg_filter->vlan_tag;
            fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
        }
        fl->filters[j].add =
            (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
            true : false;
        fl->count++;
        /* advance the output index alongside count so consecutive
         * matches don't all land in filters[0]
         */
        j++;
    }
    if (!fl->count)
        kfree(fl);
    else
        *pfl = fl;

    return 0;
}
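
/* Sizing note (comment only): bnx2x_vf_mbx_macvlan_list() allocates the
 * header struct plus one bnx2x_vf_mac_vlan_filter per message filter in a
 * single kzalloc; e.g. for n_mac_vlan_filters == 3:
 *
 *    fsz = 3 * sizeof(struct bnx2x_vf_mac_vlan_filter) +
 *          sizeof(struct bnx2x_vf_mac_vlan_filters);
 *
 * Entries that do not match type_flag are skipped, so fl->count may end up
 * smaller than the allocation; a list that stays empty is freed on the spot.
 */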
  1208. static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
  1209. struct vfpf_q_mac_vlan_filter *filter)
  1210. {
  1211. DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
  1212. if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
  1213. DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
  1214. if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
  1215. DP_CONT(msglvl, ", MAC=%pM", filter->mac);
  1216. DP_CONT(msglvl, "\n");
  1217. }
  1218. static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
  1219. struct vfpf_set_q_filters_tlv *filters)
  1220. {
  1221. int i;
  1222. if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
  1223. for (i = 0; i < filters->n_mac_vlan_filters; i++)
  1224. bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
  1225. &filters->filters[i]);
  1226. if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
  1227. DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
  1228. if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
  1229. for (i = 0; i < filters->n_multicast; i++)
  1230. DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
  1231. }
  1232. #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
  1233. #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0;
	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	/* check for any mac/vlan changes */
	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
		/* build mac list */
		struct bnx2x_vf_mac_vlan_filters *fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build vlan list */
		fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
		unsigned long accept = 0;
		struct pf_vf_bulletin_content *bulletin =
			BP_VF_BULLETIN(bp, vf->index);

		/* Ignore the VF's requested mode; set a regular mode instead */
		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
		}

		/* A packet arriving at the VF's mac should be accepted
		 * with any vlan, unless a vlan has already been
		 * configured.
		 */
		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

		/* set rx-mode */
		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
		if (rc)
			goto op_err;
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
		/* set mcasts */
		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
				    msg->n_multicast, false);
		if (rc)
			goto op_err;
	}
op_err:
	if (rc)
		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
			  vf->abs_vfid, msg->vf_qid, rc);
	return rc;
}
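
/* Reject MAC filter requests that conflict with a MAC the hypervisor has
 * already forced on this VF via the set-vf-mac ndo.
 */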
static int bnx2x_filters_validate_mac(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if a mac was already set for this VF via the set vf mac ndo, we
	 * only accept mac configurations of that mac. Why accept them at
	 * all? Because the PF may have been unable to configure the mac at
	 * the time, since the queue was not yet set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		/* once a mac was set by ndo can only accept a single mac... */
		if (filters->n_mac_vlan_filters > 1) {
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}

		/* ...and only the mac set by the ndo */
		if (filters->n_mac_vlan_filters == 1 &&
		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

response:
	return rc;
}
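
/* Reject VLAN filter requests when the hypervisor has already pinned a
 * VLAN on this VF, and sanity-check the queue index in the request.
 */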
static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if vlan was set by hypervisor we don't allow guest to config vlan */
	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		int i;

		/* search for vlan filters */
		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
			if (filters->filters[i].flags &
			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
					  vf->abs_vfid);
				rc = -EPERM;
				goto response;
			}
		}
	}

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf)) {
		rc = -EPERM;
		goto response;
	}

response:
	return rc;
}
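
/* SET_Q_FILTERS request handler: validate the request against the
 * bulletin board, apply it via bnx2x_vf_mbx_qfilters() and respond.
 */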
static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	int rc;

	rc = bnx2x_filters_validate_mac(bp, vf, filters);
	if (rc)
		goto response;

	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
	if (rc)
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
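
/* TEARDOWN_Q request handler: tear down one of the VF's queues */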
static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
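
/* CLOSE request handler: close the VF's queues and respond */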
static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	rc = bnx2x_vf_close(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
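
/* RELEASE request handler: free the resources held by the VF */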
static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	rc = bnx2x_vf_free(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
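
/* UPDATE_RSS request handler: translate the VFPF RSS TLV into
 * bnx2x_config_rss_params and apply it to the VF's RSS object.
 */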
static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_config_rss_params rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
	int rc = 0;

	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		rc = -EINVAL;
		goto mbx_resp;
	}

	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));

	/* set vfop params according to rss tlv */
	memcpy(rss.ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
	rss.rss_obj = &vf->rss_conf_obj;
	rss.rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	rss.rss_flags = 0;
	rss.ramrod_flags = 0;

	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);

	/* UDP RSS without the matching TCP flag would trigger a FW assert */
	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		rc = -EINVAL;
		goto mbx_resp;
	}

	rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}
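
/* Sanity-check the TPA parameters requested by the VF against the
 * driver/firmware limits before touching any state.
 */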
static int bnx2x_validate_tpa_params(struct bnx2x *bp,
				     struct vfpf_tpa_tlv *tpa_tlv)
{
	int rc = 0;

	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
	    U_ETH_MAX_SGES_FOR_PACKET) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_sges_for_packet,
			  U_ETH_MAX_SGES_FOR_PACKET);
	}

	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_tpa_queues,
			  MAX_AGG_QS(bp));
	}

	return rc;
}
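
/* UPDATE_TPA request handler: copy the TPA client parameters from the
 * TLV into queue-update params and apply them to the VF's queues.
 */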
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_queue_update_tpa_params vf_op_params;
	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
	int rc = 0;

	memset(&vf_op_params, 0, sizeof(vf_op_params));

	/* keep the validation result so a failure is reported to the VF */
	rc = bnx2x_validate_tpa_params(bp, tpa_tlv);
	if (rc)
		goto mbx_resp;

	vf_op_params.complete_on_both_clients =
		tpa_tlv->tpa_client_info.complete_on_both_clients;
	vf_op_params.dont_verify_thr =
		tpa_tlv->tpa_client_info.dont_verify_thr;
	vf_op_params.max_agg_sz =
		tpa_tlv->tpa_client_info.max_agg_size;
	vf_op_params.max_sges_pkt =
		tpa_tlv->tpa_client_info.max_sges_for_packet;
	vf_op_params.max_tpa_queues =
		tpa_tlv->tpa_client_info.max_tpa_queues;
	vf_op_params.sge_buff_sz =
		tpa_tlv->tpa_client_info.sge_buff_size;
	vf_op_params.sge_pause_thr_high =
		tpa_tlv->tpa_client_info.sge_pause_thr_high;
	vf_op_params.sge_pause_thr_low =
		tpa_tlv->tpa_client_info.sge_pause_thr_low;
	vf_op_params.tpa_mode =
		tpa_tlv->tpa_client_info.tpa_mode;
	vf_op_params.update_ipv4 =
		tpa_tlv->tpa_client_info.update_ipv4;
	vf_op_params.update_ipv6 =
		tpa_tlv->tpa_client_info.update_ipv6;

	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			return;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			return;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_TPA:
			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
			return;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the
		 * future - a version written after this PF driver, which
		 * supports features we don't know about and therefore can't
		 * support. Or someone wrote a broken VF driver and is
		 * sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
	}

	/* can we respond to VF (do we have an address for it?) */
	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
		/* notify the VF that we do not support this request */
		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
	} else {
		/* can't send a response since this VF is unknown to us;
		 * just ack the FW to release the mailbox and unlock
		 * the channel.
		 */
		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		/* Firmware ack should be written before unlocking channel */
		mmiowb();
		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	}
}
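
/* Record an incoming VF-PF mailbox event in the VF database and kick the
 * IOV task; the actual request is processed later in bnx2x_vf_mbx().
 */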
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
			   struct vf_pf_event_data *vfpf_event)
{
	u8 vf_idx;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address_lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);

	/* Sanity checks - consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		return;
	}

	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);

	/* Update VFDB with current message and schedule its handling */
	mutex_lock(&BP_VFDB(bp)->event_mutex);
	BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
	BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
	mutex_unlock(&BP_VFDB(bp)->event_mutex);

	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}

/* handle new vf-pf messages */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
	u64 events;
	u8 vf_idx;
	int rc;

	if (!vfdb)
		return;

	mutex_lock(&vfdb->event_mutex);
	events = vfdb->event_occur;
	vfdb->event_occur = 0;
	mutex_unlock(&vfdb->event_mutex);

	for_each_vf(bp, vf_idx) {
		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* Handle VFs which have pending events */
		if (!(events & (1ULL << vf_idx)))
			continue;

		DP(BNX2X_MSG_IOV,
		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
		   mbx->first_tlv.resp_msg_offset);

		/* dmae to get the VF request */
		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
					  vf->abs_vfid, mbx->vf_addr_hi,
					  mbx->vf_addr_lo,
					  sizeof(union vfpf_tlvs) / 4);
		if (rc) {
			BNX2X_ERR("Failed to copy request VF %d\n",
				  vf->abs_vfid);
			bnx2x_vf_release(bp, vf);
			return;
		}

		/* process the VF message header */
		mbx->first_tlv = mbx->msg->req.first_tlv;

		/* Clean response buffer to refrain from falsely
		 * seeing chains.
		 */
		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));

		/* dispatch the request (will prepare the response) */
		bnx2x_vf_mbx_request(bp, vf, mbx);
	}
}

/* propagate local bulletin board to vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
			     vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bulletin->length = BULLETIN_CONTENT_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);

	/* propagate bulletin board via dmae to vm memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}