qed_vf.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in qed_vf_pf_req_end().
	 * So, qed_vf_pf_prep() and qed_vf_pf_req_end()
	 * must come in sequence.
	 */
	mutex_lock(&(p_iov->mutex));

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "preparing to send 0x%04x tlv over vf pf channel\n",
		   type);

	/* Reset Request offset */
	p_iov->offset = (u8 *)p_iov->vf2pf_request;

	/* Clear mailbox - both request and reply */
	memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
		(u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}

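/* The VF->PF channel is a simple doorbell protocol: the VF writes the DMA
 * address of its request mailbox into the USDM zone-B registers, issues a
 * write barrier so the address lands before the trigger, rings the trigger,
 * and then polls the `done' byte (written back by the PF) for up to
 * 100 * 25 ms before declaring a timeout.
 */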
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = 0, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	qed_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	wmb();

	REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));

	/* When PF would be done with the response, it would write back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		msleep(25);
		time--;
	}

	if (!*done) {
		DP_NOTICE(p_hwfn,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = -EBUSY;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

#define VF_ACQUIRE_THRESH 3
static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
					  struct vf_pf_resc_request *p_req,
					  struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs,
		   p_resp->num_rxqs,
		   p_req->num_txqs,
		   p_resp->num_txqs,
		   p_req->num_sbs,
		   p_resp->num_sbs,
		   p_req->num_mac_filters,
		   p_resp->num_mac_filters,
		   p_req->num_vlan_filters,
		   p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

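/* ACQUIRE is the first and most involved request a VF sends: it advertises
 * the VF's OS, firmware and fastpath-HSI versions plus a desired resource
 * set, then either retries with the PF-recommended amounts (up to
 * VF_ACQUIRE_THRESH attempts) or falls back to pre-FP-HSI "legacy" mode
 * when the PF reports the request as unsupported.
 */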
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int rc = 0, attempts = 0;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* starting filling the request */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;

	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV, "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
		if (rc)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
					VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
			resources_acquired = true;
		} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
						      &resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn,
					  "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR,
					  ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = -EINVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn,
						  "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
					rc = -EINVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to see if it supports FW-version override\n");
					req->vfdev_info.capabilities |=
						VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using same Major, PF must have had
			 * its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
			rc = -EINVAL;
			goto exit;
		} else {
			DP_ERR(p_hwfn,
			       "PF returned error %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = -EAGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->cdev->type = resp->pfdev_info.dev_type;
	p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;

	p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_NOTICE(p_hwfn, "100g VF\n");
			p_hwfn->cdev->num_hwfns = 2;
		}
	}

	if (!p_iov->b_pre_fp_hsi &&
	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

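/* qed_vf_hw_prepare() is the VF counterpart of PF hardware preparation: it
 * derives the opaque/concrete FIDs from the BAR0 "ME" registers, allocates
 * the DMA-coherent request/reply mailboxes and the bulletin board, and then
 * immediately performs the ACQUIRE handshake with the PF.
 */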
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
			    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
	if (!p_iov)
		return -ENOMEM;

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						  sizeof(union vfpf_tlvs),
						  &p_iov->vf2pf_request_phys,
						  GFP_KERNEL);
	if (!p_iov->vf2pf_request)
		goto free_p_iov;

	p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(union pfvf_tlvs),
						&p_iov->pf2vf_reply_phys,
						GFP_KERNEL);
	if (!p_iov->pf2vf_reply)
		goto free_vf2pf_request;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
		   p_iov->vf2pf_request,
		   (u64)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
	p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						    p_iov->bulletin.size,
						    &p_iov->bulletin.phys,
						    GFP_KERNEL);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt,
		   (u64)p_iov->bulletin.phys, p_iov->bulletin.size);

	mutex_init(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = QED_PCI_ETH;

	return qed_vf_pf_acquire(p_hwfn);

free_vf2pf_request:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(union vfpf_tlvs),
			  p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
free_p_iov:
	kfree(p_iov);

	return -ENOMEM;
}

#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
			   enum qed_tunn_clss mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= BIT(mask);
	}

	*p_cls = p_src->tun_cls;
}

static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
			 enum qed_tunn_clss mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}

void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}

static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
			   u16 feature_mask, u8 tunn_mode,
			   u8 tunn_cls, enum qed_tunn_mode val)
{
	if (feature_mask & BIT(val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
				     struct qed_tunnel_info *p_tun,
				     struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				   p_resp->vxlan_mode, p_resp->vxlan_clss,
				   QED_MODE_VXLAN_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				   p_resp->l2geneve_mode,
				   p_resp->l2geneve_clss,
				   QED_MODE_L2GENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				   p_resp->ipgeneve_mode,
				   p_resp->ipgeneve_clss,
				   QED_MODE_IPGENEVE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				   p_resp->l2gre_mode, p_resp->l2gre_clss,
				   QED_MODE_L2GRE_TUNN);
	__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				   p_resp->ipgre_mode, p_resp->ipgre_clss,
				   QED_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}

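/* Tunnel configuration is negotiated rather than applied directly: the VF
 * packs the requested VXLAN/GENEVE/GRE modes, classifications and UDP ports
 * into a single UPDATE_TUNN_PARAM TLV, and whatever the PF reports back is
 * what gets written into the shared qed_tunnel_info.
 */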
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
				  struct qed_tunnel_info *p_src)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	int rc;

	p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
			       sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
				 &p_req->vxlan_clss, &p_src->vxlan_port,
				 &p_req->update_vxlan_port,
				 &p_req->vxlan_port);
	qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				 QED_MODE_L2GENEVE_TUNN,
				 &p_req->l2geneve_clss, &p_src->geneve_port,
				 &p_req->update_geneve_port,
				 &p_req->geneve_port);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				   QED_MODE_IPGENEVE_TUNN,
				   &p_req->ipgeneve_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				   QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				   QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = -EINVAL;
	}

	qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

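/* Rx/Tx queue setup follows the same request/response pattern. The one
 * subtlety is the producer/doorbell location: a modern PF returns the exact
 * offset in its START_RXQ/START_TXQ response, whereas a legacy (pre-FP-HSI)
 * PF leaves the VF to derive the address itself from the acquired queue
 * resources.
 */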
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    u16 bd_max_bytes,
		    dma_addr_t bd_chain_phys_addr,
		    dma_addr_t cqe_pbl_addr,
		    u16 cqe_pbl_size, void __iomem **pp_prod)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u8 rx_qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)
			   p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->cdev) +
			   hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

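/* For Tx the interesting output is the doorbell address: with a modern PF it
 * comes straight from resp->offset, while for a legacy PF it is computed
 * from the per-queue CID via qed_db_addr_vf() in DQ_DEMS_LEGACY mode.
 */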
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
			       qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

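/* VPORT_UPDATE is a compound message: each feature (activate, tx-switching,
 * vlan-strip, mcast, accept flags, RSS, SGE-TPA, accept-any-vlan) travels as
 * its own extended TLV and gets its own per-TLV response. The helper below
 * decides which TLVs a given qed_sp_vport_update_params actually requires,
 * and the one after it walks the PF's reply for the matching status TLVs.
 */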
static bool
qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
		return !!p_data->update_tx_switching_flg;
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", tlv);
		return false;
	}
}

static void
qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
			 qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
						  tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "TLV[%d] Configuration %s\n",
				   tlv,
				   (p_resp && p_resp->hdr.status) ? "succeeded"
								  : "failed");
	}
}

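/* Builds the VPORT_UPDATE request TLV-by-TLV. Note that resp_size grows by
 * one pfvf_def_resp_tlv for every extended TLV added, so the reply size
 * passed to qed_send_msg2pf() matches what the PF will send back.
 */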
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					      tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		memcpy(p_mcast_tlv->bins, p_params->bins,
		       sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
				p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
				p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct qed_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;
		int i, table_size;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = qed_add_tlv(p_hwfn,
					&p_iov->offset,
					CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
				VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;

		table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
				   1 << p_rss_tlv->rss_table_size_log);
		for (i = 0; i < table_size; i++) {
			struct qed_queue_cid *p_queue;

			p_queue = rss_params->rss_ind_table[i];
			p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
		}
		memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
		       sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
			p_params->update_accept_any_vlan_flg;
	}

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

	qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

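/* RELEASE tears down the channel itself: after asking the PF to let go of
 * the VF it frees both DMA mailboxes and the bulletin board and clears
 * vf_iov_info, so no further messages can be sent through this channel.
 */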
int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	qed_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union vfpf_tlvs),
				  p_iov->vf2pf_request,
				  p_iov->vf2pf_request_phys);
	if (p_iov->pf2vf_reply)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(union pfvf_tlvs),
				  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct qed_bulletin_content);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  size,
				  p_iov->bulletin.p_virt, p_iov->bulletin.phys);
	}

	kfree(p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = NULL;

	return rc;
}

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd)
{
	struct qed_sp_vport_update_params sp_params;
	int i;

	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, sp_params.bins);
		}
	}

	qed_vf_pf_vport_update(p_hwfn, &sp_params);
}

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_ucast)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	memcpy(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EAGAIN;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
		       sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
		    CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));

	rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = -EINVAL;
		goto exit;
	}

exit:
	qed_vf_pf_req_end(p_hwfn, rc);
	return rc;
}

u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

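/* The bulletin board is one-way PF->VF shared memory. Because the PF may be
 * rewriting it at any time, the VF copies it into a local shadow first, skips
 * the work when the version has not moved, and only accepts the snapshot when
 * the CRC32 over the remainder of the bulletin matches the advertised crc.
 */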
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct qed_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return 0;

	/* Verify the bulletin we see is valid */
	crc = crc32(0, (u8 *)&shadow + crc_size,
		    p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return -EAGAIN;

	/* Set the shadow bulletin and process it */
	memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return 0;
}

void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin)
{
	memset(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params)
{
	__qed_vf_get_link_params(p_hwfn, params,
				 &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin)
{
	memset(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link)
{
	__qed_vf_get_link_state(p_hwfn, link,
				&(p_hwfn->vf_iov_info->bulletin_shadow));
}

void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin)
{
	memset(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps)
{
	__qed_vf_get_link_caps(p_hwfn, p_link_caps,
			       &(p_hwfn->vf_iov_info->bulletin_shadow));
}

void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
	memcpy(port_mac,
	       p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
}

void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
{
	struct qed_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
{
	struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;

	*num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}

bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (ether_addr_equal(bulletin->mac, mac))
		return false;

	return false;
}

static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
					   u8 *dst_mac, u8 *p_is_forced)
{
	struct qed_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	ether_addr_copy(dst_mac, bulletin->mac);

	return true;
}

static void
qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
			      u16 *p_vxlan_port, u16 *p_geneve_port)
{
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;

	*p_vxlan_port = p_bulletin->vxlan_udp_port;
	*p_geneve_port = p_bulletin->geneve_udp_port;
}

void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}

static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
{
	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
	void *cookie = hwfn->cdev->ops_cookie;
	u16 vxlan_port, geneve_port;

	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
						      &is_mac_forced);
	if (is_mac_exist && cookie)
		ops->force_mac(cookie, mac, !!is_mac_forced);

	ops->ports_update(cookie, vxlan_port, geneve_port);

	/* Always update link configuration according to bulletin */
	qed_link_update(hwfn);
}

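/* Since a VF has no interrupt for bulletin updates, qed_iov_vf_task() simply
 * re-queues itself every second (HZ) on the iov workqueue, checks the
 * bulletin for a new version, and pushes any MAC/UDP-port/link changes up
 * through the protocol_ops callbacks.
 */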
void qed_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	/* Handle bulletin board changes */
	qed_vf_read_bulletin(hwfn, &change);
	if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
			       &hwfn->iov_task_flags))
		change = 1;
	if (change)
		qed_handle_bulletin_change(hwfn);

	/* As VF is polling bulletin board, need to constantly re-schedule */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
}