bnxt_dcb.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB
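
/* Program the firmware's priority-to-CoS-queue mapping (both directions,
 * inner VLAN PRI) from the IEEE ETS prio_tc table.
 */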
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_cfg_input req = {0};
	int rc = 0, i;
	u8 *pri2cos;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
				QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

	pri2cos = &req.pri0_cos_queue_id;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		req.enables |= cpu_to_le32(
			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
		pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
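
/* Query the current priority-to-CoS mapping from firmware and translate
 * each hardware queue ID back into a TC index in ets->prio_tc[].
 */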
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u8 *pri2cos = &resp->pri0_cos_queue_id;
		int i, j;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			u8 queue_id = pri2cos[i];

			for (j = 0; j < bp->max_tc; j++) {
				if (bp->q_info[j].queue_id == queue_id) {
					ets->prio_tc[i] = j;
					break;
				}
			}
		}
	}
	return rc;
}
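
/* Program per-TC transmission selection: strict priority or ETS with a
 * bandwidth weight.  One packed bnxt_cos2bw_cfg record is written into
 * the request for each TC; the first record's queue ID goes into the
 * dedicated queue_id0 field.
 */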
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
				      u8 max_tc)
{
	struct hwrm_queue_cos2bw_cfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	int rc = 0, i;
	void *data;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
	data = &req.unused_0;
	for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
		req.enables |= cpu_to_le32(
			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);

		memset(&cos2bw, 0, sizeof(cos2bw));
		cos2bw.queue_id = bp->q_info[i].queue_id;
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
			cos2bw.pri_lvl = i;
		} else {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
			cos2bw.bw_weight = ets->tc_tx_bw[i];
			/* older firmware requires min_bw to be set to the
			 * same weight value in percent.
			 */
			cos2bw.min_bw =
				cpu_to_le32((ets->tc_tx_bw[i] * 100) |
					    BW_VALUE_UNIT_PERCENT1_100);
		}
		memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
		if (i == 0) {
			req.queue_id0 = cos2bw.queue_id;
			req.unused_0 = 0;
		}
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
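
/* Read back the per-queue scheduler configuration and convert it to
 * IEEE ETS form, matching firmware queue IDs to TC indices.
 */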
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_cos2bw_qcfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	void *data;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
		int j;

		memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
		if (i == 0)
			cos2bw.queue_id = resp->queue_id0;

		for (j = 0; j < bp->max_tc; j++) {
			if (bp->q_info[j].queue_id != cos2bw.queue_id)
				continue;
			if (cos2bw.tsa ==
			    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
				ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
			} else {
				ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
				ets->tc_tx_bw[j] = cos2bw.bw_weight;
			}
		}
	}
	return 0;
}
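
/* Reprogram queue service profiles: first demote all lossless queues to
 * lossy, then mark the queues selected by lltc_mask as lossless.  TX is
 * paused around the change if the interface is running.
 */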
static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
{
	struct hwrm_queue_cfg_input req = {0};
	int i;

	if (netif_running(bp->dev))
		bnxt_tx_disable(bp);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
	req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);

	/* Configure lossless queues to lossy first */
	req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
	for (i = 0; i < bp->max_tc; i++) {
		if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
			req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
			hwrm_send_message(bp, &req, sizeof(req),
					  HWRM_CMD_TIMEOUT);
			bp->q_info[i].queue_profile =
				QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
		}
	}

	/* Now configure desired queues to lossless */
	req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
	for (i = 0; i < bp->max_tc; i++) {
		if (lltc_mask & (1 << i)) {
			req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
			hwrm_send_message(bp, &req, sizeof(req),
					  HWRM_CMD_TIMEOUT);
			bp->q_info[i].queue_profile =
				QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
		}
	}
	if (netif_running(bp->dev))
		bnxt_tx_enable(bp);

	return 0;
}
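
/* Enable PFC on the priorities in pfc->pfc_en.  The affected TC set is
 * derived from the cached ETS prio_tc mapping; requests needing more
 * lossless TCs than the hardware supports are rejected, and queue
 * profiles are reconfigured if a PFC-enabled TC is not yet lossless.
 */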
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_cfg_input req = {0};
	struct ieee_ets *my_ets = bp->ieee_ets;
	unsigned int tc_mask = 0, pri_mask = 0;
	u8 i, pri, lltc_count = 0;
	bool need_q_recfg = false;
	int rc;

	if (!my_ets)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
			if ((pfc->pfc_en & (1 << pri)) &&
			    (my_ets->prio_tc[pri] == i)) {
				pri_mask |= 1 << pri;
				tc_mask |= 1 << i;
			}
		}
		if (tc_mask & (1 << i))
			lltc_count++;
	}
	if (lltc_count > bp->max_lltc)
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
	req.flags = cpu_to_le32(pri_mask);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	for (i = 0; i < bp->max_tc; i++) {
		if (tc_mask & (1 << i)) {
			if (!BNXT_LLQ(bp->q_info[i].queue_profile))
				need_q_recfg = true;
		}
	}

	if (need_q_recfg)
		rc = bnxt_hwrm_queue_cfg(bp, tc_mask);

	return rc;
}
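
/* Query the currently PFC-enabled priority mask from firmware. */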
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pfcenable_qcfg_input req = {0};
	u8 pri_mask;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	pri_mask = le32_to_cpu(resp->flags);
	pfc->pfc_en = pri_mask;
	return 0;
}
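
/* Add or delete one DCBX app entry in the firmware's host-operational
 * table.  The whole table is read into a DMA buffer, edited in place,
 * and written back with HWRM_FW_SET_STRUCTURED_DATA.
 */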
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
				  bool add)
{
	struct hwrm_fw_set_structured_data_input set = {0};
	struct hwrm_fw_get_structured_data_input get = {0};
	struct hwrm_struct_data_dcbx_app *fw_app;
	struct hwrm_struct_hdr *data;
	dma_addr_t mapping;
	size_t data_len;
	int rc, n, i;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	n = IEEE_8021QAZ_MAX_TCS;
	data_len = sizeof(*data) + sizeof(*fw_app) * n;
	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
				  GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(data, 0, data_len);
	bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
	get.dest_data_addr = cpu_to_le64(mapping);
	get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
	get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
	get.count = 0;
	rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
	if (rc)
		goto set_app_exit;

	fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);

	if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
		rc = -ENODEV;
		goto set_app_exit;
	}

	n = data->count;
	for (i = 0; i < n; i++, fw_app++) {
		if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
		    fw_app->protocol_selector == app->selector &&
		    fw_app->priority == app->priority) {
			if (add)
				goto set_app_exit;
			else
				break;
		}
	}

	if (add) {
		/* append */
		n++;
		fw_app->protocol_id = cpu_to_be16(app->protocol);
		fw_app->protocol_selector = app->selector;
		fw_app->priority = app->priority;
		fw_app->valid = 1;
	} else {
		size_t len = 0;

		/* not found, nothing to delete */
		if (n == i)
			goto set_app_exit;

		len = (n - 1 - i) * sizeof(*fw_app);
		if (len)
			memmove(fw_app, fw_app + 1, len);
		n--;
		memset(fw_app + n, 0, sizeof(*fw_app));
	}
	data->count = n;
	data->len = cpu_to_le16(sizeof(*fw_app) * n);
	data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);

	bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
	set.src_data_addr = cpu_to_le64(mapping);
	set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
	set.hdr_cnt = 1;
	rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;

set_app_exit:
	dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
	return rc;
}
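
/* Validate an ETS configuration from dcbnl: every priority must map to
 * a valid TC, only strict-priority and ETS TSAs are supported, and the
 * ETS weights may not add up to more than 100%.  Returns the number of
 * TCs in use through *tc.
 */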
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
	int total_ets_bw = 0;
	u8 max_tc = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > bp->max_tc) {
			netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
				   ets->prio_tc[i]);
			return -EINVAL;
		}
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
			return -EINVAL;

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			return -ENOTSUPP;
		}
	}
	if (total_ets_bw > 100)
		return -EINVAL;

	*tc = max_tc + 1;
	return 0;
}
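
/* dcbnl .ieee_getets: report the cached ETS settings, querying firmware
 * once and caching the result when firmware owns the configuration.
 */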
static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;

	ets->ets_cap = bp->max_tc;

	if (!my_ets) {
		int rc;

		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
		if (!my_ets)
			return 0;
		rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
		if (rc)
			goto error;
		rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
		if (rc)
			goto error;

		/* cache the queried settings; returning without storing
		 * the pointer would leak the allocation on every call
		 */
		bp->ieee_ets = my_ets;
	}

	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;

error:
	kfree(my_ets);
	return 0;
}
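
/* dcbnl .ieee_setets: validate the requested ETS config, resize the
 * netdev TC setup, then program the scheduler and priority mappings.
 */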
static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	u8 max_tc = 0;
	int rc, i;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_ets_validate(bp, ets, &max_tc);
	if (!rc) {
		if (!my_ets) {
			my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
			if (!my_ets)
				return -ENOMEM;
			/* initialize PRI2TC mappings to invalid value */
			for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
				my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
			bp->ieee_ets = my_ets;
		}
		rc = bnxt_setup_mq_tc(dev, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
		if (rc)
			return rc;
		memcpy(my_ets, ets, sizeof(*my_ets));
	}
	return rc;
}
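
/* dcbnl .ieee_getpfc: report PFC settings plus per-priority PFC frame
 * counters pulled from the port statistics block.
 */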
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	__le64 *stats = (__le64 *)bp->hw_rx_port_stats;
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	long rx_off, tx_off;
	int i, rc;

	pfc->pfc_cap = bp->max_lltc;

	if (!my_pfc) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return 0;
		bp->ieee_pfc = my_pfc;
		rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
		if (rc)
			return 0;
	}

	pfc->pfc_en = my_pfc->pfc_en;
	pfc->mbc = my_pfc->mbc;
	pfc->delay = my_pfc->delay;

	if (!stats)
		return 0;

	rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
	tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
		pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
		pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
	}

	return 0;
}
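
/* dcbnl .ieee_setpfc: apply a new PFC enable mask and cache it on
 * success.
 */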
static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	if (!my_pfc) {
		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return -ENOMEM;
		bp->ieee_pfc = my_pfc;
	}
	rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
	if (!rc)
		memcpy(my_pfc, pfc, sizeof(*my_pfc));

	return rc;
}
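
/* dcbnl .ieee_setapp: register an app entry with the dcbnl core and,
 * for the RoCE v1/v2 protocols, mirror it into the firmware app table.
 */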
static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = -EINVAL;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = dcb_ieee_setapp(dev, app);
	if (rc)
		return rc;

	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);

	return rc;
}
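
/* dcbnl .ieee_delapp: the mirror image of setapp, removing the entry
 * from both the dcbnl core and the firmware app table.
 */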
static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = dcb_ieee_delapp(dev, app);
	if (rc)
		return rc;
	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);

	return rc;
}
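
/* dcbnl .getdcbx/.setdcbx: report and switch the DCBX capability mode.
 * Mode changes are refused (non-zero return) when firmware owns DCBX,
 * on VFs, or when anything other than host-managed IEEE is requested.
 */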
static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->dcbx_cap;
}

static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct bnxt *bp = netdev_priv(dev);

	/* All firmware DCBX settings are set in NVRAM */
	if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if (mode & DCB_CAP_DCBX_HOST) {
		if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
			return 1;

		/* only support IEEE */
		if ((mode & DCB_CAP_DCBX_VER_CEE) ||
		    !(mode & DCB_CAP_DCBX_VER_IEEE))
			return 1;
	}

	if (mode == bp->dcbx_cap)
		return 0;

	bp->dcbx_cap = mode;
	return 0;
}

static const struct dcbnl_rtnl_ops dcbnl_ops = {
	.ieee_getets	= bnxt_dcbnl_ieee_getets,
	.ieee_setets	= bnxt_dcbnl_ieee_setets,
	.ieee_getpfc	= bnxt_dcbnl_ieee_getpfc,
	.ieee_setpfc	= bnxt_dcbnl_ieee_setpfc,
	.ieee_setapp	= bnxt_dcbnl_ieee_setapp,
	.ieee_delapp	= bnxt_dcbnl_ieee_delapp,
	.getdcbx	= bnxt_dcbnl_getdcbx,
	.setdcbx	= bnxt_dcbnl_setdcbx,
};
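
/* Hook up dcbnl_ops and pick the DCBX capability bits: host-managed on
 * PFs without a firmware LLDP agent, otherwise firmware (LLD) managed.
 * Requires HWRM spec 1.5.1 or newer.
 */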
void bnxt_dcb_init(struct bnxt *bp)
{
	if (bp->hwrm_spec_code < 0x10501)
		return;

	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
	if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
	else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
	bp->dev->dcbnl_ops = &dcbnl_ops;
}

void bnxt_dcb_free(struct bnxt *bp)
{
	kfree(bp->ieee_pfc);
	kfree(bp->ieee_ets);
	bp->ieee_pfc = NULL;
	bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}
#endif