bnxt_dcb.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB

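/* Map a hardware CoS queue ID back to the IEEE traffic class (TC) that is
 * currently bound to it via bp->tc_to_qidx[], or return -EINVAL if the
 * queue ID is not known to the driver.
 */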
static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
{
	int i, j;

	for (i = 0; i < bp->max_tc; i++) {
		if (bp->q_info[i].queue_id == queue_id) {
			for (j = 0; j < bp->max_tc; j++) {
				if (bp->tc_to_qidx[j] == i)
					return j;
			}
		}
	}
	return -EINVAL;
}

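/* Program (and, in the QCFG variant below, query) the priority-to-CoS-queue
 * mapping in firmware via HWRM_QUEUE_PRI2COS_CFG/QCFG, translating between
 * the IEEE prio_tc[] table and the device queue IDs.
 */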
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_cfg_input req = {0};
	int rc = 0, i;
	u8 *pri2cos;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
				QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

	pri2cos = &req.pri0_cos_queue_id;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		u8 qidx;

		req.enables |= cpu_to_le32(
			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);

		qidx = bp->tc_to_qidx[ets->prio_tc[i]];
		pri2cos[i] = bp->q_info[qidx].queue_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u8 *pri2cos = &resp->pri0_cos_queue_id;
		int i;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			u8 queue_id = pri2cos[i];
			int tc;

			tc = bnxt_queue_to_tc(bp, queue_id);
			if (tc >= 0)
				ets->prio_tc[i] = tc;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

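/* Configure per-CoS-queue transmission selection (strict priority vs. ETS
 * with a bandwidth weight) through HWRM_QUEUE_COS2BW_CFG, and read it back
 * with the QCFG variant below.  Each TC entry is packed into the request at
 * an offset derived from its queue index.
 */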
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
				      u8 max_tc)
{
	struct hwrm_queue_cos2bw_cfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	int rc = 0, i;
	void *data;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
	for (i = 0; i < max_tc; i++) {
		u8 qidx;

		req.enables |= cpu_to_le32(
			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);

		memset(&cos2bw, 0, sizeof(cos2bw));
		qidx = bp->tc_to_qidx[i];
		cos2bw.queue_id = bp->q_info[qidx].queue_id;
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
			cos2bw.pri_lvl = i;
		} else {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
			cos2bw.bw_weight = ets->tc_tx_bw[i];
			/* older firmware requires min_bw to be set to the
			 * same weight value in percent.
			 */
			cos2bw.min_bw =
				cpu_to_le32((ets->tc_tx_bw[i] * 100) |
					    BW_VALUE_UNIT_PERCENT1_100);
		}
		data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
		memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
		if (qidx == 0) {
			req.queue_id0 = cos2bw.queue_id;
			req.unused_0 = 0;
		}
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_cos2bw_qcfg_input req = {0};
	struct bnxt_cos2bw_cfg cos2bw;
	void *data;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
		int tc;

		memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
		if (i == 0)
			cos2bw.queue_id = resp->queue_id0;

		tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
		if (tc < 0)
			continue;

		if (cos2bw.tsa ==
		    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
		} else {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
			ets->tc_tx_bw[tc] = cos2bw.bw_weight;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

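/* Remap TCs to hardware queues so that every TC in lltc_mask lands on a
 * lossless-capable queue.  If the device is running it is closed and
 * reopened, and any cached ETS configuration is replayed to firmware.
 */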
static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
	unsigned long qmap = 0;
	int max = bp->max_tc;
	int i, j, rc;

	/* Assign lossless TCs first */
	for (i = 0, j = 0; i < max; ) {
		if (lltc_mask & (1 << i)) {
			if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
				bp->tc_to_qidx[i] = j;
				__set_bit(j, &qmap);
				i++;
			}
			j++;
			continue;
		}
		i++;
	}

	for (i = 0, j = 0; i < max; i++) {
		if (lltc_mask & (1 << i))
			continue;
		j = find_next_zero_bit(&qmap, max, j);
		bp->tc_to_qidx[i] = j;
		__set_bit(j, &qmap);
		j++;
	}

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
		if (rc) {
			netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
			return rc;
		}
	}
	if (bp->ieee_ets) {
		int tc = netdev_get_num_tc(bp->dev);

		if (!tc)
			tc = 1;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
		if (rc) {
			netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
			return rc;
		}
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
		if (rc) {
			netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
			return rc;
		}
	}
	return 0;
}

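/* Enable PFC for the priorities selected in pfc->pfc_en.  Priorities are
 * first translated to a TC mask using the cached ETS mapping; if any such
 * TC is not on a lossless queue, the queues are remapped before the
 * HWRM_QUEUE_PFCENABLE_CFG message is sent.
 */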
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_cfg_input req = {0};
	struct ieee_ets *my_ets = bp->ieee_ets;
	unsigned int tc_mask = 0, pri_mask = 0;
	u8 i, pri, lltc_count = 0;
	bool need_q_remap = false;
	int rc;

	if (!my_ets)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
			if ((pfc->pfc_en & (1 << pri)) &&
			    (my_ets->prio_tc[pri] == i)) {
				pri_mask |= 1 << pri;
				tc_mask |= 1 << i;
			}
		}
		if (tc_mask & (1 << i))
			lltc_count++;
	}
	if (lltc_count > bp->max_lltc)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		if (tc_mask & (1 << i)) {
			u8 qidx = bp->tc_to_qidx[i];

			if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
				need_q_remap = true;
				break;
			}
		}
	}

	if (need_q_remap) {
		/* don't enable PFC if the queue remap failed */
		rc = bnxt_queue_remap(bp, tc_mask);
		if (rc)
			return rc;
	}

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
	req.flags = cpu_to_le32(pri_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pfcenable_qcfg_input req = {0};
	u8 pri_mask;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	pri_mask = le32_to_cpu(resp->flags);
	pfc->pfc_en = pri_mask;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

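/* Add or remove one DCB application entry in the firmware's operational
 * DCBX app table: read the current table with HWRM_FW_GET_STRUCTURED_DATA,
 * edit it in a DMA buffer, and write it back with
 * HWRM_FW_SET_STRUCTURED_DATA.
 */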
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
				  bool add)
{
	struct hwrm_fw_set_structured_data_input set = {0};
	struct hwrm_fw_get_structured_data_input get = {0};
	struct hwrm_struct_data_dcbx_app *fw_app;
	struct hwrm_struct_hdr *data;
	dma_addr_t mapping;
	size_t data_len;
	int rc, n, i;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	n = IEEE_8021QAZ_MAX_TCS;
	data_len = sizeof(*data) + sizeof(*fw_app) * n;
	data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
				   GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
	get.dest_data_addr = cpu_to_le64(mapping);
	get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
	get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
	get.count = 0;
	rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
	if (rc)
		goto set_app_exit;

	fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);

	if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
		rc = -ENODEV;
		goto set_app_exit;
	}

	n = data->count;
	for (i = 0; i < n; i++, fw_app++) {
		if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
		    fw_app->protocol_selector == app->selector &&
		    fw_app->priority == app->priority) {
			if (add)
				goto set_app_exit;
			else
				break;
		}
	}

	if (add) {
		/* append */
		n++;
		fw_app->protocol_id = cpu_to_be16(app->protocol);
		fw_app->protocol_selector = app->selector;
		fw_app->priority = app->priority;
		fw_app->valid = 1;
	} else {
		size_t len = 0;

		/* not found, nothing to delete */
		if (n == i)
			goto set_app_exit;

		len = (n - 1 - i) * sizeof(*fw_app);
		if (len)
			memmove(fw_app, fw_app + 1, len);
		n--;
		memset(fw_app + n, 0, sizeof(*fw_app));
	}
	data->count = n;
	data->len = cpu_to_le16(sizeof(*fw_app) * n);
	data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);

	bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
	set.src_data_addr = cpu_to_le64(mapping);
	set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
	set.hdr_cnt = 1;
	rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;

set_app_exit:
	dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
	return rc;
}

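/* Validate an ETS configuration from dcbnl: reject prio->TC mappings or
 * TSA entries beyond the device's TC count, unsupported TSA types, and
 * ETS bandwidth totals above 100%.  On success *tc is set to the number
 * of TCs actually used.
 */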
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
	int total_ets_bw = 0;
	u8 max_tc = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > bp->max_tc) {
			netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
				   ets->prio_tc[i]);
			return -EINVAL;
		}
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
			return -EINVAL;

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			return -ENOTSUPP;
		}
	}
	if (total_ets_bw > 100)
		return -EINVAL;

	*tc = max_tc + 1;
	return 0;
}

static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;

	ets->ets_cap = bp->max_tc;

	if (!my_ets) {
		int rc;

		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
		if (!my_ets)
			return 0;
		rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
		if (rc)
			goto free_ets;
		rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
		if (rc)
			goto free_ets;

		/* cache the firmware configuration so it is not leaked and
		 * does not have to be re-queried on every call.
		 */
		bp->ieee_ets = my_ets;
	}

	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;

free_ets:
	kfree(my_ets);
	return 0;
}

static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	u8 max_tc = 0;
	int rc, i;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_ets_validate(bp, ets, &max_tc);
	if (!rc) {
		if (!my_ets) {
			my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
			if (!my_ets)
				return -ENOMEM;
			/* initialize PRI2TC mappings to invalid value */
			for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
				my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
			bp->ieee_ets = my_ets;
		}
		rc = bnxt_setup_mq_tc(dev, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
		if (rc)
			return rc;
		memcpy(my_ets, ets, sizeof(*my_ets));
	}
	return rc;
}

static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	__le64 *stats = (__le64 *)bp->hw_rx_port_stats;
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	long rx_off, tx_off;
	int i, rc;

	pfc->pfc_cap = bp->max_lltc;

	if (!my_pfc) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return 0;
		bp->ieee_pfc = my_pfc;

		rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
		if (rc)
			return 0;
	}

	pfc->pfc_en = my_pfc->pfc_en;
	pfc->mbc = my_pfc->mbc;
	pfc->delay = my_pfc->delay;

	if (!stats)
		return 0;

	rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
	tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
		pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
		pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
	}

	return 0;
}

static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	if (!my_pfc) {
		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return -ENOMEM;
		bp->ieee_pfc = my_pfc;
	}
	rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
	if (!rc)
		memcpy(my_pfc, pfc, sizeof(*my_pfc));

	return rc;
}

static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = -EINVAL;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = dcb_ieee_setapp(dev, app);
	if (rc)
		return rc;

	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);

	return rc;
}

static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = dcb_ieee_delapp(dev, app);
	if (rc)
		return rc;
	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);

	return rc;
}

static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->dcbx_cap;
}

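/* dcbnl setdcbx hook: the host may only take ownership of DCBX (IEEE mode
 * only) when the configuration is not managed by the firmware LLDP agent;
 * returning 1 tells dcbnl the requested mode was refused.
 */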
static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct bnxt *bp = netdev_priv(dev);

	/* All firmware DCBX settings are set in NVRAM */
	if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if (mode & DCB_CAP_DCBX_HOST) {
		if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
			return 1;

		/* only support IEEE */
		if ((mode & DCB_CAP_DCBX_VER_CEE) ||
		    !(mode & DCB_CAP_DCBX_VER_IEEE))
			return 1;
	}

	if (mode == bp->dcbx_cap)
		return 0;

	bp->dcbx_cap = mode;
	return 0;
}

static const struct dcbnl_rtnl_ops dcbnl_ops = {
	.ieee_getets	= bnxt_dcbnl_ieee_getets,
	.ieee_setets	= bnxt_dcbnl_ieee_setets,
	.ieee_getpfc	= bnxt_dcbnl_ieee_getpfc,
	.ieee_setpfc	= bnxt_dcbnl_ieee_setpfc,
	.ieee_setapp	= bnxt_dcbnl_ieee_setapp,
	.ieee_delapp	= bnxt_dcbnl_ieee_delapp,
	.getdcbx	= bnxt_dcbnl_getdcbx,
	.setdcbx	= bnxt_dcbnl_setdcbx,
};

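/* Register the dcbnl ops and work out who owns DCBX: the host on a PF
 * without a firmware LLDP agent, otherwise the firmware (LLD_MANAGED)
 * when it runs a DCBX agent.
 */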
void bnxt_dcb_init(struct bnxt *bp)
{
	if (bp->hwrm_spec_code < 0x10501)
		return;

	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
	if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
	else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
	bp->dev->dcbnl_ops = &dcbnl_ops;
}

void bnxt_dcb_free(struct bnxt *bp)
{
	kfree(bp->ieee_pfc);
	kfree(bp->ieee_ets);
	bp->ieee_pfc = NULL;
	bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}

#endif