cxgb4_dcb.c

/*
 * Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
 *
 * Written by Anish Bhatt (anish@chelsio.com)
 *	      Casey Leedom (leedom@chelsio.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include "cxgb4.h"

/* DCBx version control
 */
static const char * const dcb_ver_array[] = {
	"Unknown",
	"DCBx-CIN",
	"DCBx-CEE 1.01",
	"DCBx-IEEE",
	"", "", "",
	"Auto Negotiated"
};

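/* Note: dcb_ver_array[] is indexed by the firmware's DCBX version code
 * (FW_PORT_DCB_VER_*), which is why unused codes appear as empty strings
 * between "DCBx-IEEE" and "Auto Negotiated".
 */
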
/* Initialize a port's Data Center Bridging state.  Typically used after a
 * Link Down event.
 */
void cxgb4_dcb_state_init(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	int version_temp = dcb->dcb_version;

	memset(dcb, 0, sizeof(struct port_dcb_info));
	dcb->state = CXGB4_DCB_STATE_START;
	if (version_temp)
		dcb->dcb_version = version_temp;

	netdev_dbg(dev, "%s: Initializing DCB state for port[%d]\n",
		   __func__, pi->port_id);
}

void cxgb4_dcb_version_init(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	/* Any writes here are only done on kernels that explicitly need
	 * a specific version, say < 2.6.38, which only support CEE
	 */
	dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
}

static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	struct port_dcb_info *dcb = &pi->dcb;
	struct dcb_app app;
	int i, err;

	/* zero priority implies remove */
	app.priority = 0;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		/* Check if app list is exhausted */
		if (!dcb->app_priority[i].protocolid)
			break;

		app.protocol = dcb->app_priority[i].protocolid;

		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
			app.priority = dcb->app_priority[i].user_prio_map;
			app.selector = dcb->app_priority[i].sel_field + 1;
			err = dcb_ieee_delapp(dev, &app);
		} else {
			app.selector = !!(dcb->app_priority[i].sel_field);
			err = dcb_setapp(dev, &app);
		}

		if (err) {
			dev_err(adap->pdev_dev,
				"Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
				dcb_ver_array[dcb->dcb_version], app.selector,
				app.protocol, -err);
			break;
		}
	}
}

/* Finite State machine for Data Center Bridging.
 */
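/* Summary of the states and inputs handled below:
 *
 *   States: START (freshly initialized, e.g. after a Link Down event),
 *   FW_INCOMPLETE (firmware DCB negotiation in progress), FW_ALLSYNCED
 *   (firmware negotiation complete, DCB enabled), and HOST (DCB managed
 *   by an outside host LLDP agent).
 *
 *   Inputs: FW_DISABLED, FW_ENABLED, FW_INCOMPLETE and FW_ALLSYNCED, as
 *   derived from firmware port/DCB messages.
 */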
void cxgb4_dcb_state_fsm(struct net_device *dev,
			 enum cxgb4_dcb_state_input transition_to)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	struct adapter *adap = pi->adapter;
	enum cxgb4_dcb_state current_state = dcb->state;

	netdev_dbg(dev, "%s: State change from %d to %d for %s\n",
		   __func__, dcb->state, transition_to, dev->name);

	switch (current_state) {
	case CXGB4_DCB_STATE_START: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_DISABLED: {
			/* we're going to use Host DCB */
			dcb->state = CXGB4_DCB_STATE_HOST;
			dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
			break;
		}

		case CXGB4_DCB_INPUT_FW_ENABLED: {
			/* we're going to use Firmware DCB */
			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
			dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
			if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
				dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
			else
				dcb->supported |= DCB_CAP_DCBX_VER_CEE;
			break;
		}

		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
			/* expected transition */
			break;
		}

		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
			dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	case CXGB4_DCB_STATE_FW_INCOMPLETE: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_ENABLED: {
			/* we're already in firmware DCB mode */
			break;
		}

		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
			/* we're already incomplete */
			break;
		}

		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
			dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
			dcb->enabled = 1;
			linkwatch_fire_event(dev);
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	case CXGB4_DCB_STATE_FW_ALLSYNCED: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_ENABLED: {
			/* we're already in firmware DCB mode */
			break;
		}

		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
			/* We were successfully running with firmware DCB but
			 * now it's telling us that it's in an "incomplete"
			 * state.  We need to reset back to a ground state
			 * of incomplete.
			 */
			cxgb4_dcb_cleanup_apps(dev);
			cxgb4_dcb_state_init(dev);
			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
			linkwatch_fire_event(dev);
			break;
		}

		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
			/* we're already all sync'ed
			 * this is only applicable for IEEE or
			 * when another VI already completed negotiation
			 */
			dcb->enabled = 1;
			linkwatch_fire_event(dev);
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	case CXGB4_DCB_STATE_HOST: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_DISABLED: {
			/* we're already in Host DCB mode */
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	default:
		goto bad_state_transition;
	}
	return;

bad_state_input:
	dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: illegal input symbol %d\n",
		transition_to);
	return;

bad_state_transition:
	dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: bad state transition, state = %d, input = %d\n",
		current_state, transition_to);
}

/* Handle a DCB/DCBX update message from the firmware.
 */
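/* Note: control messages (FW_PORT_DCB_TYPE_CONTROL) are translated into
 * state-machine inputs; the remaining message types just cache the reported
 * parameters in the port's port_dcb_info.  IEEE_FAUX_SYNC() (see
 * cxgb4_dcb.h) appears to feed a "faux" ALLSYNCED input to the state
 * machine when running IEEE DCBX, presumably because IEEE mode does not
 * deliver the CEE-style all-synced control message.
 */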
void cxgb4_dcb_handle_fw_update(struct adapter *adap,
				const struct fw_port_cmd *pcmd)
{
	const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
	int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	struct port_info *pi = netdev_priv(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	int dcb_type = pcmd->u.dcb.pgid.type;
	int dcb_running_version;

	/* Handle Firmware DCB Control messages separately since they drive
	 * our state machine.
	 */
	if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
		enum cxgb4_dcb_state_input input =
			((pcmd->u.dcb.control.all_syncd_pkd &
			  FW_PORT_CMD_ALL_SYNCD_F)
			 ? CXGB4_DCB_STATE_FW_ALLSYNCED
			 : CXGB4_DCB_STATE_FW_INCOMPLETE);

		if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
			dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
				be16_to_cpu(
				pcmd->u.dcb.control.dcb_version_to_app_state));
			if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 ||
			    dcb_running_version == FW_PORT_DCB_VER_IEEE) {
				dcb->dcb_version = dcb_running_version;
				dev_warn(adap->pdev_dev, "Interface %s is running %s\n",
					 dev->name,
					 dcb_ver_array[dcb->dcb_version]);
			} else {
				dev_warn(adap->pdev_dev,
					 "Something screwed up, requested firmware for %s, but firmware returned %s instead\n",
					 dcb_ver_array[dcb->dcb_version],
					 dcb_ver_array[dcb_running_version]);
				dcb->dcb_version = FW_PORT_DCB_VER_UNKNOWN;
			}
		}

		cxgb4_dcb_state_fsm(dev, input);
		return;
	}

	/* It's weird, and almost certainly an error, to get Firmware DCB
	 * messages when we either haven't been told whether we're going to be
	 * doing Host or Firmware DCB; and even worse when we've been told
	 * that we're doing Host DCB!
	 */
	if (dcb->state == CXGB4_DCB_STATE_START ||
	    dcb->state == CXGB4_DCB_STATE_HOST) {
		dev_err(adap->pdev_dev, "Receiving Firmware DCB messages in State %d\n",
			dcb->state);
		return;
	}

	/* Now handle the general Firmware DCB update messages ...
	 */
	switch (dcb_type) {
	case FW_PORT_DCB_TYPE_PGID:
		dcb->pgid = be32_to_cpu(fwdcb->pgid.pgid);
		dcb->msgs |= CXGB4_DCB_FW_PGID;
		break;

	case FW_PORT_DCB_TYPE_PGRATE:
		dcb->pg_num_tcs_supported = fwdcb->pgrate.num_tcs_supported;
		memcpy(dcb->pgrate, &fwdcb->pgrate.pgrate,
		       sizeof(dcb->pgrate));
		memcpy(dcb->tsa, &fwdcb->pgrate.tsa,
		       sizeof(dcb->tsa));
		dcb->msgs |= CXGB4_DCB_FW_PGRATE;
		if (dcb->msgs & CXGB4_DCB_FW_PGID)
			IEEE_FAUX_SYNC(dev, dcb);
		break;

	case FW_PORT_DCB_TYPE_PRIORATE:
		memcpy(dcb->priorate, &fwdcb->priorate.strict_priorate,
		       sizeof(dcb->priorate));
		dcb->msgs |= CXGB4_DCB_FW_PRIORATE;
		break;

	case FW_PORT_DCB_TYPE_PFC:
		dcb->pfcen = fwdcb->pfc.pfcen;
		dcb->pfc_num_tcs_supported = fwdcb->pfc.max_pfc_tcs;
		dcb->msgs |= CXGB4_DCB_FW_PFC;
		IEEE_FAUX_SYNC(dev, dcb);
		break;

	case FW_PORT_DCB_TYPE_APP_ID: {
		const struct fw_port_app_priority *fwap = &fwdcb->app_priority;
		int idx = fwap->idx;
		struct app_priority *ap = &dcb->app_priority[idx];

		struct dcb_app app = {
			.protocol = be16_to_cpu(fwap->protocolid),
		};
		int err;

		/* Convert from firmware format to relevant format
		 * when using app selector
		 */
		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
			app.selector = (fwap->sel_field + 1);
			app.priority = ffs(fwap->user_prio_map) - 1;
			err = dcb_ieee_setapp(dev, &app);
			IEEE_FAUX_SYNC(dev, dcb);
		} else {
			/* Default is CEE */
			app.selector = !!(fwap->sel_field);
			app.priority = fwap->user_prio_map;
			err = dcb_setapp(dev, &app);
		}

		if (err)
			dev_err(adap->pdev_dev,
				"Failed DCB Set Application Priority: sel=%d, prot=%d, prio=%d, err=%d\n",
				app.selector, app.protocol, app.priority, -err);

		ap->user_prio_map = fwap->user_prio_map;
		ap->sel_field = fwap->sel_field;
		ap->protocolid = be16_to_cpu(fwap->protocolid);
		dcb->msgs |= CXGB4_DCB_FW_APP_ID;
		break;
	}

	default:
		dev_err(adap->pdev_dev, "Unknown DCB update type received %x\n",
			dcb_type);
		break;
	}
}

/* Data Center Bridging netlink operations.
 */
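/* The callbacks below implement the dcbnl_rtnl_ops interface defined at the
 * bottom of this file: CEE-style get/set operations plus a handful of IEEE
 * (802.1Qaz) handlers, all backed by FW_PORT_CMD mailbox reads and writes.
 */
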
/* Get current DCB enabled/disabled state.
 */
static u8 cxgb4_getstate(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);

	return pi->dcb.enabled;
}

/* Set DCB enabled/disabled.
 */
static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
{
	struct port_info *pi = netdev2pinfo(dev);

	/* If DCBx is host-managed, dcb is enabled by outside lldp agents */
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
		pi->dcb.enabled = enabled;
		return 0;
	}

	/* Firmware doesn't provide any mechanism to control the DCB state.
	 */
	if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
		return 1;

	return 0;
}

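/* The firmware reports the Priority Group ID map as eight 4-bit fields
 * packed into a single 32-bit word, with TC 0 in the most significant
 * nibble; hence the "7 - tc" flipping in the _tx/_rx wrappers below.
 */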
static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
			     u8 *prio_type, u8 *pgid, u8 *bw_per,
			     u8 *up_tc_map, int local)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	*prio_type = *pgid = *bw_per = *up_tc_map = 0;

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return;
	}

	*pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	*bw_per = pcmd.u.dcb.pgrate.pgrate[*pgid];
	*up_tc_map = (1 << tc);

	/* prio_type is link strict */
	if (*pgid != 0xF)
		*prio_type = 0x2;
}

static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
				u8 *prio_type, u8 *pgid, u8 *bw_per,
				u8 *up_tc_map)
{
	/* tc 0 is written at MSB position */
	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
				up_tc_map, 1);
}

static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
				u8 *prio_type, u8 *pgid, u8 *bw_per,
				u8 *up_tc_map)
{
	/* tc 0 is written at MSB position */
	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
				up_tc_map, 0);
}

static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
				u8 prio_type, u8 pgid, u8 bw_per,
				u8 up_tc_map)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int fw_tc = 7 - tc;
	u32 _pgid;
	int err;

	if (pgid == DCB_ATTR_VALUE_UNDEFINED)
		return;
	if (bw_per == DCB_ATTR_VALUE_UNDEFINED)
		return;

	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return;
	}

	_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
	_pgid &= ~(0xF << (fw_tc * 4));
	_pgid |= pgid << (fw_tc * 4);
	pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB write PGID failed with %d\n",
			-err);
		return;
	}

	memset(&pcmd, 0, sizeof(struct fw_port_cmd));

	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS)
		dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
			-err);
}

static void cxgb4_getpgbwgcfg(struct net_device *dev, int pgid, u8 *bw_per,
			      int local)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	*bw_per = pcmd.u.dcb.pgrate.pgrate[pgid];
}

static void cxgb4_getpgbwgcfg_tx(struct net_device *dev, int pgid, u8 *bw_per)
{
	return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 1);
}

static void cxgb4_getpgbwgcfg_rx(struct net_device *dev, int pgid, u8 *bw_per)
{
	return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 0);
}

static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid,
				 u8 bw_per)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS)
		dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
			-err);
}

/* Return whether the specified Traffic Class Priority has Priority Pause
 * Frames enabled.
 */
static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
	    priority >= CXGB4_MAX_PRIORITY)
		*pfccfg = 0;
	else
		*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
}

/* Enable/disable Priority Pause Frames for the specified Traffic Class
 * Priority.
 */
static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
	    priority >= CXGB4_MAX_PRIORITY)
		return;

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC;
	pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;

	if (pfccfg)
		pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
	else
		pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB PFC write failed with %d\n", -err);
		return;
	}

	pi->dcb.pfcen = pcmd.u.dcb.pfc.pfcen;
}

static u8 cxgb4_setall(struct net_device *dev)
{
	return 0;
}

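/* Note: ->setall is a no-op here; the individual set callbacks above and
 * below push each change straight to the firmware (with FW_PORT_CMD_APPLY_F
 * when host-managed), so there is no deferred configuration to commit.
 */
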
/* Return DCB capabilities.
 */
static u8 cxgb4_getcap(struct net_device *dev, int cap_id, u8 *caps)
{
	struct port_info *pi = netdev2pinfo(dev);

	switch (cap_id) {
	case DCB_CAP_ATTR_PG:
	case DCB_CAP_ATTR_PFC:
		*caps = true;
		break;

	case DCB_CAP_ATTR_PG_TCS:
		/* 8 priorities for PG represented by bitmap */
		*caps = 0x80;
		break;

	case DCB_CAP_ATTR_PFC_TCS:
		/* 8 priorities for PFC represented by bitmap */
		*caps = 0x80;
		break;

	case DCB_CAP_ATTR_GSP:
		*caps = true;
		break;

	case DCB_CAP_ATTR_UP2TC:
	case DCB_CAP_ATTR_BCN:
		*caps = false;
		break;

	case DCB_CAP_ATTR_DCBX:
		*caps = pi->dcb.supported;
		break;

	default:
		*caps = false;
	}

	return 0;
}

/* Return the number of Traffic Classes for the indicated Traffic Class ID.
 */
static int cxgb4_getnumtcs(struct net_device *dev, int tcs_id, u8 *num)
{
	struct port_info *pi = netdev2pinfo(dev);

	switch (tcs_id) {
	case DCB_NUMTCS_ATTR_PG:
		if (pi->dcb.msgs & CXGB4_DCB_FW_PGRATE)
			*num = pi->dcb.pg_num_tcs_supported;
		else
			*num = 0x8;
		break;

	case DCB_NUMTCS_ATTR_PFC:
		*num = 0x8;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Set the number of Traffic Classes supported for the indicated Traffic Class
 * ID.
 */
static int cxgb4_setnumtcs(struct net_device *dev, int tcs_id, u8 num)
{
	/* Setting the number of Traffic Classes isn't supported.
	 */
	return -ENOSYS;
}

/* Return whether Priority Flow Control is enabled. */
static u8 cxgb4_getpfcstate(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);

	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
		return false;

	return pi->dcb.pfcen != 0;
}

/* Enable/disable Priority Flow Control. */
static void cxgb4_setpfcstate(struct net_device *dev, u8 state)
{
	/* We can't enable/disable Priority Flow Control but we also can't
	 * return an error ...
	 */
}

/* Return the Application User Priority Map associated with the specified
 * Application ID.
 */
static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
			  int peer)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i;

	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
		return 0;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		struct fw_port_cmd pcmd;
		int err;

		if (peer)
			INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
		else
			INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);

		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = i;

		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB APP read failed with %d\n",
				-err);
			return err;
		}
		if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id)
			if (pcmd.u.dcb.app_priority.sel_field == app_idtype)
				return pcmd.u.dcb.app_priority.user_prio_map;

		/* exhausted app list */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;
	}

	return -EEXIST;
}

/* Return the Application User Priority Map associated with the specified
 * Application ID.
 */
static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
{
	return __cxgb4_getapp(dev, app_idtype, app_id, 0);
}

/* Write a new Application User Priority Map for the specified Application ID
 */
static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
			  u8 app_prio)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i, err;

	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
		return -EINVAL;

	/* DCB info gets thrown away on link up */
	if (!netif_carrier_ok(dev))
		return -ENOLINK;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = i;
		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
				-err);
			return err;
		}
		if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) {
			/* overwrite existing app table */
			pcmd.u.dcb.app_priority.protocolid = 0;
			break;
		}
		/* find first empty slot */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;
	}

	if (i == CXGB4_MAX_DCBX_APP_SUPPORTED) {
		/* no empty slots available */
		dev_err(adap->pdev_dev, "DCB app table full\n");
		return -EBUSY;
	}

	/* write out new app table entry */
	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
	pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id);
	pcmd.u.dcb.app_priority.sel_field = app_idtype;
	pcmd.u.dcb.app_priority.user_prio_map = app_prio;
	pcmd.u.dcb.app_priority.idx = i;

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB app table write failed with %d\n",
			-err);
		return err;
	}

	return 0;
}

/* Priority for CEE inside dcb_app is bitmask, with 0 being an invalid value */
static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
			u8 app_prio)
{
	int ret;
	struct dcb_app app = {
		.selector = app_idtype,
		.protocol = app_id,
		.priority = app_prio,
	};

	if (app_idtype != DCB_APP_IDTYPE_ETHTYPE &&
	    app_idtype != DCB_APP_IDTYPE_PORTNUM)
		return -EINVAL;

	/* Convert app_idtype to a format that firmware understands */
	ret = __cxgb4_setapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
			     app_idtype : 3, app_id, app_prio);
	if (ret)
		return ret;

	return dcb_setapp(dev, &app);
}

/* Return whether IEEE Data Center Bridging has been negotiated.
 */
static inline int
cxgb4_ieee_negotiation_complete(struct net_device *dev,
				enum cxgb4_dcb_fw_msgs dcb_subtype)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	if (dcb_subtype && !(dcb->msgs & dcb_subtype))
		return 0;

	return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
		(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
}

static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets,
			       int local)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	struct adapter *adap = pi->adapter;
	uint32_t tc_info;
	struct fw_port_cmd pcmd;
	int i, bwg, err;

	if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE)))
		return 0;

	ets->ets_cap = dcb->pg_num_tcs_supported;

	if (local) {
		ets->willing = 1;
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	} else {
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
	}

	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return err;
	}

	tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid);

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return err;
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bwg = (tc_info >> ((7 - i) * 4)) & 0xF;
		ets->prio_tc[i] = bwg;
		ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
		ets->tc_rx_bw[i] = ets->tc_tx_bw[i];
		ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i];
	}

	return 0;
}

static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets)
{
	return cxgb4_ieee_read_ets(dev, ets, 1);
}

/* We reuse this for peer PFC as well, as we can't have it enabled one way */
static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	memset(pfc, 0, sizeof(struct ieee_pfc));

	if (!(dcb->msgs & CXGB4_DCB_FW_PFC))
		return 0;

	pfc->pfc_cap = dcb->pfc_num_tcs_supported;
	pfc->pfc_en = bitswap_1(dcb->pfcen);

	return 0;
}

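/* Note: dcb->pfcen is kept in the firmware's bit order (priority p in bit
 * 7 - p, see cxgb4_getpfccfg()); bitswap_1() above reverses it into the
 * priority-p-in-bit-p order expected by the ieee_pfc/cee_pfc structures.
 */
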
static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets)
{
	return cxgb4_ieee_read_ets(dev, ets, 0);
}

/* Fill in the Application User Priority Map associated with the
 * specified Application.
 * Priority for IEEE dcb_app is an integer, with 0 being a valid value
 */
static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
{
	int prio;

	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
		return -EINVAL;
	if (!(app->selector && app->protocol))
		return -EINVAL;

	/* Try querying firmware first, use firmware format */
	prio = __cxgb4_getapp(dev, app->selector - 1, app->protocol, 0);

	if (prio < 0)
		prio = dcb_ieee_getapp_mask(dev, app);

	app->priority = ffs(prio) - 1;

	return 0;
}

/* Write a new Application User Priority Map for the specified Application ID.
 * Priority for IEEE dcb_app is an integer, with 0 being a valid value
 */
static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	int ret;

	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
		return -EINVAL;
	if (!(app->selector && app->protocol))
		return -EINVAL;

	if (!(app->selector > IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	      app->selector < IEEE_8021QAZ_APP_SEL_ANY))
		return -EINVAL;

	/* change selector to a format that firmware understands */
	ret = __cxgb4_setapp(dev, app->selector - 1, app->protocol,
			     (1 << app->priority));
	if (ret)
		return ret;

	return dcb_ieee_setapp(dev, app);
}

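/* Note on selector conversion: IEEE app selectors are 1-based
 * (IEEE_8021QAZ_APP_SEL_ETHERTYPE == 1), while the firmware's sel_field
 * appears to use the same encoding minus one; hence the "selector - 1"
 * above and the matching "sel_field + 1" when reporting firmware entries.
 */
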
/* Return our DCBX parameters.
 */
static u8 cxgb4_getdcbx(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);

	/* This is already set by cxgb4_set_dcb_caps, so just return it */
	return pi->dcb.supported;
}

/* Set our DCBX parameters.
 */
static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
{
	struct port_info *pi = netdev2pinfo(dev);

	/* Filter out requests which exceed our capabilities.
	 */
	if ((dcb_request & (CXGB4_DCBX_FW_SUPPORT | CXGB4_DCBX_HOST_SUPPORT))
	    != dcb_request)
		return 1;

	/* Can't enable DCB if we haven't successfully negotiated it.
	 */
	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
		return 1;

	/* There's currently no mechanism to allow for the firmware DCBX
	 * negotiation to be changed from the Host Driver.  If the caller
	 * requests exactly the same parameters that we already have then
	 * we'll allow them to be successfully "set" ...
	 */
	if (dcb_request != pi->dcb.supported)
		return 1;

	pi->dcb.supported = dcb_request;
	return 0;
}

static int cxgb4_getpeer_app(struct net_device *dev,
			     struct dcb_peer_app_info *info, u16 *app_count)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i, err = 0;

	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
		return 1;

	info->willing = 0;
	info->error = 0;

	*app_count = 0;
	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = *app_count;
		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
				-err);
			return err;
		}

		/* find first empty slot */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;
	}
	*app_count = i;
	return err;
}

static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i, err = 0;

	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
		return 1;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = i;
		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
				-err);
			return err;
		}

		/* find first empty slot */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;

		table[i].selector = pcmd.u.dcb.app_priority.sel_field;
		table[i].protocol =
			be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
		table[i].priority =
			ffs(pcmd.u.dcb.app_priority.user_prio_map) - 1;
	}
	return err;
}

/* Return Priority Group information.
 */
static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	u32 pgid;
	int i, err;

	/* We're always "willing" -- the Switch Fabric always dictates the
	 * DCBX parameters to us.
	 */
	pg->willing = true;

	INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return err;
	}
	pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);

	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
		pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;

	INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return err;
	}

	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
		pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];

	return 0;
}

/* Return Priority Flow Control information.
 */
static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
{
	struct port_info *pi = netdev2pinfo(dev);

	cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));

	/* Firmware sends this to us in a format that is a bit flipped version
	 * of the spec, correct it before we send it to the host. This is taken
	 * care of by bit shifting in other uses of pfcen
	 */
	pfc->pfc_en = bitswap_1(pi->dcb.pfcen);

	return 0;
}

const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
	.ieee_getets = cxgb4_ieee_get_ets,
	.ieee_getpfc = cxgb4_ieee_get_pfc,
	.ieee_getapp = cxgb4_ieee_getapp,
	.ieee_setapp = cxgb4_ieee_setapp,
	.ieee_peer_getets = cxgb4_ieee_peer_ets,
	.ieee_peer_getpfc = cxgb4_ieee_get_pfc,

	/* CEE std */
	.getstate = cxgb4_getstate,
	.setstate = cxgb4_setstate,
	.getpgtccfgtx = cxgb4_getpgtccfg_tx,
	.getpgbwgcfgtx = cxgb4_getpgbwgcfg_tx,
	.getpgtccfgrx = cxgb4_getpgtccfg_rx,
	.getpgbwgcfgrx = cxgb4_getpgbwgcfg_rx,
	.setpgtccfgtx = cxgb4_setpgtccfg_tx,
	.setpgbwgcfgtx = cxgb4_setpgbwgcfg_tx,
	.setpfccfg = cxgb4_setpfccfg,
	.getpfccfg = cxgb4_getpfccfg,
	.setall = cxgb4_setall,
	.getcap = cxgb4_getcap,
	.getnumtcs = cxgb4_getnumtcs,
	.setnumtcs = cxgb4_setnumtcs,
	.getpfcstate = cxgb4_getpfcstate,
	.setpfcstate = cxgb4_setpfcstate,
	.getapp = cxgb4_getapp,
	.setapp = cxgb4_setapp,

	/* DCBX configuration */
	.getdcbx = cxgb4_getdcbx,
	.setdcbx = cxgb4_setdcbx,

	/* peer apps */
	.peer_getappinfo = cxgb4_getpeer_app,
	.peer_getapptable = cxgb4_getpeerapp_tbl,

	/* CEE peer */
	.cee_peer_getpg = cxgb4_cee_peer_getpg,
	.cee_peer_getpfc = cxgb4_cee_peer_getpfc,
};
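
/* This table is assigned to each port's netdev->dcbnl_ops during adapter
 * initialization (in cxgb4_main.c, when the driver is built with DCB
 * support), making the operations above reachable through the dcbnl
 * netlink interface, e.g. from an lldpad-based DCBX agent.
 */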