/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"

static inline int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;

	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}

#define ethport_is_up ethport_can_be_up

enum bna_ethport_event {
	ETHPORT_E_START = 1,
	ETHPORT_E_STOP = 2,
	ETHPORT_E_FAIL = 3,
	ETHPORT_E_UP = 4,
	ETHPORT_E_DOWN = 5,
	ETHPORT_E_FWRESP_UP_OK = 6,
	ETHPORT_E_FWRESP_DOWN = 7,
	ETHPORT_E_FWRESP_UP_FAIL = 8,
};

enum bna_enet_event {
	ENET_E_START = 1,
	ENET_E_STOP = 2,
	ENET_E_FAIL = 3,
	ENET_E_PAUSE_CFG = 4,
	ENET_E_MTU_CFG = 5,
	ENET_E_FWRESP_PAUSE = 6,
	ENET_E_CHLD_STOPPED = 7,
};

enum bna_ioceth_event {
	IOCETH_E_ENABLE = 1,
	IOCETH_E_DISABLE = 2,
	IOCETH_E_IOC_RESET = 3,
	IOCETH_E_IOC_FAILED = 4,
	IOCETH_E_IOC_READY = 5,
	IOCETH_E_ENET_ATTR_RESP = 6,
	IOCETH_E_ENET_STOPPED = 7,
	IOCETH_E_IOC_DISABLED = 8,
};
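
/*
 * The three event sets above drive the ethport, enet and ioceth state
 * machines declared further down with bfa_fsm_state_decl(). Events are
 * delivered via bfa_fsm_send_event() from the firmware response handlers
 * in this file and from the bnad layer.
 */
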
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
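
/*
 * Firmware DMAs its counters into hw_stats_kva as big-endian u64s;
 * bna_stats_copy() converts each block to host byte order into the
 * corresponding hw_stats field. (The stray line-continuation backslash
 * after "while (0)" has been dropped; it would otherwise splice the
 * following line into the macro.)
 */
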
/*
 * FW response handlers
 */

static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}

static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}

static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}

static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}

static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}

static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
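
/*
 * The stats request's tx/rx enet masks record which enet IDs the firmware
 * was asked to report on; the firmware packs only those rxf/txf blocks
 * back-to-back in the DMA area. The copy below therefore walks stats_src
 * linearly while scattering into the per-index slots of hw_stats, zeroing
 * the slots that were not requested.
 */
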
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}

static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}

/* ETHPORT */

#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
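
/*
 * Both macros above claim the callback pointer and clear it before
 * invoking it, so a callback fires at most once even if the FSM
 * re-enters a state that would dispatch it again.
 *
 * The bna_bfi_* senders below all follow the same message-queue pattern:
 * fill in the BFI message header, set num_entries from the request size,
 * fill in the payload, then hand the buffer to the IOC message queue via
 * bfa_msgq_cmd_set()/bfa_msgq_cmd_post().
 */
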
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	/* Admin up and down share BFI_ENET_H2I_PORT_ADMIN_UP_REQ;
	 * only the enable field differs.
	 */
	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}

bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
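
/*
 * Ethport FSM sketch (derived from the handlers below):
 *
 *	stopped --E_START--> down --E_UP--> up_resp_wait --FWRESP_UP_OK--> up
 *	up --E_DOWN--> down_resp_wait --E_UP--> up_resp_wait
 *
 * E_FAIL from any state lands in stopped; E_STOP while a firmware
 * response is outstanding (or from up) parks in last_resp_wait until
 * the firmware answers.
 */
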
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}

static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}

static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}

/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}

/* ENET */

#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
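
/*
 * Child-stop synchronization: bfa_wc_init() stores the resume callback
 * and takes an initial reference on the wait counter; one bfa_wc_up()
 * is taken per child (ethport, tx_mod, rx_mod) before asking it to stop,
 * and each child's stopped callback does bfa_wc_down(). bfa_wc_wait()
 * drops the initial reference, so bna_enet_cb_chld_stopped() runs exactly
 * once, after the last child reports in (or immediately if all stops
 * completed synchronously).
 */
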
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
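
/*
 * Enet FSM sketch (derived from the handlers below): start goes
 * stopped -> pause_init_wait (initial pause config pushed to firmware)
 * -> started. Pause/MTU changes from started go through cfg_wait; a
 * stop while a config response is outstanding goes cfg_stop_wait ->
 * chld_stop_wait -> stopped.
 */
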
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}

static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}

static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;
	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}

static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;

	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}

void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
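
/*
 * Typical bnad-side usage (sketch; the callback name is illustrative):
 *
 *	bna_enet_mtu_set(&bna->enet, new_frame_size, cbfn);
 *
 * In the started state the FSM quiesces the Rx path, applies the new
 * MTU, restarts Rx and then invokes cbfn(bnad); in the stopped state
 * the callback fires immediately. bna_enet_pause_config() follows the
 * same completion-callback pattern via ENET_E_PAUSE_CFG.
 */
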
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}

/* IOCETH */

#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)
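
/*
 * Note: enable_mbox_intr() reads intr_status without using the value;
 * the read itself appears intended to clear any stale mailbox interrupt
 * status before interrupts are re-enabled (an assumption; the original
 * carries no comment here).
 */
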
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
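
/*
 * Ioceth FSM sketch (derived from the handlers below): enable goes
 * stopped -> ioc_ready_wait -> enet_attr_wait (firmware attribute query)
 * -> ready. Disable unwinds through enet_stop_wait and ioc_disable_wait;
 * an IOC failure from any active state lands in failed, from which an
 * IOC reset re-enters ioc_ready_wait.
 */
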
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* No-op: already in the failed state */
		break;
  1377. default:
  1378. bfa_sm_fault(event);
  1379. }
  1380. }
  1381. static void
  1382. bna_bfi_attr_get(struct bna_ioceth *ioceth)
  1383. {
  1384. struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;
  1385. bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
  1386. BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
  1387. attr_req->mh.num_entries = htons(
  1388. bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
  1389. bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
  1390. sizeof(struct bfi_enet_attr_req), &attr_req->mh);
  1391. bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
  1392. }
  1393. /* IOC callback functions */
  1394. static void
  1395. bna_cb_ioceth_enable(void *arg, enum bfa_status error)
  1396. {
  1397. struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
  1398. if (error)
  1399. bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
  1400. else
  1401. bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
  1402. }
  1403. static void
  1404. bna_cb_ioceth_disable(void *arg)
  1405. {
  1406. struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
  1407. bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
  1408. }
  1409. static void
  1410. bna_cb_ioceth_hbfail(void *arg)
  1411. {
  1412. struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
  1413. bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
  1414. }
  1415. static void
  1416. bna_cb_ioceth_reset(void *arg)
  1417. {
  1418. struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
  1419. bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
  1420. }
  1421. static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
  1422. bna_cb_ioceth_enable,
  1423. bna_cb_ioceth_disable,
  1424. bna_cb_ioceth_hbfail,
  1425. bna_cb_ioceth_reset
  1426. };
  1427. static void bna_attr_init(struct bna_ioceth *ioceth)
  1428. {
  1429. ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
  1430. ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
  1431. ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
  1432. ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
  1433. ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
  1434. ioceth->attr.fw_query_complete = false;
  1435. }

static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
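
/*
 * Note (editorial): the COM block above is carved up with a simple
 * claim-then-advance scheme -- each sub-module claims the current
 * (kva, dma) pair and both cursors advance by that module's meminfo size:
 *
 *	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
 *	kva += bfa_nw_cee_meminfo();
 *	dma += bfa_nw_cee_meminfo();
 *
 * This only works because bna_res_req() below sizes BNA_RES_MEM_T_COM as
 * the sum of the same meminfo values, claimed in the same order.
 */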

static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}

void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}

void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}

static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&ucam_mod->del_q);
	for (; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
	}

	ucam_mod->bna = bna;
}
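
/*
 * Note (editorial): bna_mod_res_req() below allocates the UCMAC array as
 * (attr->num_ucmac * 2) entries of struct bna_mac, so the two loops above
 * split one backing array -- entries [0, num_ucmac) seed free_q and
 * entries [num_ucmac, 2 * num_ucmac) seed del_q; the second loop simply
 * continues from the index where the first one stopped.
 */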

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i;

	/* Walk both queues; the counts are computed but not used further. */
	i = 0;
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	i = 0;
	list_for_each(qe, &ucam_mod->del_q)
		i++;

	ucam_mod->bna = NULL;
}

static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
		res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			      &mcam_mod->free_handle_q);
	}

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&mcam_mod->del_q);
	for (i = bna->ioceth.attr.num_mcmac;
	     i < (bna->ioceth.attr.num_mcmac * 2); i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
	}

	mcam_mod->bna = bna;
}
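
/*
 * Note (editorial): as with the unicast module, the MCMAC array is
 * allocated 2 * num_mcmac entries deep (see bna_mod_res_req() below), with
 * the upper half backing del_q. The handle array, by contrast, is only
 * num_mcmac entries, so free_handle_q is seeded from the whole of it.
 */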

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i;

	/* As in bna_ucam_mod_uninit(), the counts are computed and dropped. */
	i = 0;
	list_for_each(qe, &mcam_mod->free_q)
		i++;

	i = 0;
	list_for_each(qe, &mcam_mod->del_q)
		i++;

	i = 0;
	list_for_each(qe, &mcam_mod->free_handle_q)
		i++;

	mcam_mod->bna = NULL;
}

static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
			  BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;
	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
			 sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
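
/*
 * Note (editorial, hedged): the request carries the DMA address of the
 * host stats buffer (hw_stats_dma, filled in by bna_init() below), so the
 * firmware writes the counters straight into host memory rather than
 * returning them inline. stats_get_busy is set here and is presumably
 * cleared again on the msgq response path before the bnad callback fires.
 */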

void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
		ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(sizeof(struct bfi_enet_stats), PAGE_SIZE);
}

void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}

void
bna_init(struct bna *bna, struct bnad *bnad,
	 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}

void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);
	bna_rx_mod_init(&bna->rx_mod, bna, res_info);
	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}

void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);
	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}

int
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
	    (num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}
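
/*
 * Note (editorial, hedged): both setters only ever shrink the resource
 * count, and only after the firmware attribute query has completed
 * (fw_query_complete starts out false in bna_attr_init() and is presumably
 * set once the GET_ATTR response arrives). An illustrative caller, not
 * taken from this driver:
 *
 *	if (bna_num_txq_set(bna, requested_txq) != BNA_CB_SUCCESS)
 *		requested_txq = bna->ioceth.attr.num_txq;
 */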

struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
	struct list_head *qe;

	if (list_empty(head))
		return NULL;

	bfa_q_deq(head, &qe);
	return (struct bna_mac *)qe;
}

void
bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, tail);
}

struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);
	return (struct bna_mcam_handle *)qe;
}

void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
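
/*
 * Note (editorial, hedged): these are plain free-list allocators -- get
 * dequeues from the head, put appends to the tail, and NULL signals
 * exhaustion. An illustrative (not from this file) caller:
 *
 *	struct bna_mac *mac;
 *
 *	mac = bna_cam_mod_mac_get(&bna->ucam_mod.free_q);
 *	if (mac == NULL)
 *		return;		(free list exhausted)
 *	...program the MAC...
 *	bna_cam_mod_mac_put(&bna->ucam_mod.free_q, mac);
 */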

void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}
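
/*
 * Note (editorial, hedged): bna_hw_stats_get() fails fast with BNA_CB_FAIL
 * while the IOC is down and with BNA_CB_BUSY while a previous request is
 * still in flight; only then does it post a new BFI_ENET_H2I_STATS_GET_REQ
 * via bna_bfi_stats_get(). The success path is asynchronous --
 * bnad_cb_stats_get() is presumably invoked again from the msgq response
 * handler once the firmware has DMAed the counters into hw_stats_kva.
 */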