/* qlge_dbg.c - QLogic qlge NIC HBA driver: register dump / MPI coredump support. */
  1. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  2. #include <linux/slab.h>
  3. #include "qlge.h"
  4. /* Read a NIC register from the alternate function. */
  5. static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
  6. u32 reg)
  7. {
  8. u32 register_to_read;
  9. u32 reg_val;
  10. unsigned int status = 0;
  11. register_to_read = MPI_NIC_REG_BLOCK
  12. | MPI_NIC_READ
  13. | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
  14. | reg;
  15. status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
  16. if (status != 0)
  17. return 0xffffffff;
  18. return reg_val;
  19. }
  20. /* Write a NIC register from the alternate function. */
  21. static int ql_write_other_func_reg(struct ql_adapter *qdev,
  22. u32 reg, u32 reg_val)
  23. {
  24. u32 register_to_read;
  25. int status = 0;
  26. register_to_read = MPI_NIC_REG_BLOCK
  27. | MPI_NIC_READ
  28. | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
  29. | reg;
  30. status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
  31. return status;
  32. }
  33. static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
  34. u32 bit, u32 err_bit)
  35. {
  36. u32 temp;
  37. int count = 10;
  38. while (count) {
  39. temp = ql_read_other_func_reg(qdev, reg);
  40. /* check for errors */
  41. if (temp & err_bit)
  42. return -1;
  43. else if (temp & bit)
  44. return 0;
  45. mdelay(10);
  46. count--;
  47. }
  48. return -1;
  49. }
  50. static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
  51. u32 *data)
  52. {
  53. int status;
  54. /* wait for reg to come ready */
  55. status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
  56. XG_SERDES_ADDR_RDY, 0);
  57. if (status)
  58. goto exit;
  59. /* set up for reg read */
  60. ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
  61. /* wait for reg to come ready */
  62. status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
  63. XG_SERDES_ADDR_RDY, 0);
  64. if (status)
  65. goto exit;
  66. /* get the data */
  67. *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
  68. exit:
  69. return status;
  70. }
  71. /* Read out the SERDES registers */
  72. static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
  73. {
  74. int status;
  75. /* wait for reg to come ready */
  76. status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
  77. if (status)
  78. goto exit;
  79. /* set up for reg read */
  80. ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
  81. /* wait for reg to come ready */
  82. status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
  83. if (status)
  84. goto exit;
  85. /* get the data */
  86. *data = ql_read32(qdev, XG_SERDES_DATA);
  87. exit:
  88. return status;
  89. }
  90. static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
  91. u32 *direct_ptr, u32 *indirect_ptr,
  92. unsigned int direct_valid, unsigned int indirect_valid)
  93. {
  94. unsigned int status;
  95. status = 1;
  96. if (direct_valid)
  97. status = ql_read_serdes_reg(qdev, addr, direct_ptr);
  98. /* Dead fill any failures or invalids. */
  99. if (status)
  100. *direct_ptr = 0xDEADBEEF;
  101. status = 1;
  102. if (indirect_valid)
  103. status = ql_read_other_func_serdes_reg(
  104. qdev, addr, indirect_ptr);
  105. /* Dead fill any failures or invalids. */
  106. if (status)
  107. *indirect_ptr = 0xDEADBEEF;
  108. }
/* Dump all SERDES register blocks into the coredump.  Each block is
 * captured twice: once through the local ("direct") register path and
 * once through the alternate function's ("indirect") MPI path; which
 * coredump array is "direct" depends on whether this function is NIC 1
 * or NIC 2 (qdev->func & 1).  Powered-down lanes are marked invalid and
 * dead-filled by ql_get_both_serdes().
 * NOTE(review): intermediate status values only decide lane validity;
 * the function always returns 0.
 */
static int ql_get_serdes_regs(struct ql_adapter *qdev,
			      struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
	unsigned int xaui_indirect_valid, i;
	u32 *direct_ptr, temp;
	u32 *indirect_ptr;

	/* Assume XFI lanes down and XAUI lanes up until proven otherwise. */
	xfi_direct_valid = xfi_indirect_valid = 0;
	xaui_direct_valid = xaui_indirect_valid = 1;

	/* The XAUI needs to be read out per port */
	status = ql_read_other_func_serdes_reg(qdev,
					       XG_SERDES_XAUI_HSS_PCS_START,
					       &temp);
	/* A failed read is treated as a powered-down lane. */
	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_indirect_valid = 0;

	status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
	    XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_direct_valid = 0;

	/*
	 * XFI register is shared so only need to read one
	 * functions and then check the bits.
	 */
	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
	if (status)
		temp = 0;	/* failed read: treat both XFI lanes as down */

	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
	    XG_SERDES_ADDR_XFI1_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_indirect_valid = 1;
		else
			xfi_direct_valid = 1;
	}
	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
	    XG_SERDES_ADDR_XFI2_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_direct_valid = 1;
		else
			xfi_indirect_valid = 1;
	}

	/* Get XAUI_AN register block. */
	if (qdev->func & 1) {
		/* Function 2 is direct */
		direct_ptr = mpi_coredump->serdes2_xaui_an;
		indirect_ptr = mpi_coredump->serdes_xaui_an;
	} else {
		/* Function 1 is direct */
		direct_ptr = mpi_coredump->serdes_xaui_an;
		indirect_ptr = mpi_coredump->serdes2_xaui_an;
	}
	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xaui_hss_pcs;
		indirect_ptr = mpi_coredump->serdes_xaui_hss_pcs;
	} else {
		direct_ptr = mpi_coredump->serdes_xaui_hss_pcs;
		indirect_ptr = mpi_coredump->serdes2_xaui_hss_pcs;
	}
	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_XFI_AN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_an;
		indirect_ptr = mpi_coredump->serdes_xfi_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_an;
		indirect_ptr = mpi_coredump->serdes2_xfi_an;
	}
	for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_TRAIN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_train;
		indirect_ptr = mpi_coredump->serdes_xfi_train;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_train;
		indirect_ptr = mpi_coredump->serdes2_xfi_train;
	}
	for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_hss_pcs;
		indirect_ptr = mpi_coredump->serdes_xfi_hss_pcs;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_pcs;
		indirect_ptr = mpi_coredump->serdes2_xfi_hss_pcs;
	}
	for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_TX register block.
	 * NOTE(review): the remaining blocks step by 1, not 4, unlike the
	 * blocks above — presumably these address word-indexed registers;
	 * confirm against the SERDES address map.
	 */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_hss_tx;
		indirect_ptr = mpi_coredump->serdes_xfi_hss_tx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
		indirect_ptr = mpi_coredump->serdes2_xfi_hss_tx;
	}
	for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_RX register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_hss_rx;
		indirect_ptr = mpi_coredump->serdes_xfi_hss_rx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
		indirect_ptr = mpi_coredump->serdes2_xfi_hss_rx;
	}
	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_PLL register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_hss_pll;
		indirect_ptr = mpi_coredump->serdes_xfi_hss_pll;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_pll;
		indirect_ptr = mpi_coredump->serdes2_xfi_hss_pll;
	}
	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);
	return 0;
}
  270. static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
  271. u32 *data)
  272. {
  273. int status = 0;
  274. /* wait for reg to come ready */
  275. status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
  276. XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
  277. if (status)
  278. goto exit;
  279. /* set up for reg read */
  280. ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
  281. /* wait for reg to come ready */
  282. status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
  283. XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
  284. if (status)
  285. goto exit;
  286. /* get the data */
  287. *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
  288. exit:
  289. return status;
  290. }
  291. /* Read the 400 xgmac control/statistics registers
  292. * skipping unused locations.
  293. */
  294. static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
  295. unsigned int other_function)
  296. {
  297. int status = 0;
  298. int i;
  299. for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
  300. /* We're reading 400 xgmac registers, but we filter out
  301. * serveral locations that are non-responsive to reads.
  302. */
  303. if ((i == 0x00000114) ||
  304. (i == 0x00000118) ||
  305. (i == 0x0000013c) ||
  306. (i == 0x00000140) ||
  307. (i > 0x00000150 && i < 0x000001fc) ||
  308. (i > 0x00000278 && i < 0x000002a0) ||
  309. (i > 0x000002c0 && i < 0x000002cf) ||
  310. (i > 0x000002dc && i < 0x000002f0) ||
  311. (i > 0x000003c8 && i < 0x00000400) ||
  312. (i > 0x00000400 && i < 0x00000410) ||
  313. (i > 0x00000410 && i < 0x00000420) ||
  314. (i > 0x00000420 && i < 0x00000430) ||
  315. (i > 0x00000430 && i < 0x00000440) ||
  316. (i > 0x00000440 && i < 0x00000450) ||
  317. (i > 0x00000450 && i < 0x00000500) ||
  318. (i > 0x0000054c && i < 0x00000568) ||
  319. (i > 0x000005c8 && i < 0x00000600)) {
  320. if (other_function)
  321. status =
  322. ql_read_other_func_xgmac_reg(qdev, i, buf);
  323. else
  324. status = ql_read_xgmac_reg(qdev, i, buf);
  325. if (status)
  326. *buf = 0xdeadbeef;
  327. break;
  328. }
  329. }
  330. return status;
  331. }
  332. static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
  333. {
  334. int status = 0;
  335. int i;
  336. for (i = 0; i < 8; i++, buf++) {
  337. ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
  338. *buf = ql_read32(qdev, NIC_ETS);
  339. }
  340. for (i = 0; i < 2; i++, buf++) {
  341. ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
  342. *buf = ql_read32(qdev, CNA_ETS);
  343. }
  344. return status;
  345. }
  346. static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
  347. {
  348. int i;
  349. for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
  350. ql_write32(qdev, INTR_EN,
  351. qdev->intr_context[i].intr_read_mask);
  352. *buf = ql_read32(qdev, INTR_EN);
  353. }
  354. }
  355. static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
  356. {
  357. int i, status;
  358. u32 value[3];
  359. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  360. if (status)
  361. return status;
  362. for (i = 0; i < 16; i++) {
  363. status = ql_get_mac_addr_reg(qdev,
  364. MAC_ADDR_TYPE_CAM_MAC, i, value);
  365. if (status) {
  366. netif_err(qdev, drv, qdev->ndev,
  367. "Failed read of mac index register\n");
  368. goto err;
  369. }
  370. *buf++ = value[0]; /* lower MAC address */
  371. *buf++ = value[1]; /* upper MAC address */
  372. *buf++ = value[2]; /* output */
  373. }
  374. for (i = 0; i < 32; i++) {
  375. status = ql_get_mac_addr_reg(qdev,
  376. MAC_ADDR_TYPE_MULTI_MAC, i, value);
  377. if (status) {
  378. netif_err(qdev, drv, qdev->ndev,
  379. "Failed read of mac index register\n");
  380. goto err;
  381. }
  382. *buf++ = value[0]; /* lower Mcast address */
  383. *buf++ = value[1]; /* upper Mcast address */
  384. }
  385. err:
  386. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  387. return status;
  388. }
  389. static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
  390. {
  391. int status;
  392. u32 value, i;
  393. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  394. if (status)
  395. return status;
  396. for (i = 0; i < 16; i++) {
  397. status = ql_get_routing_reg(qdev, i, &value);
  398. if (status) {
  399. netif_err(qdev, drv, qdev->ndev,
  400. "Failed read of routing index register\n");
  401. goto err;
  402. } else {
  403. *buf++ = value;
  404. }
  405. }
  406. err:
  407. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  408. return status;
  409. }
  410. /* Read the MPI Processor shadow registers */
  411. static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
  412. {
  413. u32 i;
  414. int status;
  415. for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
  416. status = ql_write_mpi_reg(qdev, RISC_124,
  417. (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
  418. if (status)
  419. goto end;
  420. status = ql_read_mpi_reg(qdev, RISC_127, buf);
  421. if (status)
  422. goto end;
  423. }
  424. end:
  425. return status;
  426. }
  427. /* Read the MPI Processor core registers */
  428. static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
  429. u32 offset, u32 count)
  430. {
  431. int i, status = 0;
  432. for (i = 0; i < count; i++, buf++) {
  433. status = ql_read_mpi_reg(qdev, offset + i, buf);
  434. if (status)
  435. return status;
  436. }
  437. return status;
  438. }
  439. /* Read the ASIC probe dump */
  440. static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
  441. u32 valid, u32 *buf)
  442. {
  443. u32 module, mux_sel, probe, lo_val, hi_val;
  444. for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
  445. if (!((valid >> module) & 1))
  446. continue;
  447. for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
  448. probe = clock
  449. | PRB_MX_ADDR_ARE
  450. | mux_sel
  451. | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
  452. ql_write32(qdev, PRB_MX_ADDR, probe);
  453. lo_val = ql_read32(qdev, PRB_MX_DATA);
  454. if (mux_sel == 0) {
  455. *buf = probe;
  456. buf++;
  457. }
  458. probe |= PRB_MX_ADDR_UP;
  459. ql_write32(qdev, PRB_MX_ADDR, probe);
  460. hi_val = ql_read32(qdev, PRB_MX_DATA);
  461. *buf = lo_val;
  462. buf++;
  463. *buf = hi_val;
  464. buf++;
  465. }
  466. }
  467. return buf;
  468. }
/* Collect the full ASIC probe dump: enable the probe mux, then capture
 * each clock domain in turn.  ql_get_probe() returns the advanced
 * buffer position, so the sections are laid out back to back.
 * NOTE(review): the ql_write_mpi_reg() return value is ignored here;
 * confirm a failed enable is acceptable for a best-effort dump.
 */
static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
{
	/* First we have to enable the probe mux */
	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
	buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
			   PRB_MX_ADDR_VALID_SYS_MOD, buf);
	buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
			   PRB_MX_ADDR_VALID_PCI_MOD, buf);
	buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
			   PRB_MX_ADDR_VALID_XGM_MOD, buf);
	buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
			   PRB_MX_ADDR_VALID_FC_MOD, buf);
	return 0;
}
/* Read out the routing index registers.
 * For every (type, index) pair a read request is posted to RT_IDX and
 * the result is recorded as four words: type, index, the RT_IDX
 * readback, and the RT_DATA value.  Holds SEM_RT_IDX_MASK throughout.
 */
static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
{
	int status;
	u32 type, index, index_max;
	u32 result_index;
	u32 result_data;
	u32 val;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (type = 0; type < 4; type++) {
		/* Types 0 and 1 have 8 entries; types 2 and 3 have 16. */
		if (type < 2)
			index_max = 8;
		else
			index_max = 16;
		for (index = 0; index < index_max; index++) {
			/* Post a read request for this type/index pair. */
			val = RT_IDX_RS
				| (type << RT_IDX_TYPE_SHIFT)
				| (index << RT_IDX_IDX_SHIFT);
			ql_write32(qdev, RT_IDX, val);
			result_index = 0;
			/* NOTE(review): unbounded busy-wait on RT_IDX_MR —
			 * this hangs if hardware never sets the bit; confirm
			 * the hardware guarantees completion.
			 */
			while ((result_index & RT_IDX_MR) == 0)
				result_index = ql_read32(qdev, RT_IDX);
			result_data = ql_read32(qdev, RT_DATA);
			/* Four words per entry. */
			*buf = type;
			buf++;
			*buf = index;
			buf++;
			*buf = result_index;
			buf++;
			*buf = result_data;
			buf++;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Read out the MAC protocol registers.
 * For every address type, walks each (index, offset) word, posts a read
 * request to MAC_ADDR_IDX, busy-waits for completion, and records the
 * MAC_ADDR_IDX readback followed by MAC_ADDR_DATA (two words per read).
 */
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	/* Read strobe; type 0 additionally ORs in MAC_ADDR_ADR below.
	 * NOTE(review): initial_val is never reset, so MAC_ADDR_ADR stays
	 * set for all types after the CAM pass — confirm this is intended.
	 */
	u32 initial_val = MAC_ADDR_RS;
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		/* Pick the entry and per-entry word counts for this type. */
		switch (type) {
		case 0: /* CAM */
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1: /* Multicast MAC Address */
			/* NOTE(review): max_index reuses the CAM word count
			 * rather than a multicast entry count — verify.
			 */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2: /* VLAN filter mask */
		case 3: /* MC filter mask */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4: /* FC MAC addresses */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5: /* Mgmt MAC addresses */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6: /* Mgmt VLAN addresses */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7: /* Mgmt IPv4 address */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8: /* Mgmt IPv6 address */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9: /* Mgmt TCP/UDP Dest port */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			pr_err("Bad type!!! 0x%08x\n", type);
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				ql_write32(qdev, MAC_ADDR_IDX, val);
				result_index = 0;
				/* NOTE(review): unbounded busy-wait on
				 * MAC_ADDR_MR — hangs if the bit never sets.
				 */
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = ql_read32(qdev,
								 MAC_ADDR_IDX);
				}
				result_data = ql_read32(qdev, MAC_ADDR_DATA);
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
  599. static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
  600. {
  601. u32 func_num, reg, reg_val;
  602. int status;
  603. for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
  604. reg = MPI_NIC_REG_BLOCK
  605. | (func_num << MPI_NIC_FUNCTION_SHIFT)
  606. | (SEM / 4);
  607. status = ql_read_mpi_reg(qdev, reg, &reg_val);
  608. *buf = reg_val;
  609. /* if the read failed then dead fill the element. */
  610. if (!status)
  611. *buf = 0xdeadbeef;
  612. buf++;
  613. }
  614. }
  615. /* Create a coredump segment header */
  616. static void ql_build_coredump_seg_header(
  617. struct mpi_coredump_segment_header *seg_hdr,
  618. u32 seg_number, u32 seg_size, u8 *desc)
  619. {
  620. memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
  621. seg_hdr->cookie = MPI_COREDUMP_COOKIE;
  622. seg_hdr->segNum = seg_number;
  623. seg_hdr->segSize = seg_size;
  624. strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
  625. }
  626. /*
  627. * This function should be called when a coredump / probedump
  628. * is to be extracted from the HBA. It is assumed there is a
  629. * qdev structure that contains the base address of the register
  630. * space for this function as well as a coredump structure that
  631. * will contain the dump.
  632. */
  633. int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
  634. {
  635. int status;
  636. int i;
  637. if (!mpi_coredump) {
  638. netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
  639. return -EINVAL;
  640. }
  641. /* Try to get the spinlock, but dont worry if
  642. * it isn't available. If the firmware died it
  643. * might be holding the sem.
  644. */
  645. ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
  646. status = ql_pause_mpi_risc(qdev);
  647. if (status) {
  648. netif_err(qdev, drv, qdev->ndev,
  649. "Failed RISC pause. Status = 0x%.08x\n", status);
  650. goto err;
  651. }
  652. /* Insert the global header */
  653. memset(&(mpi_coredump->mpi_global_header), 0,
  654. sizeof(struct mpi_coredump_global_header));
  655. mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
  656. mpi_coredump->mpi_global_header.headerSize =
  657. sizeof(struct mpi_coredump_global_header);
  658. mpi_coredump->mpi_global_header.imageSize =
  659. sizeof(struct ql_mpi_coredump);
  660. strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
  661. sizeof(mpi_coredump->mpi_global_header.idString));
  662. /* Get generic NIC reg dump */
  663. ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
  664. NIC1_CONTROL_SEG_NUM,
  665. sizeof(struct mpi_coredump_segment_header) +
  666. sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
  667. ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
  668. NIC2_CONTROL_SEG_NUM,
  669. sizeof(struct mpi_coredump_segment_header) +
  670. sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
  671. /* Get XGMac registers. (Segment 18, Rev C. step 21) */
  672. ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
  673. NIC1_XGMAC_SEG_NUM,
  674. sizeof(struct mpi_coredump_segment_header) +
  675. sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
  676. ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
  677. NIC2_XGMAC_SEG_NUM,
  678. sizeof(struct mpi_coredump_segment_header) +
  679. sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
  680. if (qdev->func & 1) {
  681. /* Odd means our function is NIC 2 */
  682. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  683. mpi_coredump->nic2_regs[i] =
  684. ql_read32(qdev, i * sizeof(u32));
  685. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  686. mpi_coredump->nic_regs[i] =
  687. ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
  688. ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
  689. ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
  690. } else {
  691. /* Even means our function is NIC 1 */
  692. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  693. mpi_coredump->nic_regs[i] =
  694. ql_read32(qdev, i * sizeof(u32));
  695. for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
  696. mpi_coredump->nic2_regs[i] =
  697. ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
  698. ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
  699. ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
  700. }
  701. /* Rev C. Step 20a */
  702. ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
  703. XAUI_AN_SEG_NUM,
  704. sizeof(struct mpi_coredump_segment_header) +
  705. sizeof(mpi_coredump->serdes_xaui_an),
  706. "XAUI AN Registers");
  707. /* Rev C. Step 20b */
  708. ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
  709. XAUI_HSS_PCS_SEG_NUM,
  710. sizeof(struct mpi_coredump_segment_header) +
  711. sizeof(mpi_coredump->serdes_xaui_hss_pcs),
  712. "XAUI HSS PCS Registers");
  713. ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
  714. sizeof(struct mpi_coredump_segment_header) +
  715. sizeof(mpi_coredump->serdes_xfi_an),
  716. "XFI AN Registers");
  717. ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
  718. XFI_TRAIN_SEG_NUM,
  719. sizeof(struct mpi_coredump_segment_header) +
  720. sizeof(mpi_coredump->serdes_xfi_train),
  721. "XFI TRAIN Registers");
  722. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
  723. XFI_HSS_PCS_SEG_NUM,
  724. sizeof(struct mpi_coredump_segment_header) +
  725. sizeof(mpi_coredump->serdes_xfi_hss_pcs),
  726. "XFI HSS PCS Registers");
  727. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
  728. XFI_HSS_TX_SEG_NUM,
  729. sizeof(struct mpi_coredump_segment_header) +
  730. sizeof(mpi_coredump->serdes_xfi_hss_tx),
  731. "XFI HSS TX Registers");
  732. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
  733. XFI_HSS_RX_SEG_NUM,
  734. sizeof(struct mpi_coredump_segment_header) +
  735. sizeof(mpi_coredump->serdes_xfi_hss_rx),
  736. "XFI HSS RX Registers");
  737. ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
  738. XFI_HSS_PLL_SEG_NUM,
  739. sizeof(struct mpi_coredump_segment_header) +
  740. sizeof(mpi_coredump->serdes_xfi_hss_pll),
  741. "XFI HSS PLL Registers");
  742. ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
  743. XAUI2_AN_SEG_NUM,
  744. sizeof(struct mpi_coredump_segment_header) +
  745. sizeof(mpi_coredump->serdes2_xaui_an),
  746. "XAUI2 AN Registers");
  747. ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
  748. XAUI2_HSS_PCS_SEG_NUM,
  749. sizeof(struct mpi_coredump_segment_header) +
  750. sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
  751. "XAUI2 HSS PCS Registers");
  752. ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
  753. XFI2_AN_SEG_NUM,
  754. sizeof(struct mpi_coredump_segment_header) +
  755. sizeof(mpi_coredump->serdes2_xfi_an),
  756. "XFI2 AN Registers");
  757. ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
  758. XFI2_TRAIN_SEG_NUM,
  759. sizeof(struct mpi_coredump_segment_header) +
  760. sizeof(mpi_coredump->serdes2_xfi_train),
  761. "XFI2 TRAIN Registers");
  762. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
  763. XFI2_HSS_PCS_SEG_NUM,
  764. sizeof(struct mpi_coredump_segment_header) +
  765. sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
  766. "XFI2 HSS PCS Registers");
  767. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
  768. XFI2_HSS_TX_SEG_NUM,
  769. sizeof(struct mpi_coredump_segment_header) +
  770. sizeof(mpi_coredump->serdes2_xfi_hss_tx),
  771. "XFI2 HSS TX Registers");
  772. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
  773. XFI2_HSS_RX_SEG_NUM,
  774. sizeof(struct mpi_coredump_segment_header) +
  775. sizeof(mpi_coredump->serdes2_xfi_hss_rx),
  776. "XFI2 HSS RX Registers");
  777. ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
  778. XFI2_HSS_PLL_SEG_NUM,
  779. sizeof(struct mpi_coredump_segment_header) +
  780. sizeof(mpi_coredump->serdes2_xfi_hss_pll),
  781. "XFI2 HSS PLL Registers");
  782. status = ql_get_serdes_regs(qdev, mpi_coredump);
  783. if (status) {
  784. netif_err(qdev, drv, qdev->ndev,
  785. "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
  786. status);
  787. goto err;
  788. }
  789. ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
  790. CORE_SEG_NUM,
  791. sizeof(mpi_coredump->core_regs_seg_hdr) +
  792. sizeof(mpi_coredump->mpi_core_regs) +
  793. sizeof(mpi_coredump->mpi_core_sh_regs),
  794. "Core Registers");
  795. /* Get the MPI Core Registers */
  796. status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
  797. MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
  798. if (status)
  799. goto err;
  800. /* Get the 16 MPI shadow registers */
  801. status = ql_get_mpi_shadow_regs(qdev,
  802. &mpi_coredump->mpi_core_sh_regs[0]);
  803. if (status)
  804. goto err;
  805. /* Get the Test Logic Registers */
  806. ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
  807. TEST_LOGIC_SEG_NUM,
  808. sizeof(struct mpi_coredump_segment_header)
  809. + sizeof(mpi_coredump->test_logic_regs),
  810. "Test Logic Regs");
  811. status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
  812. TEST_REGS_ADDR, TEST_REGS_CNT);
  813. if (status)
  814. goto err;
  815. /* Get the RMII Registers */
  816. ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
  817. RMII_SEG_NUM,
  818. sizeof(struct mpi_coredump_segment_header)
  819. + sizeof(mpi_coredump->rmii_regs),
  820. "RMII Registers");
  821. status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
  822. RMII_REGS_ADDR, RMII_REGS_CNT);
  823. if (status)
  824. goto err;
  825. /* Get the FCMAC1 Registers */
  826. ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
  827. FCMAC1_SEG_NUM,
  828. sizeof(struct mpi_coredump_segment_header)
  829. + sizeof(mpi_coredump->fcmac1_regs),
  830. "FCMAC1 Registers");
  831. status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
  832. FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
  833. if (status)
  834. goto err;
  835. /* Get the FCMAC2 Registers */
  836. ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
  837. FCMAC2_SEG_NUM,
  838. sizeof(struct mpi_coredump_segment_header)
  839. + sizeof(mpi_coredump->fcmac2_regs),
  840. "FCMAC2 Registers");
  841. status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
  842. FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
  843. if (status)
  844. goto err;
  845. /* Get the FC1 MBX Registers */
  846. ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
  847. FC1_MBOX_SEG_NUM,
  848. sizeof(struct mpi_coredump_segment_header)
  849. + sizeof(mpi_coredump->fc1_mbx_regs),
  850. "FC1 MBox Regs");
  851. status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
  852. FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
  853. if (status)
  854. goto err;
  855. /* Get the IDE Registers */
  856. ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
  857. IDE_SEG_NUM,
  858. sizeof(struct mpi_coredump_segment_header)
  859. + sizeof(mpi_coredump->ide_regs),
  860. "IDE Registers");
  861. status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
  862. IDE_REGS_ADDR, IDE_REGS_CNT);
  863. if (status)
  864. goto err;
  865. /* Get the NIC1 MBX Registers */
  866. ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
  867. NIC1_MBOX_SEG_NUM,
  868. sizeof(struct mpi_coredump_segment_header)
  869. + sizeof(mpi_coredump->nic1_mbx_regs),
  870. "NIC1 MBox Regs");
  871. status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
  872. NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
  873. if (status)
  874. goto err;
  875. /* Get the SMBus Registers */
  876. ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
  877. SMBUS_SEG_NUM,
  878. sizeof(struct mpi_coredump_segment_header)
  879. + sizeof(mpi_coredump->smbus_regs),
  880. "SMBus Registers");
  881. status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
  882. SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
  883. if (status)
  884. goto err;
  885. /* Get the FC2 MBX Registers */
  886. ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
  887. FC2_MBOX_SEG_NUM,
  888. sizeof(struct mpi_coredump_segment_header)
  889. + sizeof(mpi_coredump->fc2_mbx_regs),
  890. "FC2 MBox Regs");
  891. status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
  892. FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
  893. if (status)
  894. goto err;
  895. /* Get the NIC2 MBX Registers */
  896. ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
  897. NIC2_MBOX_SEG_NUM,
  898. sizeof(struct mpi_coredump_segment_header)
  899. + sizeof(mpi_coredump->nic2_mbx_regs),
  900. "NIC2 MBox Regs");
  901. status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
  902. NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
  903. if (status)
  904. goto err;
  905. /* Get the I2C Registers */
  906. ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
  907. I2C_SEG_NUM,
  908. sizeof(struct mpi_coredump_segment_header)
  909. + sizeof(mpi_coredump->i2c_regs),
  910. "I2C Registers");
  911. status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
  912. I2C_REGS_ADDR, I2C_REGS_CNT);
  913. if (status)
  914. goto err;
  915. /* Get the MEMC Registers */
  916. ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
  917. MEMC_SEG_NUM,
  918. sizeof(struct mpi_coredump_segment_header)
  919. + sizeof(mpi_coredump->memc_regs),
  920. "MEMC Registers");
  921. status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
  922. MEMC_REGS_ADDR, MEMC_REGS_CNT);
  923. if (status)
  924. goto err;
  925. /* Get the PBus Registers */
  926. ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
  927. PBUS_SEG_NUM,
  928. sizeof(struct mpi_coredump_segment_header)
  929. + sizeof(mpi_coredump->pbus_regs),
  930. "PBUS Registers");
  931. status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
  932. PBUS_REGS_ADDR, PBUS_REGS_CNT);
  933. if (status)
  934. goto err;
  935. /* Get the MDE Registers */
  936. ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
  937. MDE_SEG_NUM,
  938. sizeof(struct mpi_coredump_segment_header)
  939. + sizeof(mpi_coredump->mde_regs),
  940. "MDE Registers");
  941. status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
  942. MDE_REGS_ADDR, MDE_REGS_CNT);
  943. if (status)
  944. goto err;
  945. ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
  946. MISC_NIC_INFO_SEG_NUM,
  947. sizeof(struct mpi_coredump_segment_header)
  948. + sizeof(mpi_coredump->misc_nic_info),
  949. "MISC NIC INFO");
  950. mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
  951. mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
  952. mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
  953. mpi_coredump->misc_nic_info.function = qdev->func;
  954. /* Segment 31 */
  955. /* Get indexed register values. */
  956. ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
  957. INTR_STATES_SEG_NUM,
  958. sizeof(struct mpi_coredump_segment_header)
  959. + sizeof(mpi_coredump->intr_states),
  960. "INTR States");
  961. ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
  962. ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
  963. CAM_ENTRIES_SEG_NUM,
  964. sizeof(struct mpi_coredump_segment_header)
  965. + sizeof(mpi_coredump->cam_entries),
  966. "CAM Entries");
  967. status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
  968. if (status)
  969. goto err;
  970. ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
  971. ROUTING_WORDS_SEG_NUM,
  972. sizeof(struct mpi_coredump_segment_header)
  973. + sizeof(mpi_coredump->nic_routing_words),
  974. "Routing Words");
  975. status = ql_get_routing_entries(qdev,
  976. &mpi_coredump->nic_routing_words[0]);
  977. if (status)
  978. goto err;
  979. /* Segment 34 (Rev C. step 23) */
  980. ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
  981. ETS_SEG_NUM,
  982. sizeof(struct mpi_coredump_segment_header)
  983. + sizeof(mpi_coredump->ets),
  984. "ETS Registers");
  985. status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
  986. if (status)
  987. goto err;
  988. ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
  989. PROBE_DUMP_SEG_NUM,
  990. sizeof(struct mpi_coredump_segment_header)
  991. + sizeof(mpi_coredump->probe_dump),
  992. "Probe Dump");
  993. ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
  994. ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
  995. ROUTING_INDEX_SEG_NUM,
  996. sizeof(struct mpi_coredump_segment_header)
  997. + sizeof(mpi_coredump->routing_regs),
  998. "Routing Regs");
  999. status = ql_get_routing_index_registers(qdev,
  1000. &mpi_coredump->routing_regs[0]);
  1001. if (status)
  1002. goto err;
  1003. ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
  1004. MAC_PROTOCOL_SEG_NUM,
  1005. sizeof(struct mpi_coredump_segment_header)
  1006. + sizeof(mpi_coredump->mac_prot_regs),
  1007. "MAC Prot Regs");
  1008. ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
  1009. /* Get the semaphore registers for all 5 functions */
  1010. ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
  1011. SEM_REGS_SEG_NUM,
  1012. sizeof(struct mpi_coredump_segment_header) +
  1013. sizeof(mpi_coredump->sem_regs), "Sem Registers");
  1014. ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
  1015. /* Prevent the mpi restarting while we dump the memory.*/
  1016. ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
  1017. /* clear the pause */
  1018. status = ql_unpause_mpi_risc(qdev);
  1019. if (status) {
  1020. netif_err(qdev, drv, qdev->ndev,
  1021. "Failed RISC unpause. Status = 0x%.08x\n", status);
  1022. goto err;
  1023. }
  1024. /* Reset the RISC so we can dump RAM */
  1025. status = ql_hard_reset_mpi_risc(qdev);
  1026. if (status) {
  1027. netif_err(qdev, drv, qdev->ndev,
  1028. "Failed RISC reset. Status = 0x%.08x\n", status);
  1029. goto err;
  1030. }
  1031. ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
  1032. WCS_RAM_SEG_NUM,
  1033. sizeof(struct mpi_coredump_segment_header)
  1034. + sizeof(mpi_coredump->code_ram),
  1035. "WCS RAM");
  1036. status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
  1037. CODE_RAM_ADDR, CODE_RAM_CNT);
  1038. if (status) {
  1039. netif_err(qdev, drv, qdev->ndev,
  1040. "Failed Dump of CODE RAM. Status = 0x%.08x\n",
  1041. status);
  1042. goto err;
  1043. }
  1044. /* Insert the segment header */
  1045. ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
  1046. MEMC_RAM_SEG_NUM,
  1047. sizeof(struct mpi_coredump_segment_header)
  1048. + sizeof(mpi_coredump->memc_ram),
  1049. "MEMC RAM");
  1050. status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
  1051. MEMC_RAM_ADDR, MEMC_RAM_CNT);
  1052. if (status) {
  1053. netif_err(qdev, drv, qdev->ndev,
  1054. "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
  1055. status);
  1056. goto err;
  1057. }
  1058. err:
  1059. ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
  1060. return status;
  1061. }
  1062. static void ql_get_core_dump(struct ql_adapter *qdev)
  1063. {
  1064. if (!ql_own_firmware(qdev)) {
  1065. netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
  1066. return;
  1067. }
  1068. if (!netif_running(qdev->ndev)) {
  1069. netif_err(qdev, ifup, qdev->ndev,
  1070. "Force Coredump can only be done from interface that is up\n");
  1071. return;
  1072. }
  1073. ql_queue_fw_error(qdev);
  1074. }
  1075. static void ql_gen_reg_dump(struct ql_adapter *qdev,
  1076. struct ql_reg_dump *mpi_coredump)
  1077. {
  1078. int i, status;
  1079. memset(&(mpi_coredump->mpi_global_header), 0,
  1080. sizeof(struct mpi_coredump_global_header));
  1081. mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
  1082. mpi_coredump->mpi_global_header.headerSize =
  1083. sizeof(struct mpi_coredump_global_header);
  1084. mpi_coredump->mpi_global_header.imageSize =
  1085. sizeof(struct ql_reg_dump);
  1086. strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
  1087. sizeof(mpi_coredump->mpi_global_header.idString));
  1088. /* segment 16 */
  1089. ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
  1090. MISC_NIC_INFO_SEG_NUM,
  1091. sizeof(struct mpi_coredump_segment_header)
  1092. + sizeof(mpi_coredump->misc_nic_info),
  1093. "MISC NIC INFO");
  1094. mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
  1095. mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
  1096. mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
  1097. mpi_coredump->misc_nic_info.function = qdev->func;
  1098. /* Segment 16, Rev C. Step 18 */
  1099. ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
  1100. NIC1_CONTROL_SEG_NUM,
  1101. sizeof(struct mpi_coredump_segment_header)
  1102. + sizeof(mpi_coredump->nic_regs),
  1103. "NIC Registers");
  1104. /* Get generic reg dump */
  1105. for (i = 0; i < 64; i++)
  1106. mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
  1107. /* Segment 31 */
  1108. /* Get indexed register values. */
  1109. ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
  1110. INTR_STATES_SEG_NUM,
  1111. sizeof(struct mpi_coredump_segment_header)
  1112. + sizeof(mpi_coredump->intr_states),
  1113. "INTR States");
  1114. ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
  1115. ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
  1116. CAM_ENTRIES_SEG_NUM,
  1117. sizeof(struct mpi_coredump_segment_header)
  1118. + sizeof(mpi_coredump->cam_entries),
  1119. "CAM Entries");
  1120. status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
  1121. if (status)
  1122. return;
  1123. ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
  1124. ROUTING_WORDS_SEG_NUM,
  1125. sizeof(struct mpi_coredump_segment_header)
  1126. + sizeof(mpi_coredump->nic_routing_words),
  1127. "Routing Words");
  1128. status = ql_get_routing_entries(qdev,
  1129. &mpi_coredump->nic_routing_words[0]);
  1130. if (status)
  1131. return;
  1132. /* Segment 34 (Rev C. step 23) */
  1133. ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
  1134. ETS_SEG_NUM,
  1135. sizeof(struct mpi_coredump_segment_header)
  1136. + sizeof(mpi_coredump->ets),
  1137. "ETS Registers");
  1138. status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
  1139. if (status)
  1140. return;
  1141. }
  1142. void ql_get_dump(struct ql_adapter *qdev, void *buff)
  1143. {
  1144. /*
  1145. * If the dump has already been taken and is stored
  1146. * in our internal buffer and if force dump is set then
  1147. * just start the spool to dump it to the log file
  1148. * and also, take a snapshot of the general regs to
  1149. * to the user's buffer or else take complete dump
  1150. * to the user's buffer if force is not set.
  1151. */
  1152. if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
  1153. if (!ql_core_dump(qdev, buff))
  1154. ql_soft_reset_mpi_risc(qdev);
  1155. else
  1156. netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
  1157. } else {
  1158. ql_gen_reg_dump(qdev, buff);
  1159. ql_get_core_dump(qdev);
  1160. }
  1161. }
  1162. /* Coredump to messages log file using separate worker thread */
  1163. void ql_mpi_core_to_log(struct work_struct *work)
  1164. {
  1165. struct ql_adapter *qdev =
  1166. container_of(work, struct ql_adapter, mpi_core_to_log.work);
  1167. u32 *tmp, count;
  1168. int i;
  1169. count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
  1170. tmp = (u32 *)qdev->mpi_coredump;
  1171. netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
  1172. "Core is dumping to log file!\n");
  1173. for (i = 0; i < count; i += 8) {
  1174. pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
  1175. "%.08x %.08x %.08x\n", i,
  1176. tmp[i + 0],
  1177. tmp[i + 1],
  1178. tmp[i + 2],
  1179. tmp[i + 3],
  1180. tmp[i + 4],
  1181. tmp[i + 5],
  1182. tmp[i + 6],
  1183. tmp[i + 7]);
  1184. msleep(5);
  1185. }
  1186. }
  1187. #ifdef QL_REG_DUMP
  1188. static void ql_dump_intr_states(struct ql_adapter *qdev)
  1189. {
  1190. int i;
  1191. u32 value;
  1192. for (i = 0; i < qdev->intr_count; i++) {
  1193. ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
  1194. value = ql_read32(qdev, INTR_EN);
  1195. pr_err("%s: Interrupt %d is %s\n",
  1196. qdev->ndev->name, i,
  1197. (value & INTR_EN_EN ? "enabled" : "disabled"));
  1198. }
  1199. }
  1200. #define DUMP_XGMAC(qdev, reg) \
  1201. do { \
  1202. u32 data; \
  1203. ql_read_xgmac_reg(qdev, reg, &data); \
  1204. pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
  1205. } while (0)
  1206. void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
  1207. {
  1208. if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
  1209. pr_err("%s: Couldn't get xgmac sem\n", __func__);
  1210. return;
  1211. }
  1212. DUMP_XGMAC(qdev, PAUSE_SRC_LO);
  1213. DUMP_XGMAC(qdev, PAUSE_SRC_HI);
  1214. DUMP_XGMAC(qdev, GLOBAL_CFG);
  1215. DUMP_XGMAC(qdev, TX_CFG);
  1216. DUMP_XGMAC(qdev, RX_CFG);
  1217. DUMP_XGMAC(qdev, FLOW_CTL);
  1218. DUMP_XGMAC(qdev, PAUSE_OPCODE);
  1219. DUMP_XGMAC(qdev, PAUSE_TIMER);
  1220. DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
  1221. DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
  1222. DUMP_XGMAC(qdev, MAC_TX_PARAMS);
  1223. DUMP_XGMAC(qdev, MAC_RX_PARAMS);
  1224. DUMP_XGMAC(qdev, MAC_SYS_INT);
  1225. DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
  1226. DUMP_XGMAC(qdev, MAC_MGMT_INT);
  1227. DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
  1228. DUMP_XGMAC(qdev, EXT_ARB_MODE);
  1229. ql_sem_unlock(qdev, qdev->xg_sem_mask);
  1230. }
/* Placeholder: ETS register dump is not implemented. */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
  1234. static void ql_dump_cam_entries(struct ql_adapter *qdev)
  1235. {
  1236. int i;
  1237. u32 value[3];
  1238. i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  1239. if (i)
  1240. return;
  1241. for (i = 0; i < 4; i++) {
  1242. if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
  1243. pr_err("%s: Failed read of mac index register\n",
  1244. __func__);
  1245. return;
  1246. } else {
  1247. if (value[0])
  1248. pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
  1249. qdev->ndev->name, i, value[1], value[0],
  1250. value[2]);
  1251. }
  1252. }
  1253. for (i = 0; i < 32; i++) {
  1254. if (ql_get_mac_addr_reg
  1255. (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
  1256. pr_err("%s: Failed read of mac index register\n",
  1257. __func__);
  1258. return;
  1259. } else {
  1260. if (value[0])
  1261. pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
  1262. qdev->ndev->name, i, value[1], value[0]);
  1263. }
  1264. }
  1265. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  1266. }
  1267. void ql_dump_routing_entries(struct ql_adapter *qdev)
  1268. {
  1269. int i;
  1270. u32 value;
  1271. i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  1272. if (i)
  1273. return;
  1274. for (i = 0; i < 16; i++) {
  1275. value = 0;
  1276. if (ql_get_routing_reg(qdev, i, &value)) {
  1277. pr_err("%s: Failed read of routing index register\n",
  1278. __func__);
  1279. return;
  1280. } else {
  1281. if (value)
  1282. pr_err("%s: Routing Mask %d = 0x%.08x\n",
  1283. qdev->ndev->name, i, value);
  1284. }
  1285. }
  1286. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  1287. }
  1288. #define DUMP_REG(qdev, reg) \
  1289. pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
  1290. void ql_dump_regs(struct ql_adapter *qdev)
  1291. {
  1292. pr_err("reg dump for function #%d\n", qdev->func);
  1293. DUMP_REG(qdev, SYS);
  1294. DUMP_REG(qdev, RST_FO);
  1295. DUMP_REG(qdev, FSC);
  1296. DUMP_REG(qdev, CSR);
  1297. DUMP_REG(qdev, ICB_RID);
  1298. DUMP_REG(qdev, ICB_L);
  1299. DUMP_REG(qdev, ICB_H);
  1300. DUMP_REG(qdev, CFG);
  1301. DUMP_REG(qdev, BIOS_ADDR);
  1302. DUMP_REG(qdev, STS);
  1303. DUMP_REG(qdev, INTR_EN);
  1304. DUMP_REG(qdev, INTR_MASK);
  1305. DUMP_REG(qdev, ISR1);
  1306. DUMP_REG(qdev, ISR2);
  1307. DUMP_REG(qdev, ISR3);
  1308. DUMP_REG(qdev, ISR4);
  1309. DUMP_REG(qdev, REV_ID);
  1310. DUMP_REG(qdev, FRC_ECC_ERR);
  1311. DUMP_REG(qdev, ERR_STS);
  1312. DUMP_REG(qdev, RAM_DBG_ADDR);
  1313. DUMP_REG(qdev, RAM_DBG_DATA);
  1314. DUMP_REG(qdev, ECC_ERR_CNT);
  1315. DUMP_REG(qdev, SEM);
  1316. DUMP_REG(qdev, GPIO_1);
  1317. DUMP_REG(qdev, GPIO_2);
  1318. DUMP_REG(qdev, GPIO_3);
  1319. DUMP_REG(qdev, XGMAC_ADDR);
  1320. DUMP_REG(qdev, XGMAC_DATA);
  1321. DUMP_REG(qdev, NIC_ETS);
  1322. DUMP_REG(qdev, CNA_ETS);
  1323. DUMP_REG(qdev, FLASH_ADDR);
  1324. DUMP_REG(qdev, FLASH_DATA);
  1325. DUMP_REG(qdev, CQ_STOP);
  1326. DUMP_REG(qdev, PAGE_TBL_RID);
  1327. DUMP_REG(qdev, WQ_PAGE_TBL_LO);
  1328. DUMP_REG(qdev, WQ_PAGE_TBL_HI);
  1329. DUMP_REG(qdev, CQ_PAGE_TBL_LO);
  1330. DUMP_REG(qdev, CQ_PAGE_TBL_HI);
  1331. DUMP_REG(qdev, COS_DFLT_CQ1);
  1332. DUMP_REG(qdev, COS_DFLT_CQ2);
  1333. DUMP_REG(qdev, SPLT_HDR);
  1334. DUMP_REG(qdev, FC_PAUSE_THRES);
  1335. DUMP_REG(qdev, NIC_PAUSE_THRES);
  1336. DUMP_REG(qdev, FC_ETHERTYPE);
  1337. DUMP_REG(qdev, FC_RCV_CFG);
  1338. DUMP_REG(qdev, NIC_RCV_CFG);
  1339. DUMP_REG(qdev, FC_COS_TAGS);
  1340. DUMP_REG(qdev, NIC_COS_TAGS);
  1341. DUMP_REG(qdev, MGMT_RCV_CFG);
  1342. DUMP_REG(qdev, XG_SERDES_ADDR);
  1343. DUMP_REG(qdev, XG_SERDES_DATA);
  1344. DUMP_REG(qdev, PRB_MX_ADDR);
  1345. DUMP_REG(qdev, PRB_MX_DATA);
  1346. ql_dump_intr_states(qdev);
  1347. ql_dump_xgmac_control_regs(qdev);
  1348. ql_dump_ets_regs(qdev);
  1349. ql_dump_cam_entries(qdev);
  1350. ql_dump_routing_entries(qdev);
  1351. }
  1352. #endif
  1353. #ifdef QL_STAT_DUMP
  1354. #define DUMP_STAT(qdev, stat) \
  1355. pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
  1356. void ql_dump_stat(struct ql_adapter *qdev)
  1357. {
  1358. pr_err("%s: Enter\n", __func__);
  1359. DUMP_STAT(qdev, tx_pkts);
  1360. DUMP_STAT(qdev, tx_bytes);
  1361. DUMP_STAT(qdev, tx_mcast_pkts);
  1362. DUMP_STAT(qdev, tx_bcast_pkts);
  1363. DUMP_STAT(qdev, tx_ucast_pkts);
  1364. DUMP_STAT(qdev, tx_ctl_pkts);
  1365. DUMP_STAT(qdev, tx_pause_pkts);
  1366. DUMP_STAT(qdev, tx_64_pkt);
  1367. DUMP_STAT(qdev, tx_65_to_127_pkt);
  1368. DUMP_STAT(qdev, tx_128_to_255_pkt);
  1369. DUMP_STAT(qdev, tx_256_511_pkt);
  1370. DUMP_STAT(qdev, tx_512_to_1023_pkt);
  1371. DUMP_STAT(qdev, tx_1024_to_1518_pkt);
  1372. DUMP_STAT(qdev, tx_1519_to_max_pkt);
  1373. DUMP_STAT(qdev, tx_undersize_pkt);
  1374. DUMP_STAT(qdev, tx_oversize_pkt);
  1375. DUMP_STAT(qdev, rx_bytes);
  1376. DUMP_STAT(qdev, rx_bytes_ok);
  1377. DUMP_STAT(qdev, rx_pkts);
  1378. DUMP_STAT(qdev, rx_pkts_ok);
  1379. DUMP_STAT(qdev, rx_bcast_pkts);
  1380. DUMP_STAT(qdev, rx_mcast_pkts);
  1381. DUMP_STAT(qdev, rx_ucast_pkts);
  1382. DUMP_STAT(qdev, rx_undersize_pkts);
  1383. DUMP_STAT(qdev, rx_oversize_pkts);
  1384. DUMP_STAT(qdev, rx_jabber_pkts);
  1385. DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
  1386. DUMP_STAT(qdev, rx_drop_events);
  1387. DUMP_STAT(qdev, rx_fcerr_pkts);
  1388. DUMP_STAT(qdev, rx_align_err);
  1389. DUMP_STAT(qdev, rx_symbol_err);
  1390. DUMP_STAT(qdev, rx_mac_err);
  1391. DUMP_STAT(qdev, rx_ctl_pkts);
  1392. DUMP_STAT(qdev, rx_pause_pkts);
  1393. DUMP_STAT(qdev, rx_64_pkts);
  1394. DUMP_STAT(qdev, rx_65_to_127_pkts);
  1395. DUMP_STAT(qdev, rx_128_255_pkts);
  1396. DUMP_STAT(qdev, rx_256_511_pkts);
  1397. DUMP_STAT(qdev, rx_512_to_1023_pkts);
  1398. DUMP_STAT(qdev, rx_1024_to_1518_pkts);
  1399. DUMP_STAT(qdev, rx_1519_to_max_pkts);
  1400. DUMP_STAT(qdev, rx_len_err_pkts);
  1401. };
  1402. #endif
  1403. #ifdef QL_DEV_DUMP
  1404. #define DUMP_QDEV_FIELD(qdev, type, field) \
  1405. pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
  1406. #define DUMP_QDEV_DMA_FIELD(qdev, field) \
  1407. pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
  1408. #define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
  1409. pr_err("%s[%d].%s = " type "\n", \
  1410. #array, index, #field, qdev->array[index].field);
  1411. void ql_dump_qdev(struct ql_adapter *qdev)
  1412. {
  1413. int i;
  1414. DUMP_QDEV_FIELD(qdev, "%lx", flags);
  1415. DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
  1416. DUMP_QDEV_FIELD(qdev, "%p", pdev);
  1417. DUMP_QDEV_FIELD(qdev, "%p", ndev);
  1418. DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
  1419. DUMP_QDEV_FIELD(qdev, "%p", reg_base);
  1420. DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
  1421. DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
  1422. DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
  1423. DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
  1424. DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
  1425. DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
  1426. DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
  1427. DUMP_QDEV_FIELD(qdev, "%d", intr_count);
  1428. if (qdev->msi_x_entry)
  1429. for (i = 0; i < qdev->intr_count; i++) {
  1430. DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
  1431. DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
  1432. }
  1433. for (i = 0; i < qdev->intr_count; i++) {
  1434. DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
  1435. DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
  1436. DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
  1437. DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
  1438. DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
  1439. DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
  1440. }
  1441. DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
  1442. DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
  1443. DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
  1444. DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
  1445. DUMP_QDEV_FIELD(qdev, "%d", intr_count);
  1446. DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
  1447. DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
  1448. DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
  1449. DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
  1450. DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
  1451. DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
  1452. DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
  1453. }
  1454. #endif
  1455. #ifdef QL_CB_DUMP
  1456. void ql_dump_wqicb(struct wqicb *wqicb)
  1457. {
  1458. pr_err("Dumping wqicb stuff...\n");
  1459. pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
  1460. pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
  1461. pr_err("wqicb->cq_id_rss = %d\n",
  1462. le16_to_cpu(wqicb->cq_id_rss));
  1463. pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
  1464. pr_err("wqicb->wq_addr = 0x%llx\n",
  1465. (unsigned long long) le64_to_cpu(wqicb->addr));
  1466. pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
  1467. (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
  1468. }
  1469. void ql_dump_tx_ring(struct tx_ring *tx_ring)
  1470. {
  1471. if (tx_ring == NULL)
  1472. return;
  1473. pr_err("===================== Dumping tx_ring %d ===============\n",
  1474. tx_ring->wq_id);
  1475. pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
  1476. pr_err("tx_ring->base_dma = 0x%llx\n",
  1477. (unsigned long long) tx_ring->wq_base_dma);
  1478. pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
  1479. tx_ring->cnsmr_idx_sh_reg,
  1480. tx_ring->cnsmr_idx_sh_reg
  1481. ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
  1482. pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
  1483. pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
  1484. pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
  1485. pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
  1486. pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
  1487. pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
  1488. pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
  1489. pr_err("tx_ring->q = %p\n", tx_ring->q);
  1490. pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
  1491. }
  1492. void ql_dump_ricb(struct ricb *ricb)
  1493. {
  1494. int i;
  1495. pr_err("===================== Dumping ricb ===============\n");
  1496. pr_err("Dumping ricb stuff...\n");
  1497. pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
  1498. pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
  1499. ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
  1500. ricb->flags & RSS_L6K ? "RSS_L6K " : "",
  1501. ricb->flags & RSS_LI ? "RSS_LI " : "",
  1502. ricb->flags & RSS_LB ? "RSS_LB " : "",
  1503. ricb->flags & RSS_LM ? "RSS_LM " : "",
  1504. ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
  1505. ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
  1506. ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
  1507. ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
  1508. pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
  1509. for (i = 0; i < 16; i++)
  1510. pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
  1511. le32_to_cpu(ricb->hash_cq_id[i]));
  1512. for (i = 0; i < 10; i++)
  1513. pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
  1514. le32_to_cpu(ricb->ipv6_hash_key[i]));
  1515. for (i = 0; i < 4; i++)
  1516. pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
  1517. le32_to_cpu(ricb->ipv4_hash_key[i]));
  1518. }
/*
 * Dump a completion queue initialization control block (CQICB).
 * Multi-byte fields are stored little-endian, hence the le*_to_cpu
 * conversions before printing.
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	pr_err("Dumping cqicb stuff...\n");

	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
	pr_err("cqicb->flags = %x\n", cqicb->flags);
	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	pr_err("cqicb->addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->addr));
	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	/* Interrupt coalescing parameters. */
	pr_err("cqicb->pkt_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->pkt_delay));
	pr_err("cqicb->irq_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->irq_delay));
	/* Large buffer queue (lbq) parameters. */
	pr_err("cqicb->lbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	pr_err("cqicb->lbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_len));
	/* Small buffer queue (sbq) parameters. */
	pr_err("cqicb->sbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	pr_err("cqicb->sbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_len));
}
/*
 * Dump the software state of an RX (completion queue) ring to the
 * kernel log, including its large- and small-buffer queues.
 * Debug-only helper; tolerates a NULL ring pointer.
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	pr_err("===================== Dumping rx_ring %d ===============\n",
	       rx_ring->cq_id);
	pr_err("Dumping rx_ring %d, type = %s%s%s\n",
	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
	pr_err("rx_ring->cq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
	/* Guard the shadow-register read: the shared register may not be
	 * mapped yet, in which case 0 is printed instead. */
	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg
	       ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
	       rx_ring->cnsmr_idx_db_reg);
	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
	/* Large buffer queue (lbq) state. */
	pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
	pr_err("rx_ring->lbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_dma);
	pr_err("rx_ring->lbq_base_indirect = %p\n",
	       rx_ring->lbq_base_indirect);
	pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
	pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
	pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
	pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
	       rx_ring->lbq_prod_idx_db_reg);
	pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
	pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
	pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
	/* Small buffer queue (sbq) state. */
	pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
	pr_err("rx_ring->sbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_dma);
	pr_err("rx_ring->sbq_base_indirect = %p\n",
	       rx_ring->sbq_base_indirect);
	pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
	pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
	pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
	pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
	       rx_ring->sbq_prod_idx_db_reg);
	pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
	pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
	pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
	pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
	pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
}
  1612. void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
  1613. {
  1614. void *ptr;
  1615. pr_err("%s: Enter\n", __func__);
  1616. ptr = kmalloc(size, GFP_ATOMIC);
  1617. if (ptr == NULL)
  1618. return;
  1619. if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
  1620. pr_err("%s: Failed to upload control block!\n", __func__);
  1621. goto fail_it;
  1622. }
  1623. switch (bit) {
  1624. case CFG_DRQ:
  1625. ql_dump_wqicb((struct wqicb *)ptr);
  1626. break;
  1627. case CFG_DCQ:
  1628. ql_dump_cqicb((struct cqicb *)ptr);
  1629. break;
  1630. case CFG_DR:
  1631. ql_dump_ricb((struct ricb *)ptr);
  1632. break;
  1633. default:
  1634. pr_err("%s: Invalid bit value = %x\n", __func__, bit);
  1635. break;
  1636. }
  1637. fail_it:
  1638. kfree(ptr);
  1639. }
  1640. #endif
  1641. #ifdef QL_OB_DUMP
  1642. void ql_dump_tx_desc(struct tx_buf_desc *tbd)
  1643. {
  1644. pr_err("tbd->addr = 0x%llx\n",
  1645. le64_to_cpu((u64) tbd->addr));
  1646. pr_err("tbd->len = %d\n",
  1647. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  1648. pr_err("tbd->flags = %s %s\n",
  1649. tbd->len & TX_DESC_C ? "C" : ".",
  1650. tbd->len & TX_DESC_E ? "E" : ".");
  1651. tbd++;
  1652. pr_err("tbd->addr = 0x%llx\n",
  1653. le64_to_cpu((u64) tbd->addr));
  1654. pr_err("tbd->len = %d\n",
  1655. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  1656. pr_err("tbd->flags = %s %s\n",
  1657. tbd->len & TX_DESC_C ? "C" : ".",
  1658. tbd->len & TX_DESC_E ? "E" : ".");
  1659. tbd++;
  1660. pr_err("tbd->addr = 0x%llx\n",
  1661. le64_to_cpu((u64) tbd->addr));
  1662. pr_err("tbd->len = %d\n",
  1663. le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
  1664. pr_err("tbd->flags = %s %s\n",
  1665. tbd->len & TX_DESC_C ? "C" : ".",
  1666. tbd->len & TX_DESC_E ? "E" : ".");
  1667. }
  1668. void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
  1669. {
  1670. struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
  1671. (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
  1672. struct tx_buf_desc *tbd;
  1673. u16 frame_len;
  1674. pr_err("%s\n", __func__);
  1675. pr_err("opcode = %s\n",
  1676. (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
  1677. pr_err("flags1 = %s %s %s %s %s\n",
  1678. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
  1679. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
  1680. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
  1681. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
  1682. ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
  1683. pr_err("flags2 = %s %s %s\n",
  1684. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
  1685. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
  1686. ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
  1687. pr_err("flags3 = %s %s %s\n",
  1688. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
  1689. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
  1690. ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
  1691. pr_err("tid = %x\n", ob_mac_iocb->tid);
  1692. pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
  1693. pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
  1694. if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
  1695. pr_err("frame_len = %d\n",
  1696. le32_to_cpu(ob_mac_tso_iocb->frame_len));
  1697. pr_err("mss = %d\n",
  1698. le16_to_cpu(ob_mac_tso_iocb->mss));
  1699. pr_err("prot_hdr_len = %d\n",
  1700. le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
  1701. pr_err("hdr_offset = 0x%.04x\n",
  1702. le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
  1703. frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
  1704. } else {
  1705. pr_err("frame_len = %d\n",
  1706. le16_to_cpu(ob_mac_iocb->frame_len));
  1707. frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
  1708. }
  1709. tbd = &ob_mac_iocb->tbd[0];
  1710. ql_dump_tx_desc(tbd);
  1711. }
/*
 * Dump an outbound MAC IOCB response (TX completion), printing the
 * status flags symbolically.  Note the last flag (B) lives in flags2
 * while the rest come from flags1.
 */
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = %d\n", ob_mac_rsp->opcode);
	pr_err("flags = %s %s %s %s %s %s %s\n",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	pr_err("tid = %x\n", ob_mac_rsp->tid);
}
  1726. #endif
  1727. #ifdef QL_IB_DUMP
/*
 * Dump an inbound MAC IOCB response (RX completion): flag bytes
 * decoded symbolically, then data/header buffer addresses and the
 * optional RSS hash and VLAN tag.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
	pr_err("flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
	/* Multicast type is a multi-bit field, so compare against the
	 * whole mask rather than testing individual bits. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		pr_err("%s%s%s Multicast\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	pr_err("flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
	/* Error type is likewise a multi-bit field. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		pr_err("%s%s%s%s%s error\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
	pr_err("flags3 = %s%s\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("RSS flags = %s%s%s%s\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
	pr_err("data_len = %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	pr_err("data_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	/* RSS hash and VLAN tag are only valid when flagged. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("rss = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		pr_err("vlan_id = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));
	pr_err("flags4 = %s%s%s\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
	/* Header split: dump the separate header buffer if present. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		pr_err("hdr length = %d\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		pr_err("hdr addr = 0x%llx\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
  1799. #endif
  1800. #ifdef QL_ALL_DUMP
  1801. void ql_dump_all(struct ql_adapter *qdev)
  1802. {
  1803. int i;
  1804. QL_DUMP_REGS(qdev);
  1805. QL_DUMP_QDEV(qdev);
  1806. for (i = 0; i < qdev->tx_ring_count; i++) {
  1807. QL_DUMP_TX_RING(&qdev->tx_ring[i]);
  1808. QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
  1809. }
  1810. for (i = 0; i < qdev->rx_ring_count; i++) {
  1811. QL_DUMP_RX_RING(&qdev->rx_ring[i]);
  1812. QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
  1813. }
  1814. }
  1815. #endif