qlcnic_minidump.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358
  1. /*
  2. * QLogic qlcnic NIC Driver
  3. * Copyright (c) 2009-2013 QLogic Corporation
  4. *
  5. * See LICENSE.qlcnic for copyright and licensing details.
  6. */
  7. #include "qlcnic.h"
  8. #include "qlcnic_hdr.h"
  9. #include "qlcnic_83xx_hw.h"
  10. #include "qlcnic_hw.h"
  11. #include <net/ip.h>
  12. #define QLC_83XX_MINIDUMP_FLASH 0x520000
  13. #define QLC_83XX_OCM_INDEX 3
  14. #define QLC_83XX_PCI_INDEX 0
  15. #define QLC_83XX_DMA_ENGINE_INDEX 8
/* Registers read back by qlcnic_read_memory_test_agent(): one 16-byte
 * agent cycle yields four 32-bit words from these four addresses.
 */
static const u32 qlcnic_ms_read_data[] = {
	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
};
  19. #define QLCNIC_DUMP_WCRB BIT_0
  20. #define QLCNIC_DUMP_RWCRB BIT_1
  21. #define QLCNIC_DUMP_ANDCRB BIT_2
  22. #define QLCNIC_DUMP_ORCRB BIT_3
  23. #define QLCNIC_DUMP_POLLCRB BIT_4
  24. #define QLCNIC_DUMP_RD_SAVE BIT_5
  25. #define QLCNIC_DUMP_WRT_SAVED BIT_6
  26. #define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
  27. #define QLCNIC_DUMP_SKIP BIT_7
  28. #define QLCNIC_DUMP_MASK_MAX 0xff
/* DMA descriptor written (via qlcnic_ms_mem_write128) to MS memory at
 * mem->desc_card_addr to program one PEX DMA transfer; the layout is
 * consumed by the adapter hardware, hence __packed.
 */
struct qlcnic_pex_dma_descriptor {
	u32	read_data_size;		/* bytes to transfer in this pass */
	u32	dma_desc_cmd;		/* command word, see qlcnic_read_memory_pexdma() */
	u32	src_addr_low;		/* card-side source address */
	u32	src_addr_high;
	u32	dma_bus_addr_low;	/* host DMA buffer bus address */
	u32	dma_bus_addr_high;
	u32	rsvd[6];
} __packed;
/* Header preceding every minidump template entry.
 * ->type selects the handler (enum qlcnic_minidump_opcode), ->cap_size is
 * the expected capture size (checked by qlcnic_valid_dump_entry()), and
 * handlers may set QLCNIC_DUMP_SKIP in ->flags.
 * NOTE(review): ->mask presumably gates the entry against the driver
 * capture mask — confirm in the template walker (not visible here).
 */
struct qlcnic_common_entry_hdr {
	u32	type;
	u32	offset;
	u32	cap_size;
	u8	mask;
	u8	rsvd[2];
	u8	flags;
} __packed;
/* Payload of a CRB-read entry (QLCNIC_DUMP_READ_CRB): read ->no_ops
 * registers starting at ->addr, stepping the address by ->stride.
 */
struct __crb {
	u32	addr;
	u8	stride;
	u8	rsvd1[3];
	u32	data_size;
	u32	no_ops;
	u32	rsvd2[4];
} __packed;
/* Payload of a control entry (QLCNIC_DUMP_PEG_REG / QLCNIC_DUMP_READ_CTRL).
 * ->opcode is a bitmask of QLCNIC_DUMP_* sub-operations executed by
 * qlcnic_dump_ctrl(); index_a/index_v select saved-state slots and
 * val1..val3 are the operands.
 */
struct __ctrl {
	u32	addr;
	u8	stride;
	u8	index_a;
	u16	timeout;	/* poll budget for QLCNIC_DUMP_POLLCRB */
	u32	data_size;
	u32	no_ops;
	u8	opcode;
	u8	index_v;
	u8	shl_val;
	u8	shr_val;
	u32	val1;
	u32	val2;
	u32	val3;
} __packed;
/* Payload of the L1/L2 cache dump entries (qlcnic_dump_l1_cache /
 * qlcnic_dump_l2_cache): tag and control registers, plus the read-back
 * window (read_addr .. read_addr + read_addr_num * read_addr_stride).
 */
struct __cache {
	u32	addr;		/* tag register */
	u16	stride;		/* tag increment per op */
	u16	init_tag_val;
	u32	size;
	u32	no_ops;
	u32	ctrl_addr;
	u32	ctrl_val;	/* L2 packs a poll mask/timeout in the MSW */
	u32	read_addr;
	u8	read_addr_stride;
	u8	read_addr_num;
	u8	rsvd1[2];
} __packed;
/* Payload of an on-chip-memory read entry (QLCNIC_DUMP_READ_OCM):
 * ->no_ops MMIO reads from pci_base0 + ->read_addr, stepping by
 * ->read_addr_stride (see qlcnic_dump_ocm()).
 */
struct __ocm {
	u8	rsvd[8];
	u32	size;
	u32	no_ops;
	u8	rsvd1[8];
	u32	read_addr;
	u32	read_addr_stride;
} __packed;
/* Payload of memory/ROM read entries.  ->addr/->size describe the region;
 * desc_card_addr, dma_desc_cmd and start_dma_cmd additionally drive the
 * PEX DMA path (qlcnic_read_memory_pexdma() / qlcnic_start_pex_dma()).
 */
struct __mem {
	u32	desc_card_addr;
	u32	dma_desc_cmd;
	u32	start_dma_cmd;
	u32	rsvd[3];
	u32	addr;
	u32	size;
} __packed;
/* Payload of a mux entry (QLCNIC_DUMP_READ_MUX): write a select value to
 * ->addr, read back from ->read_addr, ->no_ops times, stepping the select
 * value by ->val_stride (see qlcnic_dump_mux()).
 */
struct __mux {
	u32	addr;
	u8	rsvd[4];
	u32	size;
	u32	no_ops;
	u32	val;
	u32	val_stride;
	u32	read_addr;
	u8	rsvd2[4];
} __packed;
/* Payload of a queue entry (QLCNIC_DUMP_QUEUE): select each queue id via
 * ->sel_addr and read ->read_addr_cnt registers per queue
 * (see qlcnic_dump_que()).
 */
struct __queue {
	u32	sel_addr;
	u16	stride;		/* queue-id increment per op */
	u8	rsvd[2];
	u32	size;
	u32	no_ops;
	u8	rsvd2[8];
	u32	read_addr;
	u8	read_addr_stride;
	u8	read_addr_cnt;
	u8	rsvd3[2];
} __packed;
/* Payload of a poll-then-read entry (QLCNIC_DUMP_POLL_RD): write a select
 * value, poll ->sel_addr for ->poll_mask, then read ->read_addr
 * (see qlcnic_read_pollrd()).
 */
struct __pollrd {
	u32	sel_addr;
	u32	read_addr;
	u32	sel_val;
	u16	sel_val_stride;
	u16	no_ops;
	u32	poll_wait;	/* max poll iterations */
	u32	poll_mask;
	u32	data_size;
	u8	rsvd[4];
} __packed;
/* Payload of a two-level mux entry (QLCNIC_READ_MUX2): two select values
 * are each written to ->sel_addr1, masked and forwarded to ->sel_addr2,
 * then ->read_addr is sampled (see qlcnic_read_mux2()).
 */
struct __mux2 {
	u32	sel_addr1;
	u32	sel_addr2;
	u32	sel_val1;
	u32	sel_val2;
	u32	no_ops;
	u32	sel_val_mask;
	u32	read_addr;
	u8	sel_val_stride;
	u8	data_size;
	u8	rsvd[2];
} __packed;
/* Payload of a poll/read-modify-write entry (QLCNIC_READ_POLLRDMWR):
 * poll ->addr1, read ->addr2, mask with ->mod_mask and write back, then
 * poll again (see qlcnic_read_pollrdmwr()).
 */
struct __pollrdmwr {
	u32	addr1;
	u32	addr2;
	u32	val1;
	u32	val2;
	u32	poll_wait;	/* max poll iterations */
	u32	poll_mask;
	u32	mod_mask;
	u32	data_size;
} __packed;
/* One minidump template entry: common header followed by the
 * opcode-specific payload, overlaid as a union.
 */
struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb	crb;
		struct __cache	cache;
		struct __ocm	ocm;
		struct __mem	mem;
		struct __mux	mux;
		struct __queue	que;
		struct __ctrl	ctrl;
		struct __pollrdmwr	pollrdmwr;
		struct __mux2	mux2;
		struct __pollrd	pollrd;
	} region;
} __packed;
/* Entry opcodes that may appear in a minidump template; each maps to a
 * handler in qlcnic_fw_dump_ops / qlcnic_83xx_fw_dump_ops below.
 */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_POLL_RD	= 35,
	QLCNIC_READ_MUX2	= 36,
	QLCNIC_READ_POLLRDMWR	= 37,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,	/* template header itself */
	QLCNIC_DUMP_RDEND	= 255	/* end-of-template marker */
};
  193. inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
  194. {
  195. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  196. return hdr->saved_state[index];
  197. }
  198. inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
  199. u32 value)
  200. {
  201. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  202. hdr->saved_state[index] = value;
  203. }
  204. void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
  205. {
  206. struct qlcnic_82xx_dump_template_hdr *hdr;
  207. hdr = fw_dump->tmpl_hdr;
  208. fw_dump->tmpl_hdr_size = hdr->size;
  209. fw_dump->version = hdr->version;
  210. fw_dump->num_entries = hdr->num_entries;
  211. fw_dump->offset = hdr->offset;
  212. hdr->drv_cap_mask = hdr->cap_mask;
  213. fw_dump->cap_mask = hdr->cap_mask;
  214. fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
  215. }
  216. inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
  217. {
  218. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  219. return hdr->cap_sizes[index];
  220. }
  221. void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
  222. {
  223. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  224. hdr->sys_info[idx] = value;
  225. }
  226. void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
  227. {
  228. struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
  229. hdr->drv_cap_mask = mask;
  230. }
  231. inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
  232. {
  233. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  234. return hdr->saved_state[index];
  235. }
  236. inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
  237. u32 value)
  238. {
  239. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  240. hdr->saved_state[index] = value;
  241. }
  242. #define QLCNIC_TEMPLATE_VERSION (0x20001)
  243. void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
  244. {
  245. struct qlcnic_83xx_dump_template_hdr *hdr;
  246. hdr = fw_dump->tmpl_hdr;
  247. fw_dump->tmpl_hdr_size = hdr->size;
  248. fw_dump->version = hdr->version;
  249. fw_dump->num_entries = hdr->num_entries;
  250. fw_dump->offset = hdr->offset;
  251. hdr->drv_cap_mask = hdr->cap_mask;
  252. fw_dump->cap_mask = hdr->cap_mask;
  253. fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
  254. QLCNIC_TEMPLATE_VERSION;
  255. }
  256. inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
  257. {
  258. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  259. return hdr->cap_sizes[index];
  260. }
  261. void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
  262. {
  263. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  264. hdr->sys_info[idx] = value;
  265. }
  266. void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
  267. {
  268. struct qlcnic_83xx_dump_template_hdr *hdr;
  269. hdr = tmpl_hdr;
  270. hdr->drv_cap_mask = mask;
  271. }
/* Maps one template opcode to the handler that captures that entry.
 * A handler returns the number of bytes it wrote to the dump buffer.
 */
struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};
  277. static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
  278. struct qlcnic_dump_entry *entry, __le32 *buffer)
  279. {
  280. int i;
  281. u32 addr, data;
  282. struct __crb *crb = &entry->region.crb;
  283. addr = crb->addr;
  284. for (i = 0; i < crb->no_ops; i++) {
  285. data = qlcnic_ind_rd(adapter, addr);
  286. *buffer++ = cpu_to_le32(addr);
  287. *buffer++ = cpu_to_le32(data);
  288. addr += crb->stride;
  289. }
  290. return crb->no_ops * 2 * sizeof(u32);
  291. }
/* Execute a control entry (QLCNIC_DUMP_PEG_REG / QLCNIC_DUMP_READ_CTRL):
 * each set bit of ->opcode triggers one sub-operation on the CRB register
 * at the current address; the address advances by ->stride per op.
 * Produces no dump data; returns 0 on success.
 * NOTE(review): the return type is u32, so "return -EINVAL" on poll
 * timeout reaches the caller as a large positive value, and "timeout" is
 * never reset between ops (the poll budget is shared across the whole
 * entry) — both behaviors preserved from the original; confirm intent.
 */
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
			    struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
	struct __ctrl *ctr = &entry->region.ctrl;
	int i, k, timeout = 0;
	u32 addr, data, temp;
	u8 no_ops;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		k = 0;
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				/* Plain write of val1 */
				qlcnic_ind_wr(adapter, addr, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				/* Read the register, write the value back */
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				/* reg &= val2 */
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr,
					      (data & ctr->val2));
				break;
			case QLCNIC_DUMP_ORCRB:
				/* reg |= val3 */
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr,
					      (data | ctr->val3));
				break;
			case QLCNIC_DUMP_POLLCRB:
				/* Poll until (reg & val2) == val1,
				 * sleeping 1-2ms per iteration
				 */
				while (timeout <= ctr->timeout) {
					data = qlcnic_ind_rd(adapter, addr);
					if ((data & ctr->val2) == ctr->val1)
						break;
					usleep_range(1000, 2000);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
						 "Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				/* Read reg (address optionally taken from
				 * saved-state slot index_a) into saved-state
				 * slot index_v
				 */
				temp = ctr->index_a;
				if (temp)
					addr = qlcnic_get_saved_state(adapter,
								      hdr,
								      temp);
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_set_saved_state(adapter, hdr,
						       ctr->index_v, data);
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				/* Write saved-state slot index_v (or val1
				 * when index_v is 0) to reg, address
				 * optionally from saved-state slot index_a
				 */
				temp = ctr->index_v;
				if (temp)
					data = qlcnic_get_saved_state(adapter,
								      hdr,
								      temp);
				else
					data = ctr->val1;
				temp = ctr->index_a;
				if (temp)
					addr = qlcnic_get_saved_state(adapter,
								      hdr,
								      temp);
				qlcnic_ind_wr(adapter, addr, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				/* Shift/mask/or/add transform of saved-state
				 * slot index_v, stored back in place
				 */
				data = qlcnic_get_saved_state(adapter, hdr,
							      ctr->index_v);
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				if (ctr->val2)
					data &= ctr->val2;
				data |= ctr->val3;
				data += ctr->val1;
				qlcnic_set_saved_state(adapter, hdr,
						       ctr->index_v, data);
				break;
			default:
				dev_info(&adapter->pdev->dev,
					 "Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}
  386. static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
  387. struct qlcnic_dump_entry *entry, __le32 *buffer)
  388. {
  389. int loop;
  390. u32 val, data = 0;
  391. struct __mux *mux = &entry->region.mux;
  392. val = mux->val;
  393. for (loop = 0; loop < mux->no_ops; loop++) {
  394. qlcnic_ind_wr(adapter, mux->addr, val);
  395. data = qlcnic_ind_rd(adapter, mux->read_addr);
  396. *buffer++ = cpu_to_le32(val);
  397. *buffer++ = cpu_to_le32(data);
  398. val += mux->val_stride;
  399. }
  400. return 2 * mux->no_ops * sizeof(u32);
  401. }
  402. static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
  403. struct qlcnic_dump_entry *entry, __le32 *buffer)
  404. {
  405. int i, loop;
  406. u32 cnt, addr, data, que_id = 0;
  407. struct __queue *que = &entry->region.que;
  408. addr = que->read_addr;
  409. cnt = que->read_addr_cnt;
  410. for (loop = 0; loop < que->no_ops; loop++) {
  411. qlcnic_ind_wr(adapter, que->sel_addr, que_id);
  412. addr = que->read_addr;
  413. for (i = 0; i < cnt; i++) {
  414. data = qlcnic_ind_rd(adapter, addr);
  415. *buffer++ = cpu_to_le32(data);
  416. addr += que->read_addr_stride;
  417. }
  418. que_id += que->stride;
  419. }
  420. return que->no_ops * cnt * sizeof(u32);
  421. }
  422. static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
  423. struct qlcnic_dump_entry *entry, __le32 *buffer)
  424. {
  425. int i;
  426. u32 data;
  427. void __iomem *addr;
  428. struct __ocm *ocm = &entry->region.ocm;
  429. addr = adapter->ahw->pci_base0 + ocm->read_addr;
  430. for (i = 0; i < ocm->no_ops; i++) {
  431. data = readl(addr);
  432. *buffer++ = cpu_to_le32(data);
  433. addr += ocm->read_addr_stride;
  434. }
  435. return ocm->no_ops * sizeof(u32);
  436. }
  437. static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
  438. struct qlcnic_dump_entry *entry, __le32 *buffer)
  439. {
  440. int i, count = 0;
  441. u32 fl_addr, size, val, lck_val, addr;
  442. struct __mem *rom = &entry->region.mem;
  443. fl_addr = rom->addr;
  444. size = rom->size / 4;
  445. lock_try:
  446. lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
  447. if (!lck_val && count < MAX_CTL_CHECK) {
  448. usleep_range(10000, 11000);
  449. count++;
  450. goto lock_try;
  451. }
  452. QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
  453. adapter->ahw->pci_func);
  454. for (i = 0; i < size; i++) {
  455. addr = fl_addr & 0xFFFF0000;
  456. qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
  457. addr = LSW(fl_addr) + FLASH_ROM_DATA;
  458. val = qlcnic_ind_rd(adapter, addr);
  459. fl_addr += 4;
  460. *buffer++ = cpu_to_le32(val);
  461. }
  462. QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
  463. return rom->size;
  464. }
  465. static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
  466. struct qlcnic_dump_entry *entry, __le32 *buffer)
  467. {
  468. int i;
  469. u32 cnt, val, data, addr;
  470. struct __cache *l1 = &entry->region.cache;
  471. val = l1->init_tag_val;
  472. for (i = 0; i < l1->no_ops; i++) {
  473. qlcnic_ind_wr(adapter, l1->addr, val);
  474. qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
  475. addr = l1->read_addr;
  476. cnt = l1->read_addr_num;
  477. while (cnt) {
  478. data = qlcnic_ind_rd(adapter, addr);
  479. *buffer++ = cpu_to_le32(data);
  480. addr += l1->read_addr_stride;
  481. cnt--;
  482. }
  483. val += l1->stride;
  484. }
  485. return l1->no_ops * l1->read_addr_num * sizeof(u32);
  486. }
/* L2 cache dump: for each tag, write the tag, optionally kick the control
 * register, poll (1-2ms sleeps) until the poll-mask bits clear, then read
 * ->read_addr_num words from the read-back window.
 * NOTE(review): returns u32, so "return -EINVAL" on timeout reaches the
 * caller as a huge positive value; time_out is also not reset between
 * ops, so the poll budget is shared across the whole entry — both
 * preserved from the original; confirm intent.
 */
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	/* Poll mask and poll budget are packed into the MSW of ctrl_val */
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		qlcnic_ind_wr(adapter, l2->addr, val);
		/* Kick the control register only when a command is encoded */
		if (LSW(l2->ctrl_val))
			qlcnic_ind_wr(adapter, l2->ctrl_addr,
				      LSW(l2->ctrl_val));
		if (!poll_mask)
			goto skip_poll;
		do {
			data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
			if (!(data & poll_mask))
				break;
			usleep_range(1000, 2000);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}
skip_poll:
		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			data = qlcnic_ind_rd(adapter, addr);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
  530. static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
  531. struct __mem *mem, __le32 *buffer,
  532. int *ret)
  533. {
  534. u32 addr, data, test;
  535. int i, reg_read;
  536. reg_read = mem->size;
  537. addr = mem->addr;
  538. /* check for data size of multiple of 16 and 16 byte alignment */
  539. if ((addr & 0xf) || (reg_read%16)) {
  540. dev_info(&adapter->pdev->dev,
  541. "Unaligned memory addr:0x%x size:0x%x\n",
  542. addr, reg_read);
  543. *ret = -EINVAL;
  544. return 0;
  545. }
  546. mutex_lock(&adapter->ahw->mem_lock);
  547. while (reg_read != 0) {
  548. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
  549. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
  550. qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
  551. for (i = 0; i < MAX_CTL_CHECK; i++) {
  552. test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
  553. if (!(test & TA_CTL_BUSY))
  554. break;
  555. }
  556. if (i == MAX_CTL_CHECK) {
  557. if (printk_ratelimit()) {
  558. dev_err(&adapter->pdev->dev,
  559. "failed to read through agent\n");
  560. *ret = -EIO;
  561. goto out;
  562. }
  563. }
  564. for (i = 0; i < 4; i++) {
  565. data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
  566. *buffer++ = cpu_to_le32(data);
  567. }
  568. addr += 16;
  569. reg_read -= 16;
  570. ret += 16;
  571. }
  572. out:
  573. mutex_unlock(&adapter->ahw->mem_lock);
  574. return mem->size;
  575. }
  576. /* DMA register base address */
  577. #define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
  578. /* DMA register offsets w.r.t base address */
  579. #define QLC_DMA_CMD_BUFF_ADDR_LOW 0
  580. #define QLC_DMA_CMD_BUFF_ADDR_HI 4
  581. #define QLC_DMA_CMD_STATUS_CTRL 8
/* Kick off the PEX DMA transfer described by the descriptor previously
 * written to MS memory at mem->desc_card_addr, then wait (up to 400 polls
 * of 250-500us) for completion.
 * Returns 0 on success, negative errno on register-write failure or
 * timeout.
 */
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
				struct __mem *mem)
{
	struct device *dev = &adapter->pdev->dev;
	u32 dma_no, dma_base_addr, temp_addr;
	int i, ret, dma_sts;
	void *tmpl_hdr;

	tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
	/* The DMA engine number is stashed in the template's saved state */
	dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
					QLC_83XX_DMA_ENGINE_INDEX);
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);

	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
	ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
	if (ret)
		return ret;

	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
	ret = qlcnic_ind_wr(adapter, temp_addr, 0);
	if (ret)
		return ret;

	/* Writing the command/status register starts the transfer */
	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
	if (ret)
		return ret;

	/* Wait for DMA to complete */
	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	for (i = 0; i < 400; i++) {
		dma_sts = qlcnic_ind_rd(adapter, temp_addr);
		/* BIT_1 set means the engine is still busy */
		if (dma_sts & BIT_1)
			usleep_range(250, 500);
		else
			break;
	}

	if (i >= 400) {
		dev_info(dev, "PEX DMA operation timed out");
		ret = -EIO;
	}

	return ret;
}
/* Capture mem->size bytes of card memory via the 83xx PEX DMA engine,
 * QLC_PEX_DMA_READ_SIZE bytes per pass through the pre-allocated
 * fw_dump->dma_buffer, copying each pass into @buffer.
 * On failure sets *ret to a negative errno; returns the bytes copied so
 * far (0 if nothing was captured).
 */
static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
				     struct __mem *mem,
				     __le32 *buffer, int *ret)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	u32 temp, dma_base_addr, size = 0, read_size = 0;
	struct qlcnic_pex_dma_descriptor *dma_descr;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_phys_addr;
	void *dma_buffer;
	void *tmpl_hdr;

	tmpl_hdr = fw_dump->tmpl_hdr;

	/* Check if DMA engine is available */
	temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
				      QLC_83XX_DMA_ENGINE_INDEX);
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
	temp = qlcnic_ind_rd(adapter,
			     dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
	/* BIT_31 of the status register flags engine availability */
	if (!(temp & BIT_31)) {
		dev_info(dev, "%s: DMA engine is not available\n", __func__);
		*ret = -EIO;
		return 0;
	}

	/* Create DMA descriptor */
	dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
			    GFP_KERNEL);
	if (!dma_descr) {
		*ret = -ENOMEM;
		return 0;
	}

	/* dma_desc_cmd 0:15 = 0
	 * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
	 * dma_desc_cmd 20:23 = pci function number
	 * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
	 */
	dma_phys_addr = fw_dump->phys_addr;
	dma_buffer = fw_dump->dma_buffer;
	temp = 0;
	temp = mem->dma_desc_cmd & 0xff0f;
	temp |= (adapter->ahw->pci_func & 0xf) << 4;
	dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
	dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
	dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
	dma_descr->src_addr_high = 0;

	/* Collect memory dump using multiple DMA operations if required */
	while (read_size < mem->size) {
		if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
			size = QLC_PEX_DMA_READ_SIZE;
		else
			size = mem->size - read_size;

		dma_descr->src_addr_low = mem->addr + read_size;
		dma_descr->read_data_size = size;

		/* Write DMA descriptor to MS memory*/
		temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
		*ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
					      (u32 *)dma_descr, temp);
		if (*ret) {
			dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
				 mem->desc_card_addr);
			goto free_dma_descr;
		}

		*ret = qlcnic_start_pex_dma(adapter, mem);
		if (*ret) {
			dev_info(dev, "Failed to start PEX DMA operation\n");
			goto free_dma_descr;
		}

		memcpy(buffer, dma_buffer, size);
		buffer += size / 4;
		read_size += size;
	}

free_dma_descr:
	kfree(dma_descr);

	return read_size;
}
  694. static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
  695. struct qlcnic_dump_entry *entry, __le32 *buffer)
  696. {
  697. struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
  698. struct device *dev = &adapter->pdev->dev;
  699. struct __mem *mem = &entry->region.mem;
  700. u32 data_size;
  701. int ret = 0;
  702. if (fw_dump->use_pex_dma) {
  703. data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
  704. &ret);
  705. if (ret)
  706. dev_info(dev,
  707. "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
  708. entry->hdr.mask);
  709. else
  710. return data_size;
  711. }
  712. data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
  713. if (ret) {
  714. dev_info(dev,
  715. "Failed to read memory dump using test agent method: mask[0x%x]\n",
  716. entry->hdr.mask);
  717. return 0;
  718. } else {
  719. return data_size;
  720. }
  721. }
  722. static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
  723. struct qlcnic_dump_entry *entry, __le32 *buffer)
  724. {
  725. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  726. return 0;
  727. }
  728. static int qlcnic_valid_dump_entry(struct device *dev,
  729. struct qlcnic_dump_entry *entry, u32 size)
  730. {
  731. int ret = 1;
  732. if (size != entry->hdr.cap_size) {
  733. dev_err(dev,
  734. "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
  735. entry->hdr.type, entry->hdr.mask, size,
  736. entry->hdr.cap_size);
  737. ret = 0;
  738. }
  739. return ret;
  740. }
/* QLCNIC_READ_POLLRDMWR handler: write val1 to addr1 and poll addr1 for
 * ->poll_mask; read addr2, mask it with ->mod_mask and write it back;
 * write val2 to addr1 and poll again; then record (addr2, data).
 * Returns 8 bytes on success, 0 if the first poll times out.
 * NOTE(review): the second poll loop's timeout is not acted upon — the
 * pair is recorded regardless; preserved from the original, confirm
 * against hardware intent.
 */
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
				 struct qlcnic_dump_entry *entry,
				 __le32 *buffer)
{
	struct __pollrdmwr *poll = &entry->region.pollrdmwr;
	u32 data, wait_count, poll_wait, temp;

	poll_wait = poll->poll_wait;
	qlcnic_ind_wr(adapter, poll->addr1, poll->val1);

	wait_count = 0;
	/* Busy-poll addr1 (no delay between reads), bounded by poll_wait */
	while (wait_count < poll_wait) {
		data = qlcnic_ind_rd(adapter, poll->addr1);
		if ((data & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	if (wait_count == poll_wait) {
		dev_err(&adapter->pdev->dev,
			"Timeout exceeded in %s, aborting dump\n",
			__func__);
		return 0;
	}

	/* Read-modify-write addr2 under mod_mask */
	data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
	qlcnic_ind_wr(adapter, poll->addr2, data);
	qlcnic_ind_wr(adapter, poll->addr1, poll->val2);

	wait_count = 0;
	while (wait_count < poll_wait) {
		temp = qlcnic_ind_rd(adapter, poll->addr1);
		if ((temp & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	*buffer++ = cpu_to_le32(poll->addr2);
	*buffer++ = cpu_to_le32(data);

	return 2 * sizeof(u32);
}
/* QLCNIC_DUMP_POLL_RD handler: for each op, write the select value, poll
 * ->sel_addr until the poll-mask bits are set (bounded by ->poll_wait
 * iterations), then record the (select, data) pair from ->read_addr.
 * Returns the number of bytes written, or 0 on poll timeout.
 */
static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
			      struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	struct __pollrd *pollrd = &entry->region.pollrd;
	u32 data, wait_count, poll_wait, sel_val;
	int i;

	poll_wait = pollrd->poll_wait;
	sel_val = pollrd->sel_val;

	for (i = 0; i < pollrd->no_ops; i++) {
		qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
		wait_count = 0;
		/* Busy-poll: no delay between reads, bounded by poll_wait */
		while (wait_count < poll_wait) {
			data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
			if ((data & pollrd->poll_mask) != 0)
				break;
			wait_count++;
		}

		if (wait_count == poll_wait) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return 0;
		}

		data = qlcnic_ind_rd(adapter, pollrd->read_addr);
		*buffer++ = cpu_to_le32(sel_val);
		*buffer++ = cpu_to_le32(data);
		sel_val += pollrd->sel_val_stride;
	}

	return pollrd->no_ops * (2 * sizeof(u32));
}
  806. static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
  807. struct qlcnic_dump_entry *entry, __le32 *buffer)
  808. {
  809. struct __mux2 *mux2 = &entry->region.mux2;
  810. u32 data;
  811. u32 t_sel_val, sel_val1, sel_val2;
  812. int i;
  813. sel_val1 = mux2->sel_val1;
  814. sel_val2 = mux2->sel_val2;
  815. for (i = 0; i < mux2->no_ops; i++) {
  816. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
  817. t_sel_val = sel_val1 & mux2->sel_val_mask;
  818. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  819. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  820. *buffer++ = cpu_to_le32(t_sel_val);
  821. *buffer++ = cpu_to_le32(data);
  822. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
  823. t_sel_val = sel_val2 & mux2->sel_val_mask;
  824. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  825. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  826. *buffer++ = cpu_to_le32(t_sel_val);
  827. *buffer++ = cpu_to_le32(data);
  828. sel_val1 += mux2->sel_val_stride;
  829. sel_val2 += mux2->sel_val_stride;
  830. }
  831. return mux2->no_ops * (4 * sizeof(u32));
  832. }
  833. static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
  834. struct qlcnic_dump_entry *entry, __le32 *buffer)
  835. {
  836. u32 fl_addr, size;
  837. struct __mem *rom = &entry->region.mem;
  838. fl_addr = rom->addr;
  839. size = rom->size / 4;
  840. if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
  841. (u8 *)buffer, size))
  842. return rom->size;
  843. return 0;
  844. }
/* Opcode -> handler dispatch table for 82xx minidump templates */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
/* 83xx/84xx minidump opcode dispatch table.
 *
 * Same layout and lookup scheme as qlcnic_fw_dump_ops, with three
 * 83xx-only opcodes added (POLL_RD, MUX2, POLLRDMWR) and the ROM /
 * board-config entries routed through the 83xx flash read path
 * (qlcnic_83xx_dump_rom) instead of the 82xx one.
 */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
  892. static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
  893. {
  894. uint64_t sum = 0;
  895. int count = temp_size / sizeof(uint32_t);
  896. while (count-- > 0)
  897. sum += *temp_buffer++;
  898. while (sum >> 32)
  899. sum = (sum & 0xFFFFFFFF) + (sum >> 32);
  900. return ~sum;
  901. }
  902. static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
  903. u8 *buffer, u32 size)
  904. {
  905. int ret = 0;
  906. if (qlcnic_82xx_check(adapter))
  907. return -EIO;
  908. if (qlcnic_83xx_lock_flash(adapter))
  909. return -EIO;
  910. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  911. QLC_83XX_MINIDUMP_FLASH,
  912. buffer, size / sizeof(u32));
  913. qlcnic_83xx_unlock_flash(adapter);
  914. return ret;
  915. }
  916. static int
  917. qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  918. struct qlcnic_cmd_args *cmd)
  919. {
  920. struct qlcnic_83xx_dump_template_hdr tmp_hdr;
  921. u32 size = sizeof(tmp_hdr) / sizeof(u32);
  922. int ret = 0;
  923. if (qlcnic_82xx_check(adapter))
  924. return -EIO;
  925. if (qlcnic_83xx_lock_flash(adapter))
  926. return -EIO;
  927. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  928. QLC_83XX_MINIDUMP_FLASH,
  929. (u8 *)&tmp_hdr, size);
  930. qlcnic_83xx_unlock_flash(adapter);
  931. cmd->rsp.arg[2] = tmp_hdr.size;
  932. cmd->rsp.arg[3] = tmp_hdr.version;
  933. return ret;
  934. }
  935. static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  936. u32 *version, u32 *temp_size,
  937. u8 *use_flash_temp)
  938. {
  939. int err = 0;
  940. struct qlcnic_cmd_args cmd;
  941. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
  942. return -ENOMEM;
  943. err = qlcnic_issue_cmd(adapter, &cmd);
  944. if (err != QLCNIC_RCODE_SUCCESS) {
  945. if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
  946. qlcnic_free_mbx_args(&cmd);
  947. return -EIO;
  948. }
  949. *use_flash_temp = 1;
  950. }
  951. *temp_size = cmd.rsp.arg[2];
  952. *version = cmd.rsp.arg[3];
  953. qlcnic_free_mbx_args(&cmd);
  954. if (!(*temp_size))
  955. return -EIO;
  956. return 0;
  957. }
  958. static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
  959. u32 *buffer, u32 temp_size)
  960. {
  961. int err = 0, i;
  962. void *tmp_addr;
  963. __le32 *tmp_buf;
  964. struct qlcnic_cmd_args cmd;
  965. dma_addr_t tmp_addr_t = 0;
  966. tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
  967. &tmp_addr_t, GFP_KERNEL);
  968. if (!tmp_addr)
  969. return -ENOMEM;
  970. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
  971. err = -ENOMEM;
  972. goto free_mem;
  973. }
  974. cmd.req.arg[1] = LSD(tmp_addr_t);
  975. cmd.req.arg[2] = MSD(tmp_addr_t);
  976. cmd.req.arg[3] = temp_size;
  977. err = qlcnic_issue_cmd(adapter, &cmd);
  978. tmp_buf = tmp_addr;
  979. if (err == QLCNIC_RCODE_SUCCESS) {
  980. for (i = 0; i < temp_size / sizeof(u32); i++)
  981. *buffer++ = __le32_to_cpu(*tmp_buf++);
  982. }
  983. qlcnic_free_mbx_args(&cmd);
  984. free_mem:
  985. dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
  986. return err;
  987. }
  988. int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
  989. {
  990. struct qlcnic_hardware_context *ahw;
  991. struct qlcnic_fw_dump *fw_dump;
  992. u32 version, csum, *tmp_buf;
  993. u8 use_flash_temp = 0;
  994. u32 temp_size = 0;
  995. void *temp_buffer;
  996. int err;
  997. ahw = adapter->ahw;
  998. fw_dump = &ahw->fw_dump;
  999. err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
  1000. &use_flash_temp);
  1001. if (err) {
  1002. dev_err(&adapter->pdev->dev,
  1003. "Can't get template size %d\n", err);
  1004. return -EIO;
  1005. }
  1006. fw_dump->tmpl_hdr = vzalloc(temp_size);
  1007. if (!fw_dump->tmpl_hdr)
  1008. return -ENOMEM;
  1009. tmp_buf = (u32 *)fw_dump->tmpl_hdr;
  1010. if (use_flash_temp)
  1011. goto flash_temp;
  1012. err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
  1013. if (err) {
  1014. flash_temp:
  1015. err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
  1016. temp_size);
  1017. if (err) {
  1018. dev_err(&adapter->pdev->dev,
  1019. "Failed to get minidump template header %d\n",
  1020. err);
  1021. vfree(fw_dump->tmpl_hdr);
  1022. fw_dump->tmpl_hdr = NULL;
  1023. return -EIO;
  1024. }
  1025. }
  1026. csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
  1027. if (csum) {
  1028. dev_err(&adapter->pdev->dev,
  1029. "Template header checksum validation failed\n");
  1030. vfree(fw_dump->tmpl_hdr);
  1031. fw_dump->tmpl_hdr = NULL;
  1032. return -EIO;
  1033. }
  1034. qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
  1035. if (fw_dump->use_pex_dma) {
  1036. fw_dump->dma_buffer = NULL;
  1037. temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
  1038. QLC_PEX_DMA_READ_SIZE,
  1039. &fw_dump->phys_addr,
  1040. GFP_KERNEL);
  1041. if (!temp_buffer)
  1042. fw_dump->use_pex_dma = false;
  1043. else
  1044. fw_dump->dma_buffer = temp_buffer;
  1045. }
  1046. dev_info(&adapter->pdev->dev,
  1047. "Default minidump capture mask 0x%x\n",
  1048. fw_dump->cap_mask);
  1049. qlcnic_enable_fw_dump_state(adapter);
  1050. return 0;
  1051. }
/* Capture a firmware minidump by walking the template entry list.
 *
 * Preconditions: a validated template header must be present and dump
 * state enabled; a previous, uncollected dump (fw_dump->clr) blocks a
 * new capture. The dump data buffer is sized from the per-capability
 * sizes of every level selected in cap_mask, then each template entry
 * whose mask matches is dispatched to its opcode handler; entries with
 * no handler or an invalid capture result are flagged QLCNIC_DUMP_SKIP.
 * On completion a udev KOBJ_CHANGE event ("FW_DUMP=<ifname>") announces
 * the dump's availability.
 *
 * Returns 0 on success, -EIO when capture is not possible, -ENOMEM on
 * buffer allocation failure.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	static const struct qlcnic_dump_operations *fw_dump_ops;
	struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	int i, k, ops_cnt, ops_index, dump_size = 0;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_entry *entry;
	void *tmpl_hdr;
	u32 ocm_window;
	__le32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};

	ahw = adapter->ahw;
	tmpl_hdr = fw_dump->tmpl_hdr;

	/* Return if we don't have firmware dump template header */
	if (!tmpl_hdr)
		return -EIO;

	if (!qlcnic_check_fw_dump_state(adapter)) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	/* Refuse to overwrite a dump the user has not retrieved yet. */
	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
	/* Calculate the size for dump data area only: walk each capture
	 * mask bit (starting at bit 1) and sum the sizes of the levels
	 * enabled in cap_mask.
	 */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & fw_dump->cap_mask)
			dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);

	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = fw_dump->num_entries;
	entry_offset = fw_dump->offset;
	/* Record driver and firmware versions in the template sys-info. */
	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);

	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		/* 83xx: additionally stash this function's OCM window and
		 * PCI function number in the template's saved state for
		 * the entry handlers to use.
		 */
		hdr_83xx = tmpl_hdr;
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
		hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		/* Template entries are chained by their header offsets. */
		entry = tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & fw_dump->cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(dev, "Skipping unknown entry opcode %d\n",
				 entry->hdr.type);
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Advance the output cursor by the entry's captured size. */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}

	fw_dump->clr = 1;
	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
	netdev_info(adapter->netdev,
		    "Dump data %d bytes captured, template header size %d bytes\n",
		    fw_dump->size, fw_dump->tmpl_hdr_size);
	/* Send a udev event to notify availability of FW dump */
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);

	return 0;
}
  1149. void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
  1150. {
  1151. u32 prev_version, current_version;
  1152. struct qlcnic_hardware_context *ahw = adapter->ahw;
  1153. struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
  1154. struct pci_dev *pdev = adapter->pdev;
  1155. prev_version = adapter->fw_version;
  1156. current_version = qlcnic_83xx_get_fw_version(adapter);
  1157. if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
  1158. if (fw_dump->tmpl_hdr)
  1159. vfree(fw_dump->tmpl_hdr);
  1160. if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
  1161. dev_info(&pdev->dev, "Supports FW dump capability\n");
  1162. }
  1163. }