dbg.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program;
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/devcoredump.h>

#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"

/**
 * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
 *
 * @fwrt_ptr: pointer to the buffer coming from fwrt
 * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
 *	transport's data.
 * @trans_len: length of the valid data in trans_ptr
 * @fwrt_len: length of the valid data in fwrt_ptr
 */
struct iwl_fw_dump_ptrs {
	struct iwl_trans_dump_data *trans_ptr;
	void *fwrt_ptr;
	u32 fwrt_len;
};

#define RADIO_REG_MAX_READ 0x2ad

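/*
 * iwl_read_radio_regs - dump the radio registers into a RADIO_REG TLV
 *
 * Grabs NIC access, reads RADIO_REG_MAX_READ bytes (one byte per radio
 * register address) and advances *dump_data past the new TLV. Returns
 * silently if NIC access cannot be grabbed.
 */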
static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
				struct iwl_fw_error_dump_data **dump_data)
{
	u8 *pos = (void *)(*dump_data)->data;
	unsigned long flags;
	int i;

	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
		return;

	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);

	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
		u32 rd_cmd = RADIO_RSP_RD_CMD;

		rd_cmd |= i << RADIO_RSP_ADDR_POS;
		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
		pos++;
	}

	*dump_data = iwl_fw_error_next_data(*dump_data);

	iwl_trans_release_nic_access(fwrt->trans, &flags);
}

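/*
 * iwl_fwrt_dump_rxf - dump one RX FIFO into an RXF TLV
 *
 * Records the FIFO status registers (fill level, read/write/fence pointers,
 * fence mode), locks the fence to the write pointer and reads the FIFO
 * contents through the fence-increment register. Called from
 * iwl_fw_dump_fifos() with NIC access already grabbed.
 */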
static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data,
			      int size, u32 offset, int fifo_num)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	int i;

	fifo_hdr = (void *)(*dump_data)->data;
	fifo_data = (void *)fifo_hdr->data;
	fifo_len = size;

	/* No need to try to read the data if the length is 0 */
	if (fifo_len == 0)
		return;

	/* Add a TLV for the RXF */
	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
	fifo_hdr->available_bytes =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_D_SPACE + offset));
	fifo_hdr->wr_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_WR_PTR + offset));
	fifo_hdr->rd_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_RD_PTR + offset));
	fifo_hdr->fence_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_FENCE_PTR + offset));
	fifo_hdr->fence_mode =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_SET_FENCE_MODE + offset));

	/* Lock fence */
	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
	/* Set the fence pointer to the same place as the WR pointer */
	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
	/* Set fence offset */
	iwl_trans_write_prph(fwrt->trans,
			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);

	/* Read FIFO */
	fifo_len /= sizeof(u32); /* Size in DWORDS */
	for (i = 0; i < fifo_len; i++)
		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
						   RXF_FIFO_RD_FENCE_INC +
						   offset);
	*dump_data = iwl_fw_error_next_data(*dump_data);
}

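/*
 * iwl_fwrt_dump_txf - dump one TX FIFO into a TXF TLV
 *
 * Records the FIFO status registers, points TXF_READ_MODIFY_ADDR at the
 * write pointer and reads the FIFO contents back through
 * TXF_READ_MODIFY_DATA. Called from iwl_fw_dump_fifos() with NIC access
 * already grabbed.
 */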
static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data,
			      int size, u32 offset, int fifo_num)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	int i;

	fifo_hdr = (void *)(*dump_data)->data;
	fifo_data = (void *)fifo_hdr->data;
	fifo_len = size;

	/* No need to try to read the data if the length is 0 */
	if (fifo_len == 0)
		return;

	/* Add a TLV for the FIFO */
	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
	fifo_hdr->available_bytes =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_FIFO_ITEM_CNT + offset));
	fifo_hdr->wr_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_WR_PTR + offset));
	fifo_hdr->rd_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_RD_PTR + offset));
	fifo_hdr->fence_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_FENCE_PTR + offset));
	fifo_hdr->fence_mode =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_LOCK_FENCE + offset));

	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
	iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
			     TXF_WR_PTR + offset);

	/* Dummy-read to advance the read pointer to the head */
	iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);

	/* Read FIFO */
	fifo_len /= sizeof(u32); /* Size in DWORDS */
	for (i = 0; i < fifo_len; i++)
		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
						   TXF_READ_MODIFY_DATA +
						   offset);
	*dump_data = iwl_fw_error_next_data(*dump_data);
}

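/*
 * iwl_fw_dump_fifos - dump all RX/TX FIFOs
 *
 * Grabs NIC access and pulls RXF1 and RXF2 (plus LMAC2's RXF1 where more
 * than one LMAC is present), the TX FIFOs of each LMAC, and - if the
 * firmware advertises the extended shared memory config - the UMAC
 * internal TX FIFOs.
 */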
static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
	u32 *fifo_data;
	u32 fifo_len;
	unsigned long flags;
	int i, j;

	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
		return;

	/* Pull RXF1 */
	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
	/* Pull RXF2 */
	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
			  RXF_DIFF_FROM_PREV, 1);
	/* Pull LMAC2 RXF1 */
	if (fwrt->smem_cfg.num_lmacs > 1)
		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
				  LMAC2_PRPH_OFFSET, 2);

	/* Pull TXF data from LMAC1 */
	for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
		/* Mark the number of TXF we're pulling now */
		iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
		iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
				  0, i);
	}

	/* Pull TXF data from LMAC2 */
	if (fwrt->smem_cfg.num_lmacs > 1) {
		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
			/* Mark the number of TXF we're pulling now */
			iwl_trans_write_prph(fwrt->trans,
					     TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
					     i);
			iwl_fwrt_dump_txf(fwrt, dump_data,
					  cfg->lmac[1].txfifo_size[i],
					  LMAC2_PRPH_OFFSET,
					  i + cfg->num_txfifo_entries);
		}
	}

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		/* Pull UMAC internal TXF data from all TXFs */
		for (i = 0;
		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
		     i++) {
			fifo_hdr = (void *)(*dump_data)->data;
			fifo_data = (void *)fifo_hdr->data;
			fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];

			/* No need to try to read the data if the length is 0 */
			if (fifo_len == 0)
				continue;

			/* Add a TLV for the internal FIFOs */
			(*dump_data)->type =
				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
			(*dump_data)->len =
				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

			fifo_hdr->fifo_num = cpu_to_le32(i);

			/* Mark the number of TXF we're pulling now */
			iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
					     fwrt->smem_cfg.num_txfifo_entries);

			fifo_hdr->available_bytes =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_FIFO_ITEM_CNT));
			fifo_hdr->wr_ptr =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_WR_PTR));
			fifo_hdr->rd_ptr =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_RD_PTR));
			fifo_hdr->fence_ptr =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_FENCE_PTR));
			fifo_hdr->fence_mode =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_LOCK_FENCE));

			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
			iwl_trans_write_prph(fwrt->trans,
					     TXF_CPU2_READ_MODIFY_ADDR,
					     TXF_CPU2_WR_PTR);

			/* Dummy-read to advance the read pointer to head */
			iwl_trans_read_prph(fwrt->trans,
					    TXF_CPU2_READ_MODIFY_DATA);

			/* Read FIFO */
			fifo_len /= sizeof(u32); /* Size in DWORDS */
			for (j = 0; j < fifo_len; j++)
				fifo_data[j] =
					iwl_trans_read_prph(fwrt->trans,
							    TXF_CPU2_READ_MODIFY_DATA);
			*dump_data = iwl_fw_error_next_data(*dump_data);
		}
	}

	iwl_trans_release_nic_access(fwrt->trans, &flags);
}

#define IWL8260_ICCM_OFFSET	0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN	0xC000 /* Only for B-step */

struct iwl_prph_range {
	u32 start, end;
};

static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};

static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
	{ .start = 0x00a05c00, .end = 0x00a05c18 },
	{ .start = 0x00a05400, .end = 0x00a056e8 },
	{ .start = 0x00a08000, .end = 0x00a098bc },
	{ .start = 0x00a02400, .end = 0x00a02758 },
};

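/*
 * _iwl_read_prph_block - read a block of periphery registers
 *
 * Reads len_bytes starting at @start, one DWORD at a time, into @data as
 * little-endian values. The caller must already hold NIC access;
 * iwl_read_prph_block() below is the variant that grabs it by itself.
 */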
static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
				 u32 len_bytes, __le32 *data)
{
	u32 i;

	for (i = 0; i < len_bytes; i += 4)
		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
}

static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
				u32 len_bytes, __le32 *data)
{
	unsigned long flags;
	bool success = false;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		success = true;
		_iwl_read_prph_block(trans, start, len_bytes, data);
		iwl_trans_release_nic_access(trans, &flags);
	}

	return success;
}

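/*
 * iwl_dump_prph - dump a set of periphery register ranges
 *
 * Emits one PRPH TLV per (inclusive) range in @iwl_prph_dump_addr and
 * advances *data accordingly. Returns silently if NIC access cannot be
 * grabbed.
 */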
static void iwl_dump_prph(struct iwl_trans *trans,
			  struct iwl_fw_error_dump_data **data,
			  const struct iwl_prph_range *iwl_prph_dump_addr,
			  u32 range_len)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	for (i = 0; i < range_len; i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					   num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		_iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
				     /* our range is inclusive, hence + 4 */
				     iwl_prph_dump_addr[i].end -
				     iwl_prph_dump_addr[i].start + 4,
				     (void *)prph->data);
		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);
}

/*
 * alloc_sgtable - allocates a scatterlist table of the given size,
 *	fills it with pages and returns it
 * @size: the size (in bytes) of the table
 */
static struct scatterlist *alloc_sgtable(int size)
{
	int alloc_size, nents, i;
	struct page *new_page;
	struct scatterlist *iter;
	struct scatterlist *table;

	nents = DIV_ROUND_UP(size, PAGE_SIZE);
	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;
	sg_init_table(table, nents);
	iter = table;
	for_each_sg(table, iter, sg_nents(table), i) {
		new_page = alloc_page(GFP_KERNEL);
		if (!new_page) {
			/* release all previous allocated pages in the table */
			iter = table;
			for_each_sg(table, iter, sg_nents(table), i) {
				new_page = sg_page(iter);
				if (new_page)
					__free_page(new_page);
			}
			/* also free the table itself, it won't be returned */
			kfree(table);
			return NULL;
		}
		alloc_size = min_t(int, size, PAGE_SIZE);
		size -= PAGE_SIZE;
		sg_set_page(iter, new_page, alloc_size, 0);
	}
	return table;
}

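/*
 * iwl_fw_error_dump - collect the firmware error dump and hand it to
 * devcoredump
 *
 * Computes the total dump size (device info, shared memory config, FIFOs,
 * PRPH ranges, SMEM/SRAM regions, paging blocks and the trigger
 * description), fills the TLVs, appends the transport dump and pushes the
 * result out via dev_coredumpsg().
 */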
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
{
	struct iwl_fw_error_dump_file *dump_file;
	struct iwl_fw_error_dump_data *dump_data;
	struct iwl_fw_error_dump_info *dump_info;
	struct iwl_fw_error_dump_mem *dump_mem;
	struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
	struct iwl_fw_error_dump_trigger_desc *dump_trig;
	struct iwl_fw_dump_ptrs *fw_error_dump;
	struct scatterlist *sg_dump_data;
	u32 sram_len, sram_ofs;
	const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
	struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
	u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
	u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
				0 : fwrt->trans->cfg->dccm2_len;
	bool monitor_dump_only = false;
	int i;

	/* there's no point in fw dump if the bus is dead */
	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
		goto out;
	}

	if (fwrt->dump.trig &&
	    fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
		monitor_dump_only = true;

	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
	if (!fw_error_dump)
		goto out;

	/* SRAM - include stack CCM if driver knows the values for it */
	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
		const struct fw_img *img;

		img = &fwrt->fw->img[fwrt->cur_fw_img];
		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
	} else {
		sram_ofs = fwrt->trans->cfg->dccm_offset;
		sram_len = fwrt->trans->cfg->dccm_len;
	}

	/* reading RXF/TXF sizes */
	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
		fifo_data_len = 0;

		/* Count RXF2 size */
		if (mem_cfg->rxfifo2_size) {
			/* Add header info */
			fifo_data_len += mem_cfg->rxfifo2_size +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}

		/* Count RXF1 sizes */
		for (i = 0; i < mem_cfg->num_lmacs; i++) {
			if (!mem_cfg->lmac[i].rxfifo1_size)
				continue;

			/* Add header info */
			fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}

		/* Count TXF sizes */
		for (i = 0; i < mem_cfg->num_lmacs; i++) {
			int j;

			for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
				if (!mem_cfg->lmac[i].txfifo_size[j])
					continue;

				/* Add header info */
				fifo_data_len +=
					mem_cfg->lmac[i].txfifo_size[j] +
					sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_fifo);
			}
		}

		if (fw_has_capa(&fwrt->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
			for (i = 0;
			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
			     i++) {
				if (!mem_cfg->internal_txfifo_size[i])
					continue;

				/* Add header info */
				fifo_data_len +=
					mem_cfg->internal_txfifo_size[i] +
					sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_fifo);
			}
		}

		/* Make room for PRPH registers */
		if (!fwrt->trans->cfg->gen2) {
			for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
			     i++) {
				/* The range includes both boundaries */
				int num_bytes_in_chunk =
					iwl_prph_dump_addr_comm[i].end -
					iwl_prph_dump_addr_comm[i].start + 4;

				prph_len += sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_prph) +
					num_bytes_in_chunk;
			}
		}

		if (!fwrt->trans->cfg->gen2 &&
		    fwrt->trans->cfg->mq_rx_supported) {
			for (i = 0; i <
				ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
				/* The range includes both boundaries */
				int num_bytes_in_chunk =
					iwl_prph_dump_addr_9000[i].end -
					iwl_prph_dump_addr_9000[i].start + 4;

				prph_len += sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_prph) +
					num_bytes_in_chunk;
			}
		}

		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
	}

	file_len = sizeof(*dump_file) +
		   sizeof(*dump_data) * 3 +
		   sizeof(*dump_smem_cfg) +
		   fifo_data_len +
		   prph_len +
		   radio_len +
		   sizeof(*dump_info);

	/* Make room for the SMEM, if it exists */
	if (smem_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;

	/* Make room for the secondary SRAM, if it exists */
	if (sram2_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;

	/* Make room for MEM segments */
	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
			    le32_to_cpu(fw_dbg_mem[i].len);
	}

	/* Make room for fw's virtual image pages, if they exist */
	if (!fwrt->trans->cfg->gen2 &&
	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
	    fwrt->fw_paging_db[0].fw_paging_block)
		file_len += fwrt->num_of_paging_blk *
			(sizeof(*dump_data) +
			 sizeof(struct iwl_fw_error_dump_paging) +
			 PAGING_BLOCK_SIZE);

	/* If we only want a monitor dump, reset the file length */
	if (monitor_dump_only) {
		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
	}

	if (fwrt->dump.desc)
		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
			    fwrt->dump.desc->len;

	if (!fwrt->fw->n_dbg_mem_tlv)
		file_len += sram_len + sizeof(*dump_mem);

	dump_file = vzalloc(file_len);
	if (!dump_file) {
		kfree(fw_error_dump);
		goto out;
	}

	fw_error_dump->fwrt_ptr = dump_file;

	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
	dump_data = (void *)dump_file->data;

	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
	dump_data->len = cpu_to_le32(sizeof(*dump_info));
	dump_info = (void *)dump_data->data;
	dump_info->device_family =
		fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
	memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
	       sizeof(dump_info->fw_human_readable));
	strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
		sizeof(dump_info->dev_human_readable));
	strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
		sizeof(dump_info->bus_human_readable));

	dump_data = iwl_fw_error_next_data(dump_data);

	/* Dump shared memory configuration */
	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
	dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
	dump_smem_cfg = (void *)dump_data->data;
	dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
	dump_smem_cfg->num_txfifo_entries =
		cpu_to_le32(mem_cfg->num_txfifo_entries);
	for (i = 0; i < MAX_NUM_LMAC; i++) {
		int j;

		for (j = 0; j < TX_FIFO_MAX_NUM; j++)
			dump_smem_cfg->lmac[i].txfifo_size[j] =
				cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
		dump_smem_cfg->lmac[i].rxfifo1_size =
			cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
	}
	dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
	dump_smem_cfg->internal_txfifo_addr =
		cpu_to_le32(mem_cfg->internal_txfifo_addr);
	for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
		dump_smem_cfg->internal_txfifo_size[i] =
			cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
	}

	dump_data = iwl_fw_error_next_data(dump_data);

	/* We only dump the FIFOs if the FW is in error state */
	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
		iwl_fw_dump_fifos(fwrt, &dump_data);
		if (radio_len)
			iwl_read_radio_regs(fwrt, &dump_data);
	}

	if (fwrt->dump.desc) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
					     fwrt->dump.desc->len);
		dump_trig = (void *)dump_data->data;
		memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
		       sizeof(*dump_trig) + fwrt->dump.desc->len);

		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* In case we only want monitor dump, skip to dump transport data */
	if (monitor_dump_only)
		goto dump_trans_data;

	if (!fwrt->fw->n_dbg_mem_tlv) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(sram_ofs);
		iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
					 sram_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
		u32 len = le32_to_cpu(fw_dbg_mem[i].len);
		u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
		bool success;

		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = fw_dbg_mem[i].data_type;
		dump_mem->offset = cpu_to_le32(ofs);

		switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
		case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
			iwl_trans_read_mem_bytes(fwrt->trans, ofs,
						 dump_mem->data,
						 len);
			success = true;
			break;
		case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
			success = iwl_read_prph_block(fwrt->trans, ofs, len,
						      (void *)dump_mem->data);
			break;
		default:
			/*
			 * shouldn't get here, we ignored this kind
			 * of TLV earlier during the TLV parsing?!
			 */
			WARN_ON(1);
			success = false;
		}

		if (success)
			dump_data = iwl_fw_error_next_data(dump_data);
	}

	if (smem_len) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
		iwl_trans_read_mem_bytes(fwrt->trans,
					 fwrt->trans->cfg->smem_offset,
					 dump_mem->data, smem_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	if (sram2_len) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
		iwl_trans_read_mem_bytes(fwrt->trans,
					 fwrt->trans->cfg->dccm2_offset,
					 dump_mem->data, sram2_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* Dump fw's virtual image */
	if (!fwrt->trans->cfg->gen2 &&
	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
	    fwrt->fw_paging_db[0].fw_paging_block) {
		for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
			struct iwl_fw_error_dump_paging *paging;
			struct page *pages =
				fwrt->fw_paging_db[i].fw_paging_block;
			dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;

			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			dump_data->len = cpu_to_le32(sizeof(*paging) +
						     PAGING_BLOCK_SIZE);
			paging = (void *)dump_data->data;
			paging->index = cpu_to_le32(i);
			dma_sync_single_for_cpu(fwrt->trans->dev, addr,
						PAGING_BLOCK_SIZE,
						DMA_BIDIRECTIONAL);
			memcpy(paging->data, page_address(pages),
			       PAGING_BLOCK_SIZE);
			dump_data = iwl_fw_error_next_data(dump_data);
		}
	}

	if (prph_len) {
		iwl_dump_prph(fwrt->trans, &dump_data,
			      iwl_prph_dump_addr_comm,
			      ARRAY_SIZE(iwl_prph_dump_addr_comm));
		if (fwrt->trans->cfg->mq_rx_supported)
			iwl_dump_prph(fwrt->trans, &dump_data,
				      iwl_prph_dump_addr_9000,
				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
	}

dump_trans_data:
	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
						       fwrt->dump.trig);
	fw_error_dump->fwrt_len = file_len;
	if (fw_error_dump->trans_ptr)
		file_len += fw_error_dump->trans_ptr->len;
	dump_file->file_len = cpu_to_le32(file_len);

	sg_dump_data = alloc_sgtable(file_len);
	if (sg_dump_data) {
		sg_pcopy_from_buffer(sg_dump_data,
				     sg_nents(sg_dump_data),
				     fw_error_dump->fwrt_ptr,
				     fw_error_dump->fwrt_len, 0);
		if (fw_error_dump->trans_ptr)
			sg_pcopy_from_buffer(sg_dump_data,
					     sg_nents(sg_dump_data),
					     fw_error_dump->trans_ptr->data,
					     fw_error_dump->trans_ptr->len,
					     fw_error_dump->fwrt_len);
		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
			       GFP_KERNEL);
	}
	vfree(fw_error_dump->fwrt_ptr);
	vfree(fw_error_dump->trans_ptr);
	kfree(fw_error_dump);

out:
	iwl_fw_free_dump_desc(fwrt);
	fwrt->dump.trig = NULL;
	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_dump);

const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
	.trig_desc = {
		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
	},
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);

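/*
 * iwl_fw_dbg_collect_desc - schedule a debug data collection
 *
 * Stores the dump descriptor and trigger, then schedules dump.wk after the
 * trigger's stop_delay. Fails with -EIO if the firmware isn't alive and
 * with -EBUSY if a dump is already in progress.
 */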
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
			    const struct iwl_fw_dump_desc *desc,
			    const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	unsigned int delay = 0;

	if (trigger)
		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));

	if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
		 "Can't collect dbg data when FW isn't alive\n"))
		return -EIO;

	if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
		return -EBUSY;

	if (WARN_ON(fwrt->dump.desc))
		iwl_fw_free_dump_desc(fwrt);

	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
		 le32_to_cpu(desc->trig_desc.type));

	fwrt->dump.desc = desc;
	fwrt->dump.trig = trigger;

	schedule_delayed_work(&fwrt->dump.wk, delay);

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);

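/*
 * iwl_fw_dbg_collect - collect debug data for a given trigger type
 *
 * Allocates a dump descriptor carrying @str as the trigger description and
 * hands it to iwl_fw_dbg_collect_desc().
 */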
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
		       enum iwl_fw_dbg_trigger trig,
		       const char *str, size_t len,
		       const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_fw_dump_desc *desc;

	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	desc->len = len;
	desc->trig_desc.type = cpu_to_le32(trig);
	memcpy(desc->trig_desc.data, str, len);

	return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);

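/*
 * iwl_fw_dbg_collect_trig - collect debug data for a trigger TLV
 *
 * Formats the optional description, collects the data and decrements the
 * trigger's remaining occurrences counter. Does nothing once the counter
 * has reached zero.
 */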
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
			    struct iwl_fw_dbg_trigger_tlv *trigger,
			    const char *fmt, ...)
{
	u16 occurrences = le16_to_cpu(trigger->occurrences);
	int ret, len = 0;
	char buf[64];

	if (!occurrences)
		return 0;

	if (fmt) {
		va_list ap;

		buf[sizeof(buf) - 1] = '\0';

		va_start(ap, fmt);
		vsnprintf(buf, sizeof(buf), fmt, ap);
		va_end(ap);

		/* check for truncation */
		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
			buf[sizeof(buf) - 1] = '\0';

		len = strlen(buf) + 1;
	}

	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
				 trigger);

	if (ret)
		return ret;

	trigger->occurrences = cpu_to_le16(occurrences - 1);
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);

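/*
 * iwl_fw_start_dbg_conf - apply a firmware debug configuration
 *
 * Sends the host commands of the requested dbg_conf TLV to the firmware
 * and remembers the active configuration. FW_DBG_START_FROM_ALIVE is a
 * no-op when it carries no host commands, since that early-start
 * configuration is hard coded in the firmware.
 */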
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
{
	u8 *ptr;
	int ret;
	int i;

	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
		      "Invalid configuration %d\n", conf_id))
		return -EINVAL;

	/* EARLY START - firmware's configuration is hard coded */
	if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
	     !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
	    conf_id == FW_DBG_START_FROM_ALIVE)
		return 0;

	if (!fwrt->fw->dbg_conf_tlv[conf_id])
		return -EINVAL;

	if (fwrt->dump.conf != FW_DBG_INVALID)
		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
			 fwrt->dump.conf);

	/* Send all HCMDs for configuring the FW debug */
	ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
	for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
		struct iwl_host_cmd hcmd = {
			.id = cmd->id,
			.len = { le16_to_cpu(cmd->len), },
			.data = { cmd->data, },
		};

		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		ptr += sizeof(*cmd);
		ptr += le16_to_cpu(cmd->len);
	}

	fwrt->dump.conf = conf_id;

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);

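/*
 * iwl_fw_error_dump_wk - delayed work that performs the actual dump
 *
 * Calls the dump_start/dump_end ops around the collection, stops the debug
 * monitor recording (MON_BUFF registers on family 7000, DBGC registers
 * otherwise), collects the dump and restarts recording if the firmware
 * hasn't crashed.
 */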
void iwl_fw_error_dump_wk(struct work_struct *work)
{
	struct iwl_fw_runtime *fwrt =
		container_of(work, struct iwl_fw_runtime, dump.wk.work);

	if (fwrt->ops && fwrt->ops->dump_start &&
	    fwrt->ops->dump_start(fwrt->ops_ctx))
		return;

	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		/* stop recording */
		iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);

		iwl_fw_error_dump(fwrt);

		/* start recording again if the firmware is not crashed */
		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
		    fwrt->fw->dbg_dest_tlv) {
			iwl_clear_bits_prph(fwrt->trans,
					    MON_BUFF_SAMPLE_CTL, 0x100);
			iwl_clear_bits_prph(fwrt->trans,
					    MON_BUFF_SAMPLE_CTL, 0x1);
			iwl_set_bits_prph(fwrt->trans,
					  MON_BUFF_SAMPLE_CTL, 0x1);
		}
	} else {
		u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
		u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);

		/* stop recording */
		iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
		udelay(100);
		iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
		/* wait for the DBGC to stop before we collect the data */
		udelay(500);

		iwl_fw_error_dump(fwrt);

		/* start recording again if the firmware is not crashed */
		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
		    fwrt->fw->dbg_dest_tlv) {
			iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
			iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
		}
	}

	if (fwrt->ops && fwrt->ops->dump_end)
		fwrt->ops->dump_end(fwrt->ops_ctx);
}