  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2015-2018 Etnaviv Project
  4. */
  5. #include <linux/devcoredump.h>
  6. #include "etnaviv_cmdbuf.h"
  7. #include "etnaviv_dump.h"
  8. #include "etnaviv_gem.h"
  9. #include "etnaviv_gpu.h"
  10. #include "etnaviv_mmu.h"
  11. #include "etnaviv_sched.h"
  12. #include "state.xml.h"
  13. #include "state_hi.xml.h"
/*
 * Dump gating flag: a devcoredump is written only while this is true, and
 * etnaviv_core_dump() clears it after the first dump.  Writable at runtime
 * (mode 0600) via the "dump_core" module parameter to re-arm dumping.
 */
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
/*
 * State shared by the individual dump writers.  The dump file is laid out
 * as an array of object headers followed by the per-object payloads; the
 * iterator tracks the next unused header slot and the next free payload
 * byte while the sections are emitted in order.
 */
struct core_dump_iterator {
	void *start;	/* base of the allocated dump buffer */
	struct etnaviv_dump_object_header *hdr;	/* next header slot to fill */
	void *data;	/* next free payload byte */
};
/*
 * MMIO registers captured into the ETDUMP_BUF_REG section of the dump:
 * host interface (HI), power management (PM), memory controller / MMU (MC)
 * and front-end DMA (FE) state read at the time of the hang.
 */
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
  60. static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
  61. u32 type, void *data_end)
  62. {
  63. struct etnaviv_dump_object_header *hdr = iter->hdr;
  64. hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
  65. hdr->type = cpu_to_le32(type);
  66. hdr->file_offset = cpu_to_le32(iter->data - iter->start);
  67. hdr->file_size = cpu_to_le32(data_end - iter->data);
  68. iter->hdr++;
  69. iter->data += hdr->file_size;
  70. }
  71. static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
  72. struct etnaviv_gpu *gpu)
  73. {
  74. struct etnaviv_dump_registers *reg = iter->data;
  75. unsigned int i;
  76. for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
  77. reg->reg = etnaviv_dump_registers[i];
  78. reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
  79. }
  80. etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
  81. }
  82. static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
  83. struct etnaviv_gpu *gpu, size_t mmu_size)
  84. {
  85. etnaviv_iommu_dump(gpu->mmu, iter->data);
  86. etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
  87. }
  88. static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
  89. void *ptr, size_t size, u64 iova)
  90. {
  91. memcpy(iter->data, ptr, size);
  92. iter->hdr->iova = cpu_to_le64(iova);
  93. etnaviv_core_dump_header(iter, type, iter->data + size);
  94. }
/*
 * etnaviv_core_dump() - capture GPU state into a devcoredump file.
 *
 * Two-pass routine: first walk the scheduler's in-flight jobs and the MMU's
 * active buffer-object mappings to compute the total file size and object
 * count, then allocate one vmalloc buffer and emit the sections in order:
 * registers, MMU page tables, kernel ring buffer, per-job command buffers,
 * an optional page-address map (bomap), each active BO's contents, and an
 * end marker.  The finished buffer is handed to dev_coredumpv(), which owns
 * and frees it.
 *
 * @gpu: the hung GPU core to dump
 */
void etnaviv_core_dump(struct etnaviv_gpu *gpu)
{
	struct core_dump_iterator iter;
	struct etnaviv_vram_mapping *vram;
	struct etnaviv_gem_object *obj;
	struct etnaviv_gem_submit *submit;
	struct drm_sched_job *s_job;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);

	/* We always dump registers, mmu, ring and end marker */
	n_obj = 4;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		mmu_size + gpu->buffer.size;

	/* Add in the active command buffers */
	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		file_size += submit->cmdbuf.size;
		n_obj++;
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Add in the active buffer objects */
	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		if (!vram->use)
			continue;
		obj = vram->object;
		file_size += obj->base.size;
		/* one bomap entry per page of the object */
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/*
	 * Allocate the file in vmalloc memory, it's likely to be big.
	 * NOWARN/NORETRY: dumping is best-effort, so fail quietly rather
	 * than stress the allocator in an already-unhealthy situation.
	 */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			       PAGE_KERNEL);
	if (!iter.start) {
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	/* zero the header region; payload bytes are fully overwritten below */
	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer));

	/*
	 * NOTE(review): the job list lock is dropped between the sizing pass
	 * above and this copy pass; if the list changed in between, the
	 * pre-computed file_size would no longer match what is written here.
	 * Presumably the scheduler is stopped during hang recovery — verify.
	 */
	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
				      submit->cmdbuf.vaddr, submit->cmdbuf.size,
				      etnaviv_cmdbuf_get_va(&submit->cmdbuf));
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		struct page **pages;
		void *vaddr;

		if (vram->use == 0)
			continue;

		obj = vram->object;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (pages) {
			int j;

			/*
			 * Record where this BO's page addresses start inside
			 * the bomap section (offset in entries).
			 * NOTE(review): stored without an explicit endian
			 * conversion, unlike the other header fields —
			 * confirm the data[] field type in etnaviv_dump.h.
			 */
			iter.hdr->data[0] = bomap - bomap_start;

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		/* header is finalised even if vmap failed (payload stays zero-copied) */
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	/* zero-length end marker terminates the object list */
	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	/* dev_coredumpv() takes ownership of iter.start and frees it */
	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}