intel_guc_log.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754
  1. /*
  2. * Copyright © 2014-2017 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. */
  24. #include <linux/debugfs.h>
  25. #include <linux/relay.h>
  26. #include "intel_guc_log.h"
  27. #include "i915_drv.h"
  28. static void guc_log_capture_logs(struct intel_guc *guc);
  29. /**
  30. * DOC: GuC firmware log
  31. *
  32. * Firmware log is enabled by setting i915.guc_log_level to a positive level.
  33. * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
  34. * i915_guc_load_status will print out firmware loading status and scratch
  35. * registers value.
  36. */
  37. static int guc_log_flush_complete(struct intel_guc *guc)
  38. {
  39. u32 action[] = {
  40. INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
  41. };
  42. return intel_guc_send(guc, action, ARRAY_SIZE(action));
  43. }
  44. static int guc_log_flush(struct intel_guc *guc)
  45. {
  46. u32 action[] = {
  47. INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
  48. 0
  49. };
  50. return intel_guc_send(guc, action, ARRAY_SIZE(action));
  51. }
  52. static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
  53. {
  54. union guc_log_control control_val = {
  55. .logging_enabled = enable,
  56. .verbosity = verbosity,
  57. };
  58. u32 action[] = {
  59. INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
  60. control_val.value
  61. };
  62. return intel_guc_send(guc, action, ARRAY_SIZE(action));
  63. }
  64. /*
  65. * Sub buffer switch callback. Called whenever relay has to switch to a new
  66. * sub buffer, relay stays on the same sub buffer if 0 is returned.
  67. */
  68. static int subbuf_start_callback(struct rchan_buf *buf,
  69. void *subbuf,
  70. void *prev_subbuf,
  71. size_t prev_padding)
  72. {
  73. /*
  74. * Use no-overwrite mode by default, where relay will stop accepting
  75. * new data if there are no empty sub buffers left.
  76. * There is no strict synchronization enforced by relay between Consumer
  77. * and Producer. In overwrite mode, there is a possibility of getting
  78. * inconsistent/garbled data, the producer could be writing on to the
  79. * same sub buffer from which Consumer is reading. This can't be avoided
  80. * unless Consumer is fast enough and can always run in tandem with
  81. * Producer.
  82. */
  83. if (relay_buf_full(buf))
  84. return 0;
  85. return 1;
  86. }
  87. /*
  88. * file_create() callback. Creates relay file in debugfs.
  89. */
  90. static struct dentry *create_buf_file_callback(const char *filename,
  91. struct dentry *parent,
  92. umode_t mode,
  93. struct rchan_buf *buf,
  94. int *is_global)
  95. {
  96. struct dentry *buf_file;
  97. /*
  98. * This to enable the use of a single buffer for the relay channel and
  99. * correspondingly have a single file exposed to User, through which
  100. * it can collect the logs in order without any post-processing.
  101. * Need to set 'is_global' even if parent is NULL for early logging.
  102. */
  103. *is_global = 1;
  104. if (!parent)
  105. return NULL;
  106. /*
  107. * Not using the channel filename passed as an argument, since for each
  108. * channel relay appends the corresponding CPU number to the filename
  109. * passed in relay_open(). This should be fine as relay just needs a
  110. * dentry of the file associated with the channel buffer and that file's
  111. * name need not be same as the filename passed as an argument.
  112. */
  113. buf_file = debugfs_create_file("guc_log", mode,
  114. parent, buf, &relay_file_operations);
  115. return buf_file;
  116. }
  117. /*
  118. * file_remove() default callback. Removes relay file in debugfs.
  119. */
  120. static int remove_buf_file_callback(struct dentry *dentry)
  121. {
  122. debugfs_remove(dentry);
  123. return 0;
  124. }
/*
 * relay channel callbacks: single global no-overwrite buffer exposed
 * through one debugfs file (see the callbacks above for details).
 */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};
  131. static int guc_log_relay_file_create(struct intel_guc *guc)
  132. {
  133. struct drm_i915_private *dev_priv = guc_to_i915(guc);
  134. struct dentry *log_dir;
  135. int ret;
  136. if (!i915_modparams.guc_log_level)
  137. return 0;
  138. mutex_lock(&guc->log.runtime.relay_lock);
  139. /* For now create the log file in /sys/kernel/debug/dri/0 dir */
  140. log_dir = dev_priv->drm.primary->debugfs_root;
  141. /*
  142. * If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
  143. * not mounted and so can't create the relay file.
  144. * The relay API seems to fit well with debugfs only, for availing relay
  145. * there are 3 requirements which can be met for debugfs file only in a
  146. * straightforward/clean manner :-
  147. * i) Need the associated dentry pointer of the file, while opening the
  148. * relay channel.
  149. * ii) Should be able to use 'relay_file_operations' fops for the file.
  150. * iii) Set the 'i_private' field of file's inode to the pointer of
  151. * relay channel buffer.
  152. */
  153. if (!log_dir) {
  154. DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
  155. ret = -ENODEV;
  156. goto out_unlock;
  157. }
  158. ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
  159. if (ret < 0 && ret != -EEXIST) {
  160. DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
  161. goto out_unlock;
  162. }
  163. ret = 0;
  164. out_unlock:
  165. mutex_unlock(&guc->log.runtime.relay_lock);
  166. return ret;
  167. }
  168. static bool guc_log_has_relay(struct intel_guc *guc)
  169. {
  170. lockdep_assert_held(&guc->log.runtime.relay_lock);
  171. return guc->log.runtime.relay_chan != NULL;
  172. }
/*
 * Publish the just-filled relay sub buffer to the consumer and switch
 * the channel to the next one. Caller holds relay_lock (guc_log_has_relay
 * asserts it).
 */
static void guc_move_to_next_buf(struct intel_guc *guc)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

	if (!guc_log_has_relay(guc))
		return;

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);

	/* Switch to the next sub buffer */
	relay_flush(guc->log.runtime.relay_chan);
}
  187. static void *guc_get_write_buffer(struct intel_guc *guc)
  188. {
  189. if (!guc_log_has_relay(guc))
  190. return NULL;
  191. /*
  192. * Just get the base address of a new sub buffer and copy data into it
  193. * ourselves. NULL will be returned in no-overwrite mode, if all sub
  194. * buffers are full. Could have used the relay_write() to indirectly
  195. * copy the data, but that would have been bit convoluted, as we need to
  196. * write to only certain locations inside a sub buffer which cannot be
  197. * done without using relay_reserve() along with relay_write(). So its
  198. * better to use relay_reserve() alone.
  199. */
  200. return relay_reserve(guc->log.runtime.relay_chan, 0);
  201. }
/*
 * Compare the buffer-full counter sampled from the GuC against the last
 * value we saw for @type and account any new overflows.
 *
 * Returns true when at least one new overflow happened since the last
 * sample.
 */
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
				       enum guc_log_buffer_type type,
				       unsigned int full_cnt)
{
	unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		guc->log.prev_overflow_count[type] = full_cnt;
		guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/*
			 * buffer_full_cnt is a 4 bit counter. When it has
			 * wrapped, the unsigned subtraction above is short
			 * by exactly 16 (mod 2^32), so adding 16 restores
			 * the true delta in total_overflow_count.
			 */
			guc->log.total_overflow_count[type] += 16;
		}

		/* Rate limited: overflows can come in bursts. */
		DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
	}

	return overflow;
}
  220. static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
  221. {
  222. switch (type) {
  223. case GUC_ISR_LOG_BUFFER:
  224. return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
  225. case GUC_DPC_LOG_BUFFER:
  226. return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
  227. case GUC_CRASH_DUMP_LOG_BUFFER:
  228. return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
  229. default:
  230. MISSING_CASE(type);
  231. }
  232. return 0;
  233. }
/*
 * Snapshot the shared (write-combined) GuC log buffer into a relay sub
 * buffer for userspace: for each log type, copy the per-buffer state
 * struct plus only the newly-written log bytes, then acknowledge the
 * data to the firmware by advancing read_ptr in the shared buffer.
 */
static void guc_read_update_log_buffer(struct intel_guc *guc)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	if (WARN_ON(!guc->log.runtime.buf_addr))
		return;

	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = guc->log.runtime.buf_addr;

	/* Hold relay_lock across the copy so the channel can't vanish. */
	mutex_lock(&guc->log.runtime.relay_lock);

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Used rate limited to avoid deluge of messages, logs might be
		 * getting consumed by User at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
		guc->log.capture_miss_count++;
		mutex_unlock(&guc->log.runtime.relay_lock);
		return;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
		new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);

		/*
		 * Update the state of shared log buffer: mark everything up
		 * to the sampled write pointer as consumed so the firmware
		 * can reuse that space.
		 */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware,
		 * after sending the flush interrupt to Host, for consistency
		 * set write pointer value to same value of sampled_write_ptr
		 * in the snapshot buffer.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/* Just copy the newly written data */
		if (read_offset > write_offset) {
			/*
			 * Buffer wrapped: copy the head [0, write) here, the
			 * tail [read, end) via the common memcpy below.
			 */
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(guc);

	mutex_unlock(&guc->log.runtime.relay_lock);
}
/*
 * Worker body for log.runtime.flush_work: capture the GuC logs outside
 * interrupt context (queued on a log-buffer-flush interrupt).
 */
static void capture_logs_work(struct work_struct *work)
{
	struct intel_guc *guc =
		container_of(work, struct intel_guc, log.runtime.flush_work);

	guc_log_capture_logs(guc);
}
  325. static bool guc_log_has_runtime(struct intel_guc *guc)
  326. {
  327. return guc->log.runtime.buf_addr != NULL;
  328. }
  329. static int guc_log_runtime_create(struct intel_guc *guc)
  330. {
  331. struct drm_i915_private *dev_priv = guc_to_i915(guc);
  332. void *vaddr;
  333. int ret;
  334. lockdep_assert_held(&dev_priv->drm.struct_mutex);
  335. if (!guc->log.vma)
  336. return -ENODEV;
  337. GEM_BUG_ON(guc_log_has_runtime(guc));
  338. ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
  339. if (ret)
  340. return ret;
  341. /*
  342. * Create a WC (Uncached for read) vmalloc mapping of log
  343. * buffer pages, so that we can directly get the data
  344. * (up-to-date) from memory.
  345. */
  346. vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
  347. if (IS_ERR(vaddr)) {
  348. DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
  349. return PTR_ERR(vaddr);
  350. }
  351. guc->log.runtime.buf_addr = vaddr;
  352. return 0;
  353. }
  354. static void guc_log_runtime_destroy(struct intel_guc *guc)
  355. {
  356. /*
  357. * It's possible that the runtime stuff was never allocated because
  358. * GuC log was disabled at the boot time.
  359. */
  360. if (!guc_log_has_runtime(guc))
  361. return;
  362. i915_gem_object_unpin_map(guc->log.vma->obj);
  363. guc->log.runtime.buf_addr = NULL;
  364. }
  365. void intel_guc_log_init_early(struct intel_guc *guc)
  366. {
  367. mutex_init(&guc->log.runtime.relay_lock);
  368. INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
  369. }
/*
 * Open the relay channel used to stream GuC logs to userspace. No-op
 * (success) when logging is disabled via the modparam.
 *
 * Returns 0 on success; on failure returns -ENOMEM and additionally
 * forces i915_modparams.guc_log_level to 0 so logging stays off.
 */
int intel_guc_log_relay_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	if (!i915_modparams.guc_log_level)
		return 0;

	mutex_lock(&guc->log.runtime.relay_lock);

	GEM_BUG_ON(guc_log_has_relay(guc));

	/* Keep the size of sub buffers same as shared log buffer */
	subbuf_size = GUC_LOG_SIZE;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	/*
	 * Create a relay channel, so that we have buffers for storing
	 * the GuC firmware logs, the channel will be linked with a file
	 * later on when debugfs is registered.
	 */
	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
					n_subbufs, &relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		goto err;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	guc->log.runtime.relay_chan = guc_log_relay_chan;

	mutex_unlock(&guc->log.runtime.relay_lock);

	return 0;

err:
	mutex_unlock(&guc->log.runtime.relay_lock);
	/* logging will be off */
	i915_modparams.guc_log_level = 0;
	return ret;
}
  411. void intel_guc_log_relay_destroy(struct intel_guc *guc)
  412. {
  413. mutex_lock(&guc->log.runtime.relay_lock);
  414. /*
  415. * It's possible that the relay was never allocated because
  416. * GuC log was disabled at the boot time.
  417. */
  418. if (!guc_log_has_relay(guc))
  419. goto out_unlock;
  420. relay_close(guc->log.runtime.relay_chan);
  421. guc->log.runtime.relay_chan = NULL;
  422. out_unlock:
  423. mutex_unlock(&guc->log.runtime.relay_lock);
  424. }
/*
 * Complete log setup that could not happen at boot (e.g. logging was
 * enabled at runtime or debugfs was not yet mounted): create the relay
 * channel and runtime mapping if missing, then attach the debugfs file.
 *
 * On failure, tears the state back down and forces guc_log_level to 0.
 */
static int guc_log_late_setup(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	if (!guc_log_has_runtime(guc)) {
		/*
		 * If log was disabled at boot time, then setup needed to handle
		 * log buffer flush interrupts would not have been done yet, so
		 * do that now.
		 */
		ret = intel_guc_log_relay_create(guc);
		if (ret)
			goto err;

		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_get(dev_priv);
		ret = guc_log_runtime_create(guc);
		intel_runtime_pm_put(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);

		if (ret)
			goto err_relay;
	}

	ret = guc_log_relay_file_create(guc);
	if (ret)
		goto err_runtime;

	return 0;

err_runtime:
	/*
	 * NOTE(review): if the runtime state already existed on entry and
	 * only the debugfs file creation failed, this path destroys
	 * runtime/relay state that was created elsewhere (e.g. at boot) —
	 * confirm that ownership is intended.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	guc_log_runtime_destroy(guc);
	mutex_unlock(&dev_priv->drm.struct_mutex);
err_relay:
	intel_guc_log_relay_destroy(guc);
err:
	/* logging will remain off */
	i915_modparams.guc_log_level = 0;
	return ret;
}
  461. static void guc_log_capture_logs(struct intel_guc *guc)
  462. {
  463. struct drm_i915_private *dev_priv = guc_to_i915(guc);
  464. guc_read_update_log_buffer(guc);
  465. /*
  466. * Generally device is expected to be active only at this
  467. * time, so get/put should be really quick.
  468. */
  469. intel_runtime_pm_get(dev_priv);
  470. guc_log_flush_complete(guc);
  471. intel_runtime_pm_put(dev_priv);
  472. }
/*
 * Drain any remaining log data before logging is turned off: quiesce
 * the flush interrupts and worker, force a final firmware-side flush,
 * then capture whatever is left in the shared buffer.
 */
static void guc_flush_logs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
		return;

	/* First disable the interrupts, will be re-enabled afterwards */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_get(dev_priv);
	gen9_disable_guc_interrupts(dev_priv);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete otherwise forceful flush may not actually happen.
	 */
	flush_work(&guc->log.runtime.flush_work);

	/* Ask GuC to update the log buffer state */
	intel_runtime_pm_get(dev_priv);
	guc_log_flush(guc);
	intel_runtime_pm_put(dev_priv);

	/* GuC would have updated log buffer by now, so capture it */
	guc_log_capture_logs(guc);
}
/*
 * Allocate the shared GuC log buffer object and compute the log
 * descriptor flags word handed to the firmware. Creates the runtime
 * mapping immediately when logging is enabled at boot.
 *
 * On failure, forces i915_modparams.guc_log_level to 0 and returns a
 * negative errno.
 */
int intel_guc_log_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	unsigned long offset;
	u32 flags;
	int ret;

	GEM_BUG_ON(guc->log.vma);

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (WARN_ON(!i915_has_memcpy_from_wc())) {
		ret = -EINVAL;
		goto err;
	}

	vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	guc->log.vma = vma;

	if (i915_modparams.guc_log_level) {
		ret = guc_log_runtime_create(guc);
		if (ret < 0)
			goto err_vma;
	}

	/* each allocated unit is a page */
	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	/* GGTT address of the buffer is packed into the flags word in pages */
	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;

	return 0;

err_vma:
	i915_vma_unpin_and_release(&guc->log.vma);
err:
	/* logging will be off */
	i915_modparams.guc_log_level = 0;
	return ret;
}
/* Tear down the log runtime mapping and release the backing vma. */
void intel_guc_log_destroy(struct intel_guc *guc)
{
	guc_log_runtime_destroy(guc);
	i915_vma_unpin_and_release(&guc->log.vma);
}
/*
 * Runtime control of GuC logging (e.g. from debugfs). @control_val is
 * 0 to disable, or 1 + verbosity (0..GUC_LOG_VERBOSITY_MAX) to enable.
 *
 * Returns 0 on success, -ENODEV when no log buffer exists, -EINVAL for
 * an out-of-range value, or a negative errno from the firmware action.
 */
int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	bool enable_logging = control_val > 0;
	u32 verbosity;
	int ret;

	if (!guc->log.vma)
		return -ENODEV;

	/* The encoding below assumes the minimum verbosity is 0. */
	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
	if (control_val > 1 + GUC_LOG_VERBOSITY_MAX)
		return -EINVAL;

	/* This combination doesn't make sense & won't have any effect */
	if (!enable_logging && !i915_modparams.guc_log_level)
		return 0;

	verbosity = enable_logging ? control_val - 1 : 0;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);
	ret = guc_log_control(guc, enable_logging, verbosity);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (ret < 0) {
		DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
		return ret;
	}

	if (enable_logging) {
		i915_modparams.guc_log_level = 1 + verbosity;

		/*
		 * If log was disabled at boot time, then the relay channel file
		 * wouldn't have been created by now and interrupts also would
		 * not have been enabled. Try again now, just in case.
		 */
		ret = guc_log_late_setup(guc);
		if (ret < 0) {
			DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
			return ret;
		}

		/* GuC logging is currently the only user of Guc2Host interrupts */
		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_runtime_pm_get(dev_priv);
		gen9_enable_guc_interrupts(dev_priv);
		intel_runtime_pm_put(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	} else {
		/*
		 * Once logging is disabled, GuC won't generate logs & send an
		 * interrupt. But there could be some data in the log buffer
		 * which is yet to be captured. So request GuC to update the log
		 * buffer state and then collect the left over logs.
		 */
		guc_flush_logs(guc);

		/* As logging is disabled, update log level to reflect that */
		i915_modparams.guc_log_level = 0;
	}

	return ret;
}
  600. void i915_guc_log_register(struct drm_i915_private *dev_priv)
  601. {
  602. if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
  603. return;
  604. guc_log_late_setup(&dev_priv->guc);
  605. }
/*
 * Driver unregister hook: quiesce GuC logging — disable the GuC-to-Host
 * interrupts, drop the log buffer mapping, then close the relay channel
 * (outside struct_mutex, as relay takes its own lock).
 */
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return;

	mutex_lock(&dev_priv->drm.struct_mutex);
	/* GuC logging is currently the only user of Guc2Host interrupts */
	intel_runtime_pm_get(dev_priv);
	gen9_disable_guc_interrupts(dev_priv);
	intel_runtime_pm_put(dev_priv);

	guc_log_runtime_destroy(guc);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_guc_log_relay_destroy(guc);
}