amdgpu_fence.c

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &ring->fence_drv.lockup_work,
			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		   &ring->fence_drv.fence_queue.lock,
		   adev->fence_context + ring->idx,
		   (*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}
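
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a typical submission emits a fence right after writing its commands to
 * the ring, while holding the ring emission mutex as noted above.
 *
 *	struct amdgpu_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, owner, &fence);	// owner: caller state
 *	if (r)
 *		return r;	// -ENOMEM: fence allocation failed
 *	// ... later, drop the reference once the caller is done with it:
 *	amdgpu_fence_unref(&fence);
 */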

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * would have to be signaled continuously, i.e. amdgpu_fence_read
	 * needs to return a different value each time for both the
	 * currently polling process and the other process that updates
	 * last_seq between the atomic read and xchg of the current
	 * process. And the value the other process sets as last_seq must
	 * be higher than the seq value we just read, which means the
	 * current process needs to be interrupted after amdgpu_fence_read
	 * and before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 iterations, accepting the fact that we might have
	 * temporarily set last_seq not to the true last signaled seq but
	 * to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the real last seq signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}
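
/*
 * Worked example of the 32->64 bit sequence extension above, with
 * illustrative values (not taken from real hardware):
 *
 *	last_seq     = 0x00000001fffffffe	// 64-bit bookkeeping value
 *	last_emitted = 0x0000000200000005
 *	fence read   = 0x00000003		// 32-bit value from memory
 *
 *	seq = 0x00000003 | (last_seq & 0xffffffff00000000)
 *	    = 0x0000000100000003	// smaller than last_seq, so the
 *					// 32-bit counter wrapped around
 *	seq = 0x00000003 | (last_emitted & 0xffffffff00000000)
 *	    = 0x0000000200000003	// now last_seq < seq <= last_emitted
 *
 * The sanity check (seq <= last_seq || seq > last_emitted) then accepts
 * this value and last_seq is advanced to it.
 */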

/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probes
 * the hardware to see if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				 lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (amdgpu_fence_activity(ring)) {
		wake_up_all(&ring->fence_drv.fence_queue);
	} else if (amdgpu_ring_is_lockup(ring)) {
		/* good news we believe it's a lockup */
		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring->idx], ring->idx);

		/* remember that we need a reset */
		ring->adev->needs_reset = true;
		wake_up_all(&ring->fence_drv.fence_queue);
	}
	up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;
	unsigned long irqflags;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * would have to be signaled continuously, i.e. amdgpu_fence_read
	 * needs to return a different value each time for both the
	 * currently polling process and the other process that updates
	 * last_seq between the atomic read and xchg of the current
	 * process. And the value the other process sets as last_seq must
	 * be higher than the seq value we just read, which means the
	 * current process needs to be interrupted after amdgpu_fence_read
	 * and before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 iterations, accepting the fact that we might have
	 * temporarily set last_seq not to the true last signaled seq but
	 * to an older one.
	 */
	spin_lock_irqsave(&ring->fence_lock, irqflags);
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the real last seq signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (wake) {
		if (amdgpu_enable_scheduler) {
			uint64_t handled_seq =
				amd_sched_get_handled_seq(ring->scheduler);
			uint64_t latest_seq =
				atomic64_read(&ring->fence_drv.last_seq);
			if (handled_seq == latest_seq) {
				DRM_ERROR("ring %d, EOP without seq update (latest_seq=%llu)\n",
					  ring->idx, latest_seq);
				goto exit;
			}
			do {
				amd_sched_isr(ring->scheduler);
			} while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
	}
exit:
	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value). Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;
	}
	return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}

/**
 * amdgpu_fence_signaled - check if a fence has signaled
 *
 * @fence: amdgpu fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
{
	if (!fence)
		return true;

	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
		if (!fence_signal(&fence->base))
			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
		return true;
	}
	return false;
}

/*
 * amdgpu_fence_ring_wait_seq_timeout - wait for seq of the specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: sequence number to wait for
 * @intr: if interruptible
 * @timeout: jiffies before time out
 *
 * return value:
 * 0: timed out, seq not signaled, and GPU not hung
 * X (X > 0): seq signaled and X means how many jiffies remain before time out
 * -EDEADLK: GPU hung before time out
 * -ERESTARTSYS: interrupted before seq signaled
 * -EINVAL: some parameter is not valid
 */
static long amdgpu_fence_ring_wait_seq_timeout(struct amdgpu_ring *ring, uint64_t seq,
					       bool intr, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	long r = 0;
	bool signaled = false;

	BUG_ON(!ring);
	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return timeout;

	while (1) {
		if (intr) {
			r = wait_event_interruptible_timeout(ring->fence_drv.fence_queue, (
					(signaled = amdgpu_fence_seq_signaled(ring, seq))
					|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
			if (r == -ERESTARTSYS) /* interrupted */
				return r;
		} else {
			r = wait_event_timeout(ring->fence_drv.fence_queue, (
					(signaled = amdgpu_fence_seq_signaled(ring, seq))
					|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		}

		if (signaled) {
			/* seq signaled; return the jiffies remaining */
			if (timeout == MAX_SCHEDULE_TIMEOUT)
				return timeout;
			return timeout - (AMDGPU_FENCE_JIFFIES_TIMEOUT - r);
		} else if (adev->needs_reset) {
			return -EDEADLK;
		}

		/* check if it's a lockup */
		if (amdgpu_ring_is_lockup(ring)) {
			uint64_t last_seq = atomic64_read(&ring->fence_drv.last_seq);
			/* ring lockup */
			dev_warn(adev->dev, "GPU lockup (waiting for "
				 "0x%016llx last fence id 0x%016llx on"
				 " ring %d)\n",
				 seq, last_seq, ring->idx);
			wake_up_all(&ring->fence_drv.fence_queue);
			return -EDEADLK;
		}

		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
			if (timeout < 1)
				return 0;
		}
	}
}

/**
 * amdgpu_fence_wait - wait for a fence to signal
 *
 * @fence: amdgpu fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
	long r;

	r = fence_wait_timeout(&fence->base, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}
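
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * an interruptible wait must be prepared for -ERESTARTSYS when a signal
 * arrives, and for -EDEADLK when a GPU reset is needed.
 *
 *	int r = amdgpu_fence_wait(fence, true);
 *	if (r == -ERESTARTSYS)
 *		return r;	// restart the syscall
 *	if (r == -EDEADLK)
 *		return r;	// caller handles the pending GPU reset
 */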

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	long r;
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	long r;
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(ring->adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring->idx, r);
	}
	return 0;
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32-bit wraparound */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}
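
/*
 * Illustrative sketch (hypothetical power-management caller, not part of
 * this file): the count is simply "emitted minus signaled", so a busy
 * ring reports a backlog while an idle ring reports 0.
 *
 *	if (amdgpu_fence_count_emitted(ring))
 *		raise_clocks();		// hypothetical: work in flight
 *	else
 *		lower_clocks();		// hypothetical: ring is idle
 */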

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
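
/*
 * Illustrative sketch of the intended pairing (hypothetical submission
 * path, not part of this file): before dst_ring consumes buffers guarded
 * by a fence from another ring, check whether a semaphore is needed and
 * record the sync point once the semaphore wait has been emitted.
 *
 *	if (amdgpu_fence_need_sync(fence, dst_ring)) {
 *		// ... emit a semaphore wait on dst_ring here ...
 *		amdgpu_fence_note_sync(fence, dst_ring);
 *	}
 */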

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			  amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	if (amdgpu_enable_scheduler) {
		ring->scheduler = amd_sched_create((void *)ring->adev,
						   &amdgpu_sched_ops,
						   ring->idx, 5, 0,
						   amdgpu_sched_hw_submission);
		if (!ring->scheduler)
			DRM_ERROR("Failed to create scheduler on ring %d.\n",
				  ring->idx);
	}
}
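
/*
 * Illustrative init-order sketch (hypothetical IP-block setup code, not
 * part of this file): a ring first gets its software state reset via
 * amdgpu_fence_driver_init_ring(), and is later armed with a writeback
 * slot and a fence interrupt via amdgpu_fence_driver_start_ring().
 *
 *	amdgpu_fence_driver_init_ring(ring);
 *	// ... ring and IRQ setup ...
 *	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 *	if (r)
 *		return r;
 */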

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		if (ring->scheduler)
			amd_sched_destroy(ring->scheduler);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
{
	int idx;
	struct amdgpu_fence *fence;

	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
		fence = fences[idx];
		if (fence && amdgpu_test_signaled(fence))
			return true;
	}
	return false;
}

struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *array[AMDGPU_MAX_RINGS];
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;

	memset(&array[0], 0, sizeof(array));
	array[0] = fence;

	return amdgpu_fence_wait_any(adev, array, intr, t);
}

/* wait until any fence in the array signals */
signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
				  struct amdgpu_fence **array, bool intr, signed long t)
{
	long idx = 0;
	struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
	struct amdgpu_fence *fence;

	BUG_ON(!array);

	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
		fence = array[idx];
		if (fence) {
			cb[idx].task = current;
			if (fence_add_callback(&fence->base,
					       &cb[idx].base, amdgpu_fence_wait_cb))
				return t; /* return if fence is already signaled */
		}
	}

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled_any must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled_any(array))
			break;

		if (adev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
		fence = array[idx];
		if (fence)
			fence_remove_callback(&fence->base, &cb[idx].base);
	}

	return t;
}
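
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the array has AMDGPU_MAX_RINGS slots, NULL entries are skipped, and the
 * wait returns once any populated entry signals.
 *
 *	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS] = {};
 *	signed long t;
 *
 *	fences[0] = fence_a;
 *	fences[1] = fence_b;
 *	t = amdgpu_fence_wait_any(adev, fences, true, MAX_SCHEDULE_TIMEOUT);
 *	if (t < 0)
 *		return t;	// -ERESTARTSYS or -EDEADLK
 */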

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
	.release = NULL,
};