i915_gem_request.c

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
	 * a single timeline. Note that each timeline will have
	 * multiple execution contexts (fence contexts) as we allow
	 * engines within a single timeline to execute in parallel.
	 */
	return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}
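
/* Called by the fence core when the first waiter attaches: if the request
 * has not already completed, ask the engine to signal this fence once it
 * does.
 */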
static bool i915_fence_enable_signaling(struct fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}
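
/* Fence-ops wait callback: convert the jiffies timeout into nanoseconds for
 * __i915_wait_request() and translate the result back into the remaining
 * jiffies (0 on timeout, negative error code on failure).
 */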
static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = __i915_wait_request(to_request(fence),
				  interruptible, timeout,
				  NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}

static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
					  int size)
{
	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
}
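
/* Final fence reference dropped: return the request to its slab cache. */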
static void i915_fence_release(struct fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
	.fence_value_str = i915_fence_value_str,
	.timeline_value_str = i915_fence_timeline_value_str,
};
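
/* Associate the request with the file that submitted it: link it onto the
 * per-file request list under file_priv->mm.lock and record the pid of the
 * submitting task, so the client owning the work can be identified later.
 */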
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	req->pid = get_pid(task_pid(current));

	return 0;
}
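
/* Undo i915_gem_request_add_to_client(): unlink the request from the
 * per-file list and drop the pid reference taken at submission.
 */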
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	put_pid(request->pid);
	request->pid = NULL;
}
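
/* Retire a completed request: remove it from the engine and client lists,
 * record the last position read by the GPU, unpin the previous logical ring
 * context (if any) and drop the references held on the context and request.
 */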
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	trace_i915_gem_request_retire(request);
	list_del_init(&request->list);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	request->ring->last_retired_head = request->postfix;

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);

	if (list_empty(&req->list))
		return;

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), list);

		i915_gem_request_retire(tmp);
	} while (tmp != req);

	WARN_ON(i915_verify_lists(engine->dev));
}
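
/* Inspect the reset counter: report -EIO if the GPU is terminally wedged,
 * and -EAGAIN (or -EIO for callers that cannot handle a restart) while a
 * reset is still in progress.
 */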
static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}
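
/* Prepare to (re)start the global seqno from @seqno: idle every engine,
 * retire all outstanding requests, flush any remaining breadcrumb waiters
 * and signalers if the seqno would move backwards, and then program the new
 * value into each engine.
 */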
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
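
/* Reserve the next seqno for a new request, reinitialising the counters if
 * next_seqno has wrapped back to zero (0 is reserved as "no seqno").
 */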
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (unlikely(dev_priv->next_seqno == 0)) {
		int ret;

		ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	struct drm_i915_gem_request *req;
	u32 seqno;
	int ret;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->request_list,
				       typeof(*req), list);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_get_seqno(dev_priv, &seqno);
	if (ret)
		goto err;

	spin_lock_init(&req->lock);
	fence_init(&req->fence,
		   &i915_fence_ops,
		   &req->lock,
		   engine->fence_context,
		   seqno);

	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ERR_PTR(ret);
}
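
/* Note GPU activity on behalf of a new request: mark the engine as active
 * and, on the first request since going idle, take a runtime-pm wakeref,
 * enable powersave/RPS and kick off the retire worker.
 */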
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request,
			struct drm_i915_gem_object *obj,
			bool flush_caches)
{
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	if (WARN_ON(!request))
		return;

	engine = request->engine;
	ring = request->ring;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = ring->tail;
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		ret = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	request->head = request_start;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
	list_add_tail(&request->list, &engine->request_list);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request->postfix = ring->tail;

	if (i915.enable_execlists) {
		ret = engine->emit_request(request);
	} else {
		ret = engine->add_request(request);

		request->tail = ring->tail;
	}
	/* Not allowed to fail! */
	WARN(ret, "emit|add_request failed: %d!\n", ret);
	/* Sanity check that the reserved size was large enough. */
	ret = ring->tail - request_start;
	if (ret < 0)
		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	i915_gem_mark_busy(engine);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}
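
/* Abort the busywait if the local timestamp has passed @timeout, or if we
 * have been migrated away from the CPU on which the timeout was computed.
 */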
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns
 * the errno with remaining time filled in timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps)
{
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;
	unsigned long timeout_remain;
	int ret = 0;

	might_sleep();

	if (list_empty(&req->list))
		return 0;

	if (i915_gem_request_completed(req))
		return 0;

	timeout_remain = MAX_SCHEDULE_TIMEOUT;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		/* Record current time in case interrupted, or wedged */
		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
		*timeout += ktime_get_raw_ns();
	}

	trace_i915_gem_request_wait_begin(req);

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

	/* Optimistic spin for the next ~jiffie before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->fence.seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout_remain = io_schedule_timeout(timeout_remain);
		if (timeout_remain == 0) {
			ret = -ETIME;
			break;
		}

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}
	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_engine_remove_wait(req->engine, &wait);
	__set_current_state(TASK_RUNNING);
complete:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		*timeout -= ktime_get_raw_ns();
		if (*timeout < 0)
			*timeout = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	if (IS_RPS_USER(rps) &&
	    req->fence.seqno == req->engine->last_submitted_seqno) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&req->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&req->i915->rps.client_lock);
	}

	return ret;
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int i915_wait_request(struct drm_i915_gem_request *req)
{
	int ret;

	GEM_BUG_ON(!req);
	lockdep_assert_held(&req->i915->drm.struct_mutex);

	ret = __i915_wait_request(req, req->i915->mm.interruptible, NULL, NULL);
	if (ret)
		return ret;

	/* If the GPU hung, we want to keep the requests to find the guilty. */
	if (!i915_reset_in_progress(&req->i915->gpu_error))
		i915_gem_request_retire_upto(req);

	return 0;
}