i915_gem_request.c

/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prefetch.h>

#include "i915_drv.h"
static const char *i915_fence_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
	 * a single timeline. Note that each timeline will have
	 * multiple execution contexts (fence contexts) as we allow
	 * engines within a single timeline to execute in parallel.
	 */
	return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}
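
/* Called by the fence core the first time a callback or wait is attached.
 * Returning false tells the core the fence has already signaled; otherwise
 * we hook the request into the engine's breadcrumb (interrupt) signaling.
 */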
static bool i915_fence_enable_signaling(struct fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}
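
/* The fence API measures timeouts in jiffies, while i915_wait_request()
 * takes an optional timeout in nanoseconds, so convert on the way in and
 * back out. Returning 0 reports a timeout to the fence core.
 */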
static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = i915_wait_request(to_request(fence),
				interruptible, timeout,
				NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}
static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
					  int size)
{
	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
}

static void i915_fence_release(struct fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
	.fence_value_str = i915_fence_value_str,
	.timeline_value_str = i915_fence_timeline_value_str,
};
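
/* Track the request on the per-client (drm_file) list so that the
 * requests outstanding for this file can be found again, e.g. for
 * per-client throttling.
 */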
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	req->pid = get_pid(task_pid(current));

	return 0;
}
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	put_pid(request->pid);
	request->pid = NULL;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}
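
/* Retire a completed request: unlink it from the engine and ring lists,
 * run the ->retire() callback for everything that was tracking it, and
 * release the references it holds. Requests must be retired in
 * completion order.
 */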
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	trace_i915_gem_request_retire(request);
	list_del(&request->link);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * This loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try to hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(list_empty(&req->link));

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}
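
/* Map the current reset status onto an errno: a terminal wedge is a
 * permanent -EIO, while a reset in progress returns -EAGAIN so that
 * interruptible callers drop the struct_mutex and retry.
 */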
static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}
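
/* Prepare to (re)program the global seqno: idle every engine and retire
 * all requests so nothing is in flight, and if the new value moves
 * backwards, flush the waiters and signalers still tracking the old
 * breadcrumb values.
 */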
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine, true);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to be set less than what we will inject to
	 * the ring.
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	dev_priv->next_seqno = seqno;
	return 0;
}
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (unlikely(dev_priv->next_seqno == 0)) {
		int ret;

		ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->next_seqno++;
	return 0;
}
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	struct drm_i915_gem_request *req;
	u32 seqno;
	int ret;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->request_list,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the
	 * process of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->fence.seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with fence_init(). This increment is safe for release as we check
	 * that the request we have a reference to still matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - and if it is not, we restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
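
	/* For reference, the RCU reader described above follows roughly
	 * this pattern, called under rcu_read_lock() (a simplified sketch;
	 * the authoritative loop lives in __i915_gem_active_get_rcu()):
	 *
	 *	do {
	 *		request = rcu_dereference(active->request);
	 *		if (!request || i915_gem_request_completed(request))
	 *			return NULL;
	 *
	 *		request = i915_gem_request_get_rcu(request);
	 *		if (!request ||
	 *		    request == rcu_access_pointer(active->request))
	 *			return request;
	 *
	 *		i915_gem_request_put(request);
	 *	} while (1);
	 *
	 * i.e. only take a reference if the seqno says the request is still
	 * busy, then double check that the slot still points at the same
	 * request, as it may have been recycled through the slab meanwhile.
	 */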
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_get_seqno(dev_priv, &seqno);
	if (ret)
		goto err;

	spin_lock_init(&req->lock);
	fence_init(&req->fence,
		   &i915_fence_ops,
		   &req->lock,
		   engine->fence_context,
		   seqno);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->previous_context = NULL;
	req->file_priv = NULL;
	req->batch_obj = NULL;
	req->pid = NULL;
	req->elsp_submitted = 0;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ERR_PTR(ret);
}
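
/* Mark the engine (and the GT as a whole) as busy: take a runtime-pm
 * wakeref the first time we wake up, enable powersave/RPS tuning, and
 * queue the retire worker to run while we remain awake.
 */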
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	if (WARN_ON(!request))
		return;

	engine = request->engine;
	ring = request->ring;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = ring->tail;
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		ret = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	request->head = request_start;

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	engine->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&engine->last_request, request);
	list_add_tail(&request->link, &engine->request_list);
	list_add_tail(&request->ring_link, &ring->request_list);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request->postfix = ring->tail;

	/* Not allowed to fail! */
	ret = engine->emit_request(request);
	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

	/* Sanity check that the reserved size was large enough. */
	ret = ring->tail - request_start;
	if (ret < 0)
		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	i915_gem_mark_busy(engine);
	engine->submit_request(request);
}
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}
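
/* Give up the busywait if our time slice has elapsed or if we have been
 * moved to another CPU, as timestamps from local_clock() are not
 * comparable across CPUs.
 */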
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}
/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed-in seqno and reset_counter
 * values have been read by the caller in an smp-safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Otherwise
 * returns the errno, with the remaining time filled into the timeout argument.
 */
int i915_wait_request(struct drm_i915_gem_request *req,
		      bool interruptible,
		      s64 *timeout,
		      struct intel_rps_client *rps)
{
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;
	unsigned long timeout_remain;
	int ret = 0;

	might_sleep();

	if (i915_gem_request_completed(req))
		return 0;

	timeout_remain = MAX_SCHEDULE_TIMEOUT;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		/* Record current time in case interrupted, or wedged */
		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
		*timeout += ktime_get_raw_ns();
	}

	trace_i915_gem_request_wait_begin(req);

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->fence.seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout_remain = io_schedule_timeout(timeout_remain);
		if (timeout_remain == 0) {
			ret = -ETIME;
			break;
		}

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}
	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_engine_remove_wait(req->engine, &wait);
	__set_current_state(TASK_RUNNING);
complete:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		*timeout -= ktime_get_raw_ns();
		if (*timeout < 0)
			*timeout = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	if (IS_RPS_USER(rps) &&
	    req->fence.seqno == req->engine->last_submitted_seqno) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&req->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&req->i915->rps.client_lock);
	}

	return ret;
}
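
/* Retire completed requests on this engine in order, stopping at the
 * first request that is still busy.
 */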
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next, &engine->request_list, link) {
		if (!i915_gem_request_completed(request))
			break;

		i915_gem_request_retire(request);
	}
}
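
/* Retire completed requests across all engines and, once every engine
 * has gone idle, queue the idle worker to park the GPU.
 */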
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (dev_priv->gt.active_engines == 0)
		return;

	GEM_BUG_ON(!dev_priv->gt.awake);

	for_each_engine(engine, dev_priv) {
		engine_retire_requests(engine);
		if (!intel_engine_is_active(engine))
			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
	}

	if (dev_priv->gt.active_engines == 0)
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.idle_work,
				   msecs_to_jiffies(100));
}