i915_gem_context.c

/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except
 * on gen5). Though something like a context does exist for the media ring,
 * the code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount  pincount  active
 * S0: initial state                           0         0        0
 * S1: context created                         1         0        0
 * S2: context is currently running            2         1        X
 * S3: GPU referenced, but not current         2         0        1
 * S4: context is current, but destroyed       1         1        0
 * S5: like S3, but destroyed                  1         0        1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_workarounds.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

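/*
 * Drop the context's lookup tables: free every handle LUT entry and release
 * the objects still referenced via the handles_vma radix tree.
 */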
static void lut_close(struct i915_gem_context *ctx)
{
        struct i915_lut_handle *lut, *ln;
        struct radix_tree_iter iter;
        void __rcu **slot;

        list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
                list_del(&lut->obj_link);
                kmem_cache_free(ctx->i915->luts, lut);
        }

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
                struct i915_vma *vma = rcu_dereference_raw(*slot);

                radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
                __i915_gem_object_release_unless_active(vma->obj);
        }
        rcu_read_unlock();
}

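/*
 * Allocate a fresh hw_id from the ida, capped at the maximum id the hardware
 * (or the GuC, which reserves the top bit for proxy submission) can address.
 */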
static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
{
        unsigned int max;

        lockdep_assert_held(&i915->contexts.mutex);

        if (INTEL_GEN(i915) >= 11)
                max = GEN11_MAX_CONTEXT_HW_ID;
        else if (USES_GUC_SUBMISSION(i915))
                /*
                 * When using GuC in proxy submission, GuC consumes the
                 * highest bit in the context id to indicate proxy submission.
                 */
                max = MAX_GUC_CONTEXT_HW_ID;
        else
                max = MAX_CONTEXT_HW_ID;

        return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
}

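/*
 * No hw_id left in the ida: walk the list of contexts that hold an id and
 * repossess the id of the first one that is not currently pinned for use on
 * the hardware.
 */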
static int steal_hw_id(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx, *cn;
        LIST_HEAD(pinned);
        int id = -ENOSPC;

        lockdep_assert_held(&i915->contexts.mutex);

        list_for_each_entry_safe(ctx, cn,
                                 &i915->contexts.hw_id_list, hw_id_link) {
                if (atomic_read(&ctx->hw_id_pin_count)) {
                        list_move_tail(&ctx->hw_id_link, &pinned);
                        continue;
                }

                GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
                list_del_init(&ctx->hw_id_link);
                id = ctx->hw_id;
                break;
        }

        /*
         * Remember how far we got on the last repossession scan, so the
         * list is kept in a "least recently scanned" order.
         */
        list_splice_tail(&pinned, &i915->contexts.hw_id_list);
        return id;
}

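/*
 * Assign a hw_id to the context: try a fresh allocation first, and only fall
 * back to stealing one from an idle context when the ida is exhausted.
 */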
static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
{
        int ret;

        lockdep_assert_held(&i915->contexts.mutex);

        /*
         * We prefer to steal/stall ourselves and our users over that of the
         * entire system. That may be a little unfair to our users, and
         * even hurt high priority clients. The choice is whether to oomkill
         * something else, or steal a context id.
         */
        ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(ret < 0)) {
                ret = steal_hw_id(i915);
                if (ret < 0) /* once again for the correct errno code */
                        ret = new_hw_id(i915, GFP_KERNEL);
                if (ret < 0)
                        return ret;
        }

        *out = ret;
        return 0;
}

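/*
 * Return the context's hw_id to the ida so it can be reused by the next
 * context; a no-op if this context never had an id assigned.
 */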
static void release_hw_id(struct i915_gem_context *ctx)
{
        struct drm_i915_private *i915 = ctx->i915;

        if (list_empty(&ctx->hw_id_link))
                return;

        mutex_lock(&i915->contexts.mutex);
        if (!list_empty(&ctx->hw_id_link)) {
                ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
                list_del_init(&ctx->hw_id_link);
        }
        mutex_unlock(&i915->contexts.mutex);
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
        unsigned int n;

        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

        release_hw_id(ctx);
        i915_ppgtt_put(ctx->ppgtt);

        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
                struct intel_context *ce = &ctx->__engine[n];

                if (ce->ops)
                        ce->ops->destroy(ce);
        }

        kfree(ctx->name);
        put_pid(ctx->pid);

        list_del(&ctx->link);

        kfree_rcu(ctx, rcu);
}

static void contexts_free(struct drm_i915_private *i915)
{
        struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
        struct i915_gem_context *ctx, *cn;

        lockdep_assert_held(&i915->drm.struct_mutex);

        llist_for_each_entry_safe(ctx, cn, freed, free_link)
                i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx;
        struct llist_node *freed;

        lockdep_assert_held(&i915->drm.struct_mutex);

        freed = llist_del_first(&i915->contexts.free_list);
        if (!freed)
                return;

        ctx = container_of(freed, typeof(*ctx), free_link);
        i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, typeof(*i915), contexts.free_work);

        mutex_lock(&i915->drm.struct_mutex);
        contexts_free(i915);
        mutex_unlock(&i915->drm.struct_mutex);
}

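/*
 * Final kref release: freeing the context requires struct_mutex, so defer
 * the actual teardown to the free worker by pushing it onto the free_list.
 */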
void i915_gem_context_release(struct kref *ref)
{
        struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
        struct drm_i915_private *i915 = ctx->i915;

        trace_i915_context_free(ctx);
        if (llist_add(&ctx->free_link, &i915->contexts.free_list))
                queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
        i915_gem_context_set_closed(ctx);

        /*
         * This context will never again be assigned to HW, so we can
         * reuse its ID for the next context.
         */
        release_hw_id(ctx);

        /*
         * The LUT uses the VMA as a backpointer to unref the object,
         * so we need to clear the LUT before we close all the VMA (inside
         * the ppgtt).
         */
        lut_close(ctx);
        if (ctx->ppgtt)
                i915_ppgtt_close(&ctx->ppgtt->vm);

        ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_put(ctx);
}

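/*
 * Build the gen8+ context descriptor template: validity, privilege and the
 * addressing mode derived from the ppgtt in use. The remaining per-context
 * bits (such as the hw_id) are added when the descriptor is finalised for
 * submission.
 */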
static u32 default_desc_template(const struct drm_i915_private *i915,
                                 const struct i915_hw_ppgtt *ppgtt)
{
        u32 address_mode;
        u32 desc;

        desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

        address_mode = INTEL_LEGACY_32B_CONTEXT;
        if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
                address_mode = INTEL_LEGACY_64B_CONTEXT;
        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

        if (IS_GEN8(i915))
                desc |= GEN8_CTX_L3LLC_COHERENT;

        /* TODO: WaDisableLiteRestore when we start using semaphore
         * signalling between Command Streamers
         * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
         */

        return desc;
}

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
                    struct drm_i915_file_private *file_priv)
{
        struct i915_gem_context *ctx;
        unsigned int n;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->contexts.list);
        ctx->i915 = dev_priv;
        ctx->sched.priority = I915_PRIORITY_NORMAL;

        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
                struct intel_context *ce = &ctx->__engine[n];

                ce->gem_context = ctx;
        }

        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
        INIT_LIST_HEAD(&ctx->hw_id_link);

        /* Default context will never have a file_priv */
        ret = DEFAULT_CONTEXT_HANDLE;
        if (file_priv) {
                ret = idr_alloc(&file_priv->context_idr, ctx,
                                DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err_lut;
        }
        ctx->user_handle = ret;

        ctx->file_priv = file_priv;
        if (file_priv) {
                ctx->pid = get_task_pid(current, PIDTYPE_PID);
                ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
                                      current->comm,
                                      pid_nr(ctx->pid),
                                      ctx->user_handle);
                if (!ctx->name) {
                        ret = -ENOMEM;
                        goto err_pid;
                }
        }

        /* NB: Mark all slices as needing a remap so that when the context
         * first loads it will restore whatever remap state already exists.
         * If there is no remap info, it will be a NOP. */
        ctx->remap_slice = ALL_L3_SLICES(dev_priv);

        i915_gem_context_set_bannable(ctx);
        ctx->ring_size = 4 * PAGE_SIZE;
        ctx->desc_template =
                default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

        return ctx;

err_pid:
        put_pid(ctx->pid);
        idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
        context_close(ctx);
        return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
                                 struct drm_i915_file_private *file_priv)
{
        idr_remove(&file_priv->context_idr, ctx->user_handle);
        context_close(ctx);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
                        struct drm_i915_file_private *file_priv)
{
        struct i915_gem_context *ctx;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /* Reap the most stale context */
        contexts_free_first(dev_priv);

        ctx = __create_hw_context(dev_priv, file_priv);
        if (IS_ERR(ctx))
                return ctx;

        if (USES_FULL_PPGTT(dev_priv)) {
                struct i915_hw_ppgtt *ppgtt;

                ppgtt = i915_ppgtt_create(dev_priv, file_priv);
                if (IS_ERR(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
                        __destroy_hw_context(ctx, file_priv);
                        return ERR_CAST(ppgtt);
                }

                ctx->ppgtt = ppgtt;
                ctx->desc_template = default_desc_template(dev_priv, ppgtt);
        }

        trace_i915_context_create(ctx);

        return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
        struct i915_gem_context *ctx;
        int ret;

        if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
                return ERR_PTR(-ENODEV);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        ctx = __create_hw_context(to_i915(dev), NULL);
        if (IS_ERR(ctx))
                goto out;

        ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_set_closed(ctx); /* not user accessible */
        i915_gem_context_clear_bannable(ctx);
        i915_gem_context_set_force_single_submission(ctx);
        if (!USES_GUC_SUBMISSION(to_i915(dev)))
                ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
        mutex_unlock(&dev->struct_mutex);
        return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
        struct i915_gem_context *ctx;

        /* Keep the context ref so that we can free it immediately ourselves */
        ctx = i915_gem_context_get(fetch_and_zero(ctxp));
        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

        context_close(ctx);
        i915_gem_context_free(ctx);
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
        struct i915_gem_context *ctx;
        int err;

        ctx = i915_gem_create_context(i915, NULL);
        if (IS_ERR(ctx))
                return ctx;

        err = i915_gem_context_pin_hw_id(ctx);
        if (err) {
                destroy_kernel_context(&ctx);
                return ERR_PTR(err);
        }

        i915_gem_context_clear_bannable(ctx);
        ctx->sched.priority = prio;
        ctx->ring_size = PAGE_SIZE;

        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

        return ctx;
}

static void init_contexts(struct drm_i915_private *i915)
{
        mutex_init(&i915->contexts.mutex);
        INIT_LIST_HEAD(&i915->contexts.list);

        /* Using the simple ida interface, the max is limited by sizeof(int) */
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
        BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
        ida_init(&i915->contexts.hw_ida);
        INIT_LIST_HEAD(&i915->contexts.hw_id_list);

        INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
        init_llist_head(&i915->contexts.free_list);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
        return HAS_LOGICAL_RING_PREEMPTION(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
        struct i915_gem_context *ctx;
        int ret;

        /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
        GEM_BUG_ON(dev_priv->preempt_context);

        ret = intel_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        init_contexts(dev_priv);

        /* lowest priority; idle task */
        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context\n");
                return PTR_ERR(ctx);
        }
        /*
         * For easy recognisability, we want the kernel context to be 0 and
         * then all user contexts will have a non-zero hw_id. Kernel contexts
         * are permanently pinned, so that we never suffer a stall and can
         * use them from any allocation context (e.g. for evicting other
         * contexts and from inside the shrinker).
         */
        GEM_BUG_ON(ctx->hw_id);
        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
        dev_priv->kernel_context = ctx;

        /* highest priority; preempting task */
        if (needs_preempt_context(dev_priv)) {
                ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
                if (!IS_ERR(ctx))
                        dev_priv->preempt_context = ctx;
                else
                        DRM_ERROR("Failed to create preempt context; disabling preemption\n");
        }

        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
                         "logical" : "fake");
        return 0;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        for_each_engine(engine, dev_priv, id)
                intel_engine_lost_context(engine);
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
        lockdep_assert_held(&i915->drm.struct_mutex);

        if (i915->preempt_context)
                destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);

        /* Must free all deferred contexts (via flush_workqueue) first */
        GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
        ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
        struct i915_gem_context *ctx = p;

        context_close(ctx);
        return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
                          struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;

        idr_init(&file_priv->context_idr);

        mutex_lock(&i915->drm.struct_mutex);
        ctx = i915_gem_create_context(i915, file_priv);
        mutex_unlock(&i915->drm.struct_mutex);
        if (IS_ERR(ctx)) {
                idr_destroy(&file_priv->context_idr);
                return PTR_ERR(ctx);
        }

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

        return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
}

static struct i915_request *
last_request_on_engine(struct i915_timeline *timeline,
                       struct intel_engine_cs *engine)
{
        struct i915_request *rq;

        GEM_BUG_ON(timeline == &engine->timeline);

        rq = i915_gem_active_raw(&timeline->last_request,
                                 &engine->i915->drm.struct_mutex);
        if (rq && rq->engine == engine) {
                GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
                          timeline->name, engine->name,
                          rq->fence.context, rq->fence.seqno);
                GEM_BUG_ON(rq->timeline != timeline);
                return rq;
        }

        return NULL;
}

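/*
 * Check whether the engine is already guaranteed to end up idling in the
 * kernel context: either the last queued switch-to-kernel-context was
 * ordered after all other activity still pending on this engine, or the
 * engine is idle and last retired into the kernel context.
 */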
static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
{
        struct drm_i915_private *i915 = engine->i915;
        const struct intel_context * const ce =
                to_intel_context(i915->kernel_context, engine);
        struct i915_timeline *barrier = ce->ring->timeline;
        struct intel_ring *ring;
        bool any_active = false;

        lockdep_assert_held(&i915->drm.struct_mutex);

        list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
                struct i915_request *rq;

                rq = last_request_on_engine(ring->timeline, engine);
                if (!rq)
                        continue;

                any_active = true;

                if (rq->hw_context == ce)
                        continue;

                /*
                 * Was this request submitted after the previous
                 * switch-to-kernel-context?
                 */
                if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
                        GEM_TRACE("%s needs barrier for %llx:%d\n",
                                  ring->timeline->name,
                                  rq->fence.context,
                                  rq->fence.seqno);
                        return false;
                }

                GEM_TRACE("%s has barrier after %llx:%d\n",
                          ring->timeline->name,
                          rq->fence.context,
                          rq->fence.seqno);
        }

        /*
         * If any other timeline was still active and behind the last barrier,
         * then our last switch-to-kernel-context must still be queued and
         * will run last (leaving the engine in the kernel context when it
         * eventually idles).
         */
        if (any_active)
                return true;

        /* The engine is idle; check that it is idling in the kernel context. */
        return engine->last_retired_context == ce;
}

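/*
 * Emit a request in the kernel context on every engine that still needs one,
 * ordered after all outstanding work, so that each engine eventually idles
 * in the kernel context (e.g. before suspend or when parking the GPU).
 */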
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));

        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(!i915->kernel_context);

        i915_retire_requests(i915);

        for_each_engine(engine, i915, id) {
                struct intel_ring *ring;
                struct i915_request *rq;

                GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
                if (engine_has_kernel_context_barrier(engine))
                        continue;

                GEM_TRACE("emit barrier on %s\n", engine->name);

                rq = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                /* Queue this switch after all other activity */
                list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
                        struct i915_request *prev;

                        prev = last_request_on_engine(ring->timeline, engine);
                        if (!prev)
                                continue;

                        if (prev->gem_context == i915->kernel_context)
                                continue;

                        GEM_TRACE("add barrier on %s for %llx:%d\n",
                                  engine->name,
                                  prev->fence.context,
                                  prev->fence.seqno);
                        i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                         &prev->submit,
                                                         I915_FENCE_GFP);
                        i915_timeline_sync_set(rq->timeline, &prev->fence);
                }

                i915_request_add(rq);
        }

        return 0;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
        return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

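/*
 * DRM_IOCTL_I915_GEM_CONTEXT_CREATE: allocate a new logical context for the
 * calling file and return its handle in ctx_id. Illustrative userspace
 * sketch (not part of this file; assumes an open drm fd and libdrm):
 *
 *	struct drm_i915_gem_context_create create = { .pad = 0 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0)
 *		ctx_id = create.ctx_id; // later passed in execbuffer2.rsvd1
 */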
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;
        int ret;

        if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
                return -ENODEV;

        if (args->pad != 0)
                return -EINVAL;

        if (client_is_banned(file_priv)) {
                DRM_DEBUG("client %s[%d] banned from creating ctx\n",
                          current->comm,
                          pid_nr(get_task_pid(current, PIDTYPE_PID)));
                return -EIO;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = i915_gem_create_context(dev_priv, file_priv);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

        args->ctx_id = ctx->user_handle;
        DRM_DEBUG("HW context %d created\n", args->ctx_id);

        return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file)
{
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;
        int ret;

        if (args->pad != 0)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
                return -ENOENT;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        __destroy_hw_context(ctx, file_priv);
        mutex_unlock(&dev->struct_mutex);

out:
        i915_gem_context_put(ctx);
        return ret;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct i915_gem_context *ctx;
        int ret = 0;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        args->size = 0;
        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                ret = -EINVAL;
                break;
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
                break;
        case I915_CONTEXT_PARAM_GTT_SIZE:
                if (ctx->ppgtt)
                        args->value = ctx->ppgtt->vm.total;
                else if (to_i915(dev)->mm.aliasing_ppgtt)
                        args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
                else
                        args->value = to_i915(dev)->ggtt.vm.total;
                break;
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                args->value = i915_gem_context_no_error_capture(ctx);
                break;
        case I915_CONTEXT_PARAM_BANNABLE:
                args->value = i915_gem_context_is_bannable(ctx);
                break;
        case I915_CONTEXT_PARAM_PRIORITY:
                args->value = ctx->sched.priority;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        i915_gem_context_put(ctx);
        return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct i915_gem_context *ctx;
        int ret = 0;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                ret = -EINVAL;
                break;
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                if (args->size)
                        ret = -EINVAL;
                else if (args->value)
                        set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
                else
                        clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
                break;
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                if (args->size)
                        ret = -EINVAL;
                else if (args->value)
                        i915_gem_context_set_no_error_capture(ctx);
                else
                        i915_gem_context_clear_no_error_capture(ctx);
                break;
        case I915_CONTEXT_PARAM_BANNABLE:
                if (args->size)
                        ret = -EINVAL;
                else if (!capable(CAP_SYS_ADMIN) && !args->value)
                        ret = -EPERM;
                else if (args->value)
                        i915_gem_context_set_bannable(ctx);
                else
                        i915_gem_context_clear_bannable(ctx);
                break;
        case I915_CONTEXT_PARAM_PRIORITY:
                {
                        s64 priority = args->value;

                        if (args->size)
                                ret = -EINVAL;
                        else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
                                ret = -ENODEV;
                        else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
                                 priority < I915_CONTEXT_MIN_USER_PRIORITY)
                                ret = -EINVAL;
                        else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
                                 !capable(CAP_SYS_NICE))
                                ret = -EPERM;
                        else
                                ctx->sched.priority = priority;
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        i915_gem_context_put(ctx);
        return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reset_stats *args = data;
        struct i915_gem_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        ret = -ENOENT;
        rcu_read_lock();
        ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
        if (!ctx)
                goto out;

        /*
         * We opt for unserialised reads here. This may result in tearing
         * in the extremely unlikely event of a GPU hang on this context
         * as we are querying them. If we need that extra layer of protection,
         * we should wrap the hangstats with a seqlock.
         */

        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = atomic_read(&ctx->guilty_count);
        args->batch_pending = atomic_read(&ctx->active_count);

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}

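/*
 * Assign a hw_id (if the context does not yet have one) and pin it so that
 * the id cannot be stolen while the context may be in use by the hardware.
 */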
int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
{
        struct drm_i915_private *i915 = ctx->i915;
        int err = 0;

        mutex_lock(&i915->contexts.mutex);

        GEM_BUG_ON(i915_gem_context_is_closed(ctx));

        if (list_empty(&ctx->hw_id_link)) {
                GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));

                err = assign_hw_id(i915, &ctx->hw_id);
                if (err)
                        goto out_unlock;

                list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
        }

        GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
        atomic_inc(&ctx->hw_id_pin_count);

out_unlock:
        mutex_unlock(&i915->contexts.mutex);
        return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif