i915_gem_execbuffer.c

/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
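
/*
 * Note: these two flags live in the high bits of
 * drm_i915_gem_exec_object2.flags, above the userspace-visible
 * EXEC_OBJECT_* bits. They record state taken during reservation and are
 * cleared again in i915_gem_execbuffer_unreserve_vma() below.
 */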

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};
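
/*
 * Two lookup strategies share this structure: with I915_EXEC_HANDLE_LUT
 * userspace promises handles that are dense indices, so "and" is set to
 * -buffer_count and lut[] is indexed directly; otherwise "and" is a
 * power-of-two mask for the hash buckets chained via vma->exec_node.
 * eb_get_vma() picks the strategy from the sign of "and".
 */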

static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}
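
/*
 * Bucket sizing, worked through: the table starts at half a page of
 * hlist_heads (with 4K pages and 8-byte hlist_heads that is 256 buckets)
 * and is halved until it no longer exceeds twice the buffer count. For
 * example, buffer_count == 100 gives 256 -> 128 buckets (128 <= 200), so
 * eb->and == 127 becomes the hash mask used in eb_lookup_vmas().
 */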

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can look
	 * up or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;
		struct i915_address_space *bind_vm = vm;

		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
		    USES_FULL_PPGTT(vm->dev)) {
			ret = -EINVAL;
			goto err;
		}

		/* If we have secure dispatch, or the userspace assures us that
		 * they know what they're doing, use the GGTT VM.
		 */
		if (((args->flags & I915_EXEC_SECURE) &&
		     (i == (args->buffer_count - 1))))
			bind_vm = &dev_priv->gtt.base;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}
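
/*
 * use_cpu_reloc() decides between kmap-based and GTT-based relocation
 * writes: a CPU write is preferred when it is coherent (LLC platforms,
 * buffers already in the CPU write domain, or cached buffers) and is the
 * only option when the object cannot be mapped through the mappable
 * aperture at all (!map_and_fenceable).
 */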

static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}
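
/*
 * On gen8+ addresses are 64 bits wide, so a relocation is two dwords: the
 * low half is written first, and if advancing by sizeof(uint32_t) wraps
 * offset_in_page() back to 0 the upper half lands on the following page,
 * which must be kmapped separately. relocate_entry_gtt() below handles
 * the same page-crossing case through the GTT io-mapping instead.
 */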

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = reloc->delta + target_offset;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(lower_32_bits(delta), reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(upper_32_bits(delta), reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we've already held a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		struct i915_vma *vma =
			list_first_entry(&target_i915_obj->vma_list,
					 typeof(*vma), vma_link);
		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else
		ret = relocate_entry_gtt(obj, reloc, target_offset);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}
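
/*
 * Relocations are pulled in through a small on-stack window rather than
 * one at a time: with the 32-byte drm_i915_gem_relocation_entry,
 * N_RELOC(512) gives 16 entries per __copy_from_user_inatomic() chunk,
 * and presumed_offset is only written back to userspace when the
 * relocation actually moved the target.
 */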

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence;
	unsigned flags;
	int ret;

	flags = 0;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	if (need_fence || need_reloc_mappable(vma))
		flags |= PIN_MAPPABLE;

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
				!i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}
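
/*
 * The retry logic above is deliberately modest: a first pass that fails
 * with -ENOSPC unpins everything, evicts the whole VM via
 * i915_gem_evict_vm() and tries exactly once more ("retry++" makes a
 * second -ENOSPC fatal), so a workload that genuinely cannot fit fails
 * quickly instead of thrashing.
 */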

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
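
/*
 * The OR-then-mask trick above checks both fields for 8-byte alignment
 * in one go: batch_start_offset == 0x40 and batch_len == 0x100 pass
 * ((0x40 | 0x100) & 0x7 == 0), while a batch_start_offset of 0x42 makes
 * the OR carry a low bit and the execbuffer is rejected.
 */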

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}
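
/*
 * The overflow guard, worked through: with 32-byte relocation entries
 * relocs_max is UINT_MAX/32 (about 134 million), and the running
 * relocs_total may never exceed it, so the later total*sizeof(*reloc)
 * allocation in the slow path cannot wrap an unsigned int no matter how
 * the per-object counts are split.
 */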

static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_engine_cs *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);

			/* check for potential scanout */
			if (i915_gem_obj_ggtt_bound(obj) &&
			    i915_gem_obj_to_ggtt(obj)->pin_count)
				intel_mark_fb_busy(obj, ring);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_engine_cs *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}
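
/*
 * Each loop iteration emits one three-dword MI_LOAD_REGISTER_IMM that
 * zeroes GEN7_SO_WRITE_OFFSET(i), hence the intel_ring_begin(ring, 4 * 3)
 * reservation of twelve dwords for the four streamout registers.
 */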

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The ring id is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv is using one ring */
	if (file_priv->bsd_ring)
		return file_priv->bsd_ring->id;
	else {
		/* If not, use the ping-pong mechanism to select one ring */
		int ring_id;

		mutex_lock(&dev->struct_mutex);
		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
			ring_id = VCS;
			dev_priv->mm.bsd_ring_dispatch_index = 1;
		} else {
			ring_id = VCS2;
			dev_priv->mm.bsd_ring_dispatch_index = 0;
		}
		file_priv->bsd_ring = &dev_priv->ring[ring_id];
		mutex_unlock(&dev->struct_mutex);
		return ring_id;
	}
}
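
/*
 * The chosen ring is cached in file_priv->bsd_ring, so a given fd keeps
 * submitting to the same BSD engine after its first I915_EXEC_BSD batch;
 * only the first submission from each fd advances the ping-pong index.
 */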

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u64 exec_start = args->batch_start_offset, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
		if (HAS_BSD2(dev)) {
			int ring_id;
			ring_id = gen8_dispatch_bsd_ring(dev, file);
			ring = &dev_priv->ring[ring_id];
		} else
			ring = &dev_priv->ring[VCS];
	} else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	vm = ctx->vm;
	if (!USES_FULL_PPGTT(dev))
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring,
				      batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret)
			goto err;

		/*
		 * XXX: Actually do this when enabling batch copy...
		 *
		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
		 * from MI_BATCH_BUFFER_START commands issued in the
		 * dispatch_execbuffer implementations. We specifically don't
		 * want that set when the command parser is enabled.
		 */
	}

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE &&
	    !batch_obj->has_global_gtt_mapping) {
		/* When we have multiple VMs, we'll need to make sure that we
		 * allocate space first */
		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
		BUG_ON(!vma);
		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
	}

	if (flags & I915_DISPATCH_SECURE)
		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, ctx);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);

	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}
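
/*
 * For illustration only -- a minimal, hypothetical userspace sequence that
 * reaches i915_gem_execbuffer2() through libdrm (fd, bo_handle and
 * batch_len are assumed to come from earlier GEM setup; error handling
 * omitted):
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = bo_handle,		// batch buffer, last in list
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_start_offset = 0,	// must be 8-byte aligned
 *		.batch_len = batch_len,		// must be 8-byte aligned
 *		.flags = I915_EXEC_RENDER,	// RCS, as in the legacy path
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */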