i915_gem_execbuffer.c

/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
#define __EXEC_OBJECT_PURGEABLE (1<<27)

#define BATCH_OFFSET_BIAS (256*1024)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

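/*
 * Build the structure used to map execbuffer handles back to vmas. With
 * I915_EXEC_HANDLE_LUT the handles are dense indices into the exec list,
 * so a flat lookup table suffices; otherwise fall back to a small hash
 * table sized against the buffer count.
 */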
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

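/*
 * Look up every object referenced by the exec list, take a reference on
 * each, and attach it to a vma in the target address space. Done in two
 * passes: handle lookup under file->table_lock, then vma lookup/creation
 * once the spinlock has been dropped, so the allocator may sleep.
 */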
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

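/* Map a user handle back to its vma, via the flat LUT or the hash buckets. */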
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	if (entry->flags & __EXEC_OBJECT_PURGEABLE)
		obj->madv = I915_MADV_DONTNEED;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
			  __EXEC_OBJECT_HAS_PIN |
			  __EXEC_OBJECT_PURGEABLE);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

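/*
 * A relocation is written through the CPU (kmap) path rather than the
 * GTT when the platform has an LLC, the object is already in the CPU
 * write domain, lacks a mappable+fenceable GTT binding, or is not
 * uncached.
 */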
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

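/*
 * Write a relocation value through a kmap of the target page. On gen8+
 * the presumed offset is 64 bits wide, so the upper dword may spill over
 * into the following page.
 */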
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

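/*
 * Write a relocation value through an atomic WC mapping of the mappable
 * GTT aperture. The object must be bound in the global GTT, and its
 * fence is dropped first since we write through the aperture address.
 */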
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = reloc->delta + target_offset;
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !(target_vma->bound & GLOBAL_BIND))) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    GLOBAL_BIND);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else
		ret = relocate_entry_gtt(obj, reloc, target_offset);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

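/*
 * Apply all relocations for one vma. User relocations are copied in and
 * written back in fixed-size batches through an on-stack buffer, using
 * the atomic (non-faulting) user access helpers since pagefaults are
 * disabled on this path.
 */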
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = 0;
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
		flags |= PIN_GLOBAL | PIN_MAPPABLE;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_is_ggtt(vma->vm))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_is_ggtt(vma->vm));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	return false;
}

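/*
 * Reserve address space for every vma in the exec list, first sorting
 * buffers that need a mappable or fenced GTT binding to the front so
 * they are placed before pressure on the mappable aperture builds up.
 */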
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

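/*
 * Slow-path relocation: drop struct_mutex, copy the complete relocation
 * trees into kernel memory where we may fault freely, then reacquire the
 * lock, re-reserve the objects and apply the relocations from the
 * kernel copy.
 */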
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

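/*
 * Flush and synchronise every object about to be used by the ring:
 * wait for outstanding rendering on other rings, clflush CPU-dirty
 * buffers, and invalidate the GPU caches before the batch runs.
 */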
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

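/*
 * Sanity-check the exec list before taking any locks: reject unknown
 * per-object flags, guard the total relocation count against overflow,
 * and verify (and optionally prefault) the userspace relocation arrays.
 */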
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
		int ret = intel_lr_context_deferred_create(ctx, ring);
		if (ret) {
			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
			return ERR_PTR(ret);
		}
	}

	return ctx;
}

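/*
 * Mark every vma in the exec list as active on the ring: latch the
 * pending read/write domains computed during reservation, track the
 * request for write and fence bookkeeping, and emit the domain-change
 * tracepoint.
 */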
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			i915_gem_request_assign(&obj->last_write_req, req);

			intel_fb_obj_invalidate(obj, ring);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			i915_gem_request_assign(&obj->last_fenced_req, req);
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				struct drm_i915_private *dev_priv = to_i915(ring->dev);
				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
			}
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_engine_cs *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static int
i915_emit_box(struct intel_engine_cs *ring,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(ring->dev)->gen >= 4) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
		intel_ring_emit(ring, DR4);
	} else {
		ret = intel_ring_begin(ring, 6);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
		intel_ring_emit(ring, DR1);
		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
		intel_ring_emit(ring, DR4);
		intel_ring_emit(ring, 0);
	}
	intel_ring_advance(ring);

	return 0;
}

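/*
 * Run the batch through the command parser. On success the validated
 * copy in the shadow batch object is executed instead of the user
 * batch; a batch the parser declines to check (-EACCES) falls back to
 * the unparsed original.
 */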
static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct eb_vmas *eb,
			  struct drm_i915_gem_object *batch_obj,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master,
			  u32 *flags)
{
	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
	struct drm_i915_gem_object *shadow_batch_obj;
	bool need_reloc = false;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
						   batch_obj->base.size);
	if (IS_ERR(shadow_batch_obj))
		return shadow_batch_obj;

	ret = i915_parse_cmds(ring,
			      batch_obj,
			      shadow_batch_obj,
			      batch_start_offset,
			      batch_len,
			      is_master);
	if (ret) {
		if (ret == -EACCES)
			return batch_obj;
	} else {
		struct i915_vma *vma;

		memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

		vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
		vma->exec_entry = shadow_exec_entry;
		vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
		drm_gem_object_reference(&shadow_batch_obj->base);
		i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
		list_add_tail(&vma->exec_list, &eb->vmas);

		shadow_batch_obj->base.pending_read_domains =
			batch_obj->base.pending_read_domains;

		/*
		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
		 * bit from MI_BATCH_BUFFER_START commands issued in the
		 * dispatch_execbuffer implementations. We specifically
		 * don't want that set when the command parser is
		 * enabled.
		 *
		 * FIXME: with aliasing ppgtt, buffers that should only
		 * be in ggtt still end up in the aliasing ppgtt. remove
		 * this check when that is fixed.
		 */
		if (USES_FULL_PPGTT(dev))
			*flags |= I915_DISPATCH_SECURE;
	}

	return ret ? ERR_PTR(ret) : shadow_batch_obj;
}

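/*
 * Legacy (non-execlists) submission backend: validate cliprects, flush
 * objects to the GPU domain, switch context, program the INSTPM
 * constants mode if needed, and dispatch the batch.
 */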
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_len;
	int instp_mode;
	u32 instp_mask;
	int i, ret = 0;

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto error;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto error;
		}
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
	if (ret)
		goto error;

	ret = i915_switch_context(ring, ctx);
	if (ret)
		goto error;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			ret = -EINVAL;
			goto error;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				ret = -EINVAL;
				goto error;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				ret = -EINVAL;
				goto error;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		ret = -EINVAL;
		goto error;
	}

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto error;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto error;
	}

	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(ring, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto error;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto error;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			return ret;
	}

	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

error:
	kfree(cliprects);
	return ret;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The Ring ID is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv is using one ring */
	if (file_priv->bsd_ring)
		return file_priv->bsd_ring->id;
	else {
		/* If no, use the ping-pong mechanism to select one ring */
		int ring_id;

		mutex_lock(&dev->struct_mutex);
		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
			ring_id = VCS;
			dev_priv->mm.bsd_ring_dispatch_index = 1;
		} else {
			ring_id = VCS2;
			dev_priv->mm.bsd_ring_dispatch_index = 0;
		}
		file_priv->bsd_ring = &dev_priv->ring[ring_id];
		mutex_unlock(&dev->struct_mutex);
		return ring_id;
	}
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}

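/*
 * Common execbuffer path shared by both ioctls: validate arguments,
 * select the target ring and context, reserve and relocate every
 * object, optionally route the batch through the command parser, then
 * hand the batch to the submission backend (ringbuffer or execlists).
 */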
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u64 exec_start = args->batch_start_offset;
	u32 flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
		if (HAS_BSD2(dev)) {
			int ring_id;

			switch (args->flags & I915_EXEC_BSD_MASK) {
			case I915_EXEC_BSD_DEFAULT:
				ring_id = gen8_dispatch_bsd_ring(dev, file);
				ring = &dev_priv->ring[ring_id];
				break;
			case I915_EXEC_BSD_RING1:
				ring = &dev_priv->ring[VCS];
				break;
			case I915_EXEC_BSD_RING2:
				ring = &dev_priv->ring[VCS2];
				break;
			default:
				DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
					  (int)(args->flags & I915_EXEC_BSD_MASK));
				return -EINVAL;
			}
		} else
			ring = &dev_priv->ring[VCS];
	} else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}

	if (i915_needs_cmd_parser(ring) && args->batch_len) {
		batch_obj = i915_gem_execbuffer_parse(ring,
						      &shadow_exec_entry,
						      eb,
						      batch_obj,
						      args->batch_start_offset,
						      args->batch_len,
						      file->is_master,
						      &flags);
		if (IS_ERR(batch_obj)) {
			ret = PTR_ERR(batch_obj);
			goto err;
		}
	}

	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	} else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
				      &eb->vmas, batch_obj, exec_start, flags);

	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);
err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}