i915_gem_execbuffer.c

  1. /*
  2. * Copyright © 2008,2010 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. * Chris Wilson <chris@chris-wilson.co.uk>
  26. *
  27. */
  28. #include <drm/drmP.h>
  29. #include <drm/i915_drm.h>
  30. #include "i915_drv.h"
  31. #include "i915_trace.h"
  32. #include "intel_drv.h"
  33. #include <linux/dma_remapping.h>
  34. #define __EXEC_OBJECT_HAS_PIN (1<<31)
  35. #define __EXEC_OBJECT_HAS_FENCE (1<<30)
  36. #define __EXEC_OBJECT_NEEDS_MAP (1<<29)
  37. #define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
  38. #define __EXEC_OBJECT_PURGEABLE (1<<27)
  39. #define BATCH_OFFSET_BIAS (256*1024)
  40. struct eb_vmas {
  41. struct list_head vmas;
  42. int and;
  43. union {
  44. struct i915_vma *lut[0];
  45. struct hlist_head buckets[0];
  46. };
  47. };
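/*
 * Allocate the per-execbuffer VMA tracking structure.  With
 * I915_EXEC_HANDLE_LUT, relocation targets are indices into the exec
 * list rather than GEM handles, so a flat lookup table sized to
 * buffer_count is tried first; otherwise (or if that allocation fails)
 * a hash table is used, with eb->and holding the bucket mask.  A
 * negative eb->and marks LUT mode.
 */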
  48. static struct eb_vmas *
  49. eb_create(struct drm_i915_gem_execbuffer2 *args)
  50. {
  51. struct eb_vmas *eb = NULL;
  52. if (args->flags & I915_EXEC_HANDLE_LUT) {
  53. unsigned size = args->buffer_count;
  54. size *= sizeof(struct i915_vma *);
  55. size += sizeof(struct eb_vmas);
  56. eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
  57. }
  58. if (eb == NULL) {
  59. unsigned size = args->buffer_count;
  60. unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
  61. BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
  62. while (count > 2*size)
  63. count >>= 1;
  64. eb = kzalloc(count*sizeof(struct hlist_head) +
  65. sizeof(struct eb_vmas),
  66. GFP_TEMPORARY);
  67. if (eb == NULL)
  68. return eb;
  69. eb->and = count - 1;
  70. } else
  71. eb->and = -args->buffer_count;
  72. INIT_LIST_HEAD(&eb->vmas);
  73. return eb;
  74. }
  75. static void
  76. eb_reset(struct eb_vmas *eb)
  77. {
  78. if (eb->and >= 0)
  79. memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
  80. }
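/*
 * Resolve every exec object handle to a GEM object under the file's
 * table_lock, take a reference, and then (outside the spinlock) look up
 * or create the matching VMA in the target address space.  Each VMA is
 * moved onto eb->vmas and indexed either by list position (LUT mode) or
 * by handle in the hash buckets.
 */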
  81. static int
  82. eb_lookup_vmas(struct eb_vmas *eb,
  83. struct drm_i915_gem_exec_object2 *exec,
  84. const struct drm_i915_gem_execbuffer2 *args,
  85. struct i915_address_space *vm,
  86. struct drm_file *file)
  87. {
  88. struct drm_i915_gem_object *obj;
  89. struct list_head objects;
  90. int i, ret;
  91. INIT_LIST_HEAD(&objects);
  92. spin_lock(&file->table_lock);
  93. /* Grab a reference to the object and release the lock so we can look up
  94. * or create the VMA without using GFP_ATOMIC */
  95. for (i = 0; i < args->buffer_count; i++) {
  96. obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
  97. if (obj == NULL) {
  98. spin_unlock(&file->table_lock);
  99. DRM_DEBUG("Invalid object handle %d at index %d\n",
  100. exec[i].handle, i);
  101. ret = -ENOENT;
  102. goto err;
  103. }
  104. if (!list_empty(&obj->obj_exec_link)) {
  105. spin_unlock(&file->table_lock);
  106. DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
  107. obj, exec[i].handle, i);
  108. ret = -EINVAL;
  109. goto err;
  110. }
  111. drm_gem_object_reference(&obj->base);
  112. list_add_tail(&obj->obj_exec_link, &objects);
  113. }
  114. spin_unlock(&file->table_lock);
  115. i = 0;
  116. while (!list_empty(&objects)) {
  117. struct i915_vma *vma;
  118. obj = list_first_entry(&objects,
  119. struct drm_i915_gem_object,
  120. obj_exec_link);
  121. /*
  122. * NOTE: We can leak any vmas created here when something fails
  123. * later on. But that's no issue since vma_unbind can deal with
  124. * vmas which are not actually bound. And since only
  125. * lookup_or_create exists as an interface to get at the vma
  126. * from the (obj, vm) we don't run the risk of creating
  127. * duplicated vmas for the same vm.
  128. */
  129. vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
  130. if (IS_ERR(vma)) {
  131. DRM_DEBUG("Failed to lookup VMA\n");
  132. ret = PTR_ERR(vma);
  133. goto err;
  134. }
  135. /* Transfer ownership from the objects list to the vmas list. */
  136. list_add_tail(&vma->exec_list, &eb->vmas);
  137. list_del_init(&obj->obj_exec_link);
  138. vma->exec_entry = &exec[i];
  139. if (eb->and < 0) {
  140. eb->lut[i] = vma;
  141. } else {
  142. uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
  143. vma->exec_handle = handle;
  144. hlist_add_head(&vma->exec_node,
  145. &eb->buckets[handle & eb->and]);
  146. }
  147. ++i;
  148. }
  149. return 0;
  150. err:
  151. while (!list_empty(&objects)) {
  152. obj = list_first_entry(&objects,
  153. struct drm_i915_gem_object,
  154. obj_exec_link);
  155. list_del_init(&obj->obj_exec_link);
  156. drm_gem_object_unreference(&obj->base);
  157. }
  158. /*
  159. * Objects already transferred to the vmas list will be unreferenced by
  160. * eb_destroy.
  161. */
  162. return ret;
  163. }
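/* Translate a relocation target handle back to the VMA it refers to. */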
  164. static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
  165. {
  166. if (eb->and < 0) {
  167. if (handle >= -eb->and)
  168. return NULL;
  169. return eb->lut[handle];
  170. } else {
  171. struct hlist_head *head;
  172. struct hlist_node *node;
  173. head = &eb->buckets[handle & eb->and];
  174. hlist_for_each(node, head) {
  175. struct i915_vma *vma;
  176. vma = hlist_entry(node, struct i915_vma, exec_node);
  177. if (vma->exec_handle == handle)
  178. return vma;
  179. }
  180. return NULL;
  181. }
  182. }
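/*
 * Undo the pin (and fence pin) taken while reserving a VMA and, if the
 * entry was flagged purgeable (the shadow batch from the command
 * parser), mark its backing storage as purgeable again.
 */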
  183. static void
  184. i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
  185. {
  186. struct drm_i915_gem_exec_object2 *entry;
  187. struct drm_i915_gem_object *obj = vma->obj;
  188. if (!drm_mm_node_allocated(&vma->node))
  189. return;
  190. entry = vma->exec_entry;
  191. if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
  192. i915_gem_object_unpin_fence(obj);
  193. if (entry->flags & __EXEC_OBJECT_HAS_PIN)
  194. vma->pin_count--;
  195. if (entry->flags & __EXEC_OBJECT_PURGEABLE)
  196. obj->madv = I915_MADV_DONTNEED;
  197. entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
  198. __EXEC_OBJECT_HAS_PIN |
  199. __EXEC_OBJECT_PURGEABLE);
  200. }
  201. static void eb_destroy(struct eb_vmas *eb)
  202. {
  203. while (!list_empty(&eb->vmas)) {
  204. struct i915_vma *vma;
  205. vma = list_first_entry(&eb->vmas,
  206. struct i915_vma,
  207. exec_list);
  208. list_del_init(&vma->exec_list);
  209. i915_gem_execbuffer_unreserve_vma(vma);
  210. drm_gem_object_unreference(&vma->obj->base);
  211. }
  212. kfree(eb);
  213. }
  214. static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
  215. {
  216. return (HAS_LLC(obj->base.dev) ||
  217. obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
  218. obj->cache_level != I915_CACHE_NONE);
  219. }
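/*
 * The three relocation writers below differ only in how the dword (or,
 * on gen8+, the qword split into two dwords that may straddle a page
 * boundary) is written: through a CPU kmap, through an atomic WC
 * mapping of the GTT, or through a kmap with explicit clflushes.
 */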
  220. static int
  221. relocate_entry_cpu(struct drm_i915_gem_object *obj,
  222. struct drm_i915_gem_relocation_entry *reloc,
  223. uint64_t target_offset)
  224. {
  225. struct drm_device *dev = obj->base.dev;
  226. uint32_t page_offset = offset_in_page(reloc->offset);
  227. uint64_t delta = reloc->delta + target_offset;
  228. char *vaddr;
  229. int ret;
  230. ret = i915_gem_object_set_to_cpu_domain(obj, true);
  231. if (ret)
  232. return ret;
  233. vaddr = kmap_atomic(i915_gem_object_get_page(obj,
  234. reloc->offset >> PAGE_SHIFT));
  235. *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
  236. if (INTEL_INFO(dev)->gen >= 8) {
  237. page_offset = offset_in_page(page_offset + sizeof(uint32_t));
  238. if (page_offset == 0) {
  239. kunmap_atomic(vaddr);
  240. vaddr = kmap_atomic(i915_gem_object_get_page(obj,
  241. (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
  242. }
  243. *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
  244. }
  245. kunmap_atomic(vaddr);
  246. return 0;
  247. }
  248. static int
  249. relocate_entry_gtt(struct drm_i915_gem_object *obj,
  250. struct drm_i915_gem_relocation_entry *reloc,
  251. uint64_t target_offset)
  252. {
  253. struct drm_device *dev = obj->base.dev;
  254. struct drm_i915_private *dev_priv = dev->dev_private;
  255. uint64_t delta = reloc->delta + target_offset;
  256. uint64_t offset;
  257. void __iomem *reloc_page;
  258. int ret;
  259. ret = i915_gem_object_set_to_gtt_domain(obj, true);
  260. if (ret)
  261. return ret;
  262. ret = i915_gem_object_put_fence(obj);
  263. if (ret)
  264. return ret;
  265. /* Map the page containing the relocation we're going to perform. */
  266. offset = i915_gem_obj_ggtt_offset(obj);
  267. offset += reloc->offset;
  268. reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
  269. offset & PAGE_MASK);
  270. iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
  271. if (INTEL_INFO(dev)->gen >= 8) {
  272. offset += sizeof(uint32_t);
  273. if (offset_in_page(offset) == 0) {
  274. io_mapping_unmap_atomic(reloc_page);
  275. reloc_page =
  276. io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
  277. offset);
  278. }
  279. iowrite32(upper_32_bits(delta),
  280. reloc_page + offset_in_page(offset));
  281. }
  282. io_mapping_unmap_atomic(reloc_page);
  283. return 0;
  284. }
  285. static void
  286. clflush_write32(void *addr, uint32_t value)
  287. {
  288. /* This is not a fast path, so KISS. */
  289. drm_clflush_virt_range(addr, sizeof(uint32_t));
  290. *(uint32_t *)addr = value;
  291. drm_clflush_virt_range(addr, sizeof(uint32_t));
  292. }
  293. static int
  294. relocate_entry_clflush(struct drm_i915_gem_object *obj,
  295. struct drm_i915_gem_relocation_entry *reloc,
  296. uint64_t target_offset)
  297. {
  298. struct drm_device *dev = obj->base.dev;
  299. uint32_t page_offset = offset_in_page(reloc->offset);
  300. uint64_t delta = (int)reloc->delta + target_offset;
  301. char *vaddr;
  302. int ret;
  303. ret = i915_gem_object_set_to_gtt_domain(obj, true);
  304. if (ret)
  305. return ret;
  306. vaddr = kmap_atomic(i915_gem_object_get_page(obj,
  307. reloc->offset >> PAGE_SHIFT));
  308. clflush_write32(vaddr + page_offset, lower_32_bits(delta));
  309. if (INTEL_INFO(dev)->gen >= 8) {
  310. page_offset = offset_in_page(page_offset + sizeof(uint32_t));
  311. if (page_offset == 0) {
  312. kunmap_atomic(vaddr);
  313. vaddr = kmap_atomic(i915_gem_object_get_page(obj,
  314. (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
  315. }
  316. clflush_write32(vaddr + page_offset, upper_32_bits(delta));
  317. }
  318. kunmap_atomic(vaddr);
  319. return 0;
  320. }
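/*
 * Apply a single relocation entry: validate the read/write domains,
 * skip the write entirely if the target already sits at its presumed
 * offset, otherwise patch the batch through the cpu/gtt/clflush path
 * selected above and update reloc->presumed_offset on success.
 */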
  321. static int
  322. i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
  323. struct eb_vmas *eb,
  324. struct drm_i915_gem_relocation_entry *reloc)
  325. {
  326. struct drm_device *dev = obj->base.dev;
  327. struct drm_gem_object *target_obj;
  328. struct drm_i915_gem_object *target_i915_obj;
  329. struct i915_vma *target_vma;
  330. uint64_t target_offset;
  331. int ret;
  332. /* we already hold a reference to all valid objects */
  333. target_vma = eb_get_vma(eb, reloc->target_handle);
  334. if (unlikely(target_vma == NULL))
  335. return -ENOENT;
  336. target_i915_obj = target_vma->obj;
  337. target_obj = &target_vma->obj->base;
  338. target_offset = target_vma->node.start;
  339. /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
  340. * pipe_control writes because the gpu doesn't properly redirect them
  341. * through the ppgtt for non-secure batchbuffers. */
  342. if (unlikely(IS_GEN6(dev) &&
  343. reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
  344. !(target_vma->bound & GLOBAL_BIND))) {
  345. ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
  346. GLOBAL_BIND);
  347. if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
  348. return ret;
  349. }
  350. /* Validate that the target is in a valid r/w GPU domain */
  351. if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
  352. DRM_DEBUG("reloc with multiple write domains: "
  353. "obj %p target %d offset %d "
  354. "read %08x write %08x",
  355. obj, reloc->target_handle,
  356. (int) reloc->offset,
  357. reloc->read_domains,
  358. reloc->write_domain);
  359. return -EINVAL;
  360. }
  361. if (unlikely((reloc->write_domain | reloc->read_domains)
  362. & ~I915_GEM_GPU_DOMAINS)) {
  363. DRM_DEBUG("reloc with read/write non-GPU domains: "
  364. "obj %p target %d offset %d "
  365. "read %08x write %08x",
  366. obj, reloc->target_handle,
  367. (int) reloc->offset,
  368. reloc->read_domains,
  369. reloc->write_domain);
  370. return -EINVAL;
  371. }
  372. target_obj->pending_read_domains |= reloc->read_domains;
  373. target_obj->pending_write_domain |= reloc->write_domain;
  374. /* If the relocation already has the right value in it, no
  375. * more work needs to be done.
  376. */
  377. if (target_offset == reloc->presumed_offset)
  378. return 0;
  379. /* Check that the relocation address is valid... */
  380. if (unlikely(reloc->offset >
  381. obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
  382. DRM_DEBUG("Relocation beyond object bounds: "
  383. "obj %p target %d offset %d size %d.\n",
  384. obj, reloc->target_handle,
  385. (int) reloc->offset,
  386. (int) obj->base.size);
  387. return -EINVAL;
  388. }
  389. if (unlikely(reloc->offset & 3)) {
  390. DRM_DEBUG("Relocation not 4-byte aligned: "
  391. "obj %p target %d offset %d.\n",
  392. obj, reloc->target_handle,
  393. (int) reloc->offset);
  394. return -EINVAL;
  395. }
  396. /* We can't wait for rendering with pagefaults disabled */
  397. if (obj->active && in_atomic())
  398. return -EFAULT;
  399. if (use_cpu_reloc(obj))
  400. ret = relocate_entry_cpu(obj, reloc, target_offset);
  401. else if (obj->map_and_fenceable)
  402. ret = relocate_entry_gtt(obj, reloc, target_offset);
  403. else if (cpu_has_clflush)
  404. ret = relocate_entry_clflush(obj, reloc, target_offset);
  405. else {
  406. WARN_ONCE(1, "Impossible case in relocation handling\n");
  407. ret = -ENODEV;
  408. }
  409. if (ret)
  410. return ret;
  411. /* and update the user's relocation entry */
  412. reloc->presumed_offset = target_offset;
  413. return 0;
  414. }
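/*
 * Fast relocation path: copy the user relocation entries in small
 * stack-sized chunks using the inatomic copy helpers (the caller has
 * pagefaults disabled) and write any changed presumed_offset straight
 * back to userspace.
 */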
  415. static int
  416. i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
  417. struct eb_vmas *eb)
  418. {
  419. #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
  420. struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
  421. struct drm_i915_gem_relocation_entry __user *user_relocs;
  422. struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
  423. int remain, ret;
  424. user_relocs = to_user_ptr(entry->relocs_ptr);
  425. remain = entry->relocation_count;
  426. while (remain) {
  427. struct drm_i915_gem_relocation_entry *r = stack_reloc;
  428. int count = remain;
  429. if (count > ARRAY_SIZE(stack_reloc))
  430. count = ARRAY_SIZE(stack_reloc);
  431. remain -= count;
  432. if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
  433. return -EFAULT;
  434. do {
  435. u64 offset = r->presumed_offset;
  436. ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
  437. if (ret)
  438. return ret;
  439. if (r->presumed_offset != offset &&
  440. __copy_to_user_inatomic(&user_relocs->presumed_offset,
  441. &r->presumed_offset,
  442. sizeof(r->presumed_offset))) {
  443. return -EFAULT;
  444. }
  445. user_relocs++;
  446. r++;
  447. } while (--count);
  448. }
  449. return 0;
  450. #undef N_RELOC
  451. }
  452. static int
  453. i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
  454. struct eb_vmas *eb,
  455. struct drm_i915_gem_relocation_entry *relocs)
  456. {
  457. const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
  458. int i, ret;
  459. for (i = 0; i < entry->relocation_count; i++) {
  460. ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
  461. if (ret)
  462. return ret;
  463. }
  464. return 0;
  465. }
  466. static int
  467. i915_gem_execbuffer_relocate(struct eb_vmas *eb)
  468. {
  469. struct i915_vma *vma;
  470. int ret = 0;
  471. /* This is the fast path and we cannot handle a pagefault whilst
  472. * holding the struct mutex lest the user pass in the relocations
  473. * contained within an mmapped bo. In such a case the page
  474. * fault handler would call i915_gem_fault() and we would try to
  475. * acquire the struct mutex again. Obviously this is bad and so
  476. * lockdep complains vehemently.
  477. */
  478. pagefault_disable();
  479. list_for_each_entry(vma, &eb->vmas, exec_list) {
  480. ret = i915_gem_execbuffer_relocate_vma(vma, eb);
  481. if (ret)
  482. break;
  483. }
  484. pagefault_enable();
  485. return ret;
  486. }
  487. static bool only_mappable_for_reloc(unsigned int flags)
  488. {
  489. return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
  490. __EXEC_OBJECT_NEEDS_MAP;
  491. }
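/*
 * Pin a single VMA for execution, translating the exec entry flags into
 * pin flags (global, mappable, offset bias).  A mappable pin that is
 * only needed for relocations is retried without the mappable
 * requirement if it fails for lack of space; a fence is acquired here
 * for entries that need one.
 */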
  492. static int
  493. i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
  494. struct intel_engine_cs *ring,
  495. bool *need_reloc)
  496. {
  497. struct drm_i915_gem_object *obj = vma->obj;
  498. struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
  499. uint64_t flags;
  500. int ret;
  501. flags = 0;
  502. if (!drm_mm_node_allocated(&vma->node)) {
  503. if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
  504. flags |= PIN_GLOBAL | PIN_MAPPABLE;
  505. if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
  506. flags |= PIN_GLOBAL;
  507. if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
  508. flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
  509. }
  510. ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
  511. if ((ret == -ENOSPC || ret == -E2BIG) &&
  512. only_mappable_for_reloc(entry->flags))
  513. ret = i915_gem_object_pin(obj, vma->vm,
  514. entry->alignment,
  515. flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
  516. if (ret)
  517. return ret;
  518. entry->flags |= __EXEC_OBJECT_HAS_PIN;
  519. if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
  520. ret = i915_gem_object_get_fence(obj);
  521. if (ret)
  522. return ret;
  523. if (i915_gem_object_pin_fence(obj))
  524. entry->flags |= __EXEC_OBJECT_HAS_FENCE;
  525. }
  526. if (entry->offset != vma->node.start) {
  527. entry->offset = vma->node.start;
  528. *need_reloc = true;
  529. }
  530. if (entry->flags & EXEC_OBJECT_WRITE) {
  531. obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
  532. obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
  533. }
  534. return 0;
  535. }
  536. static bool
  537. need_reloc_mappable(struct i915_vma *vma)
  538. {
  539. struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
  540. if (entry->relocation_count == 0)
  541. return false;
  542. if (!i915_is_ggtt(vma->vm))
  543. return false;
  544. /* See also use_cpu_reloc() */
  545. if (HAS_LLC(vma->obj->base.dev))
  546. return false;
  547. if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
  548. return false;
  549. return true;
  550. }
  551. static bool
  552. eb_vma_misplaced(struct i915_vma *vma)
  553. {
  554. struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
  555. struct drm_i915_gem_object *obj = vma->obj;
  556. WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
  557. !i915_is_ggtt(vma->vm));
  558. if (entry->alignment &&
  559. vma->node.start & (entry->alignment - 1))
  560. return true;
  561. if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
  562. vma->node.start < BATCH_OFFSET_BIAS)
  563. return true;
  564. /* avoid costly ping-pong once a batch bo ended up non-mappable */
  565. if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
  566. return !only_mappable_for_reloc(entry->flags);
  567. return false;
  568. }
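/*
 * Reserve GTT space for every VMA in the execbuffer.  Objects needing a
 * fence or a mappable relocation are sorted to the front, then the
 * pin/evict loop described below binds everything, evicting the whole
 * VM and retrying once should the first pass run out of space.
 */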
  569. static int
  570. i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
  571. struct list_head *vmas,
  572. bool *need_relocs)
  573. {
  574. struct drm_i915_gem_object *obj;
  575. struct i915_vma *vma;
  576. struct i915_address_space *vm;
  577. struct list_head ordered_vmas;
  578. bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
  579. int retry;
  580. i915_gem_retire_requests_ring(ring);
  581. vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
  582. INIT_LIST_HEAD(&ordered_vmas);
  583. while (!list_empty(vmas)) {
  584. struct drm_i915_gem_exec_object2 *entry;
  585. bool need_fence, need_mappable;
  586. vma = list_first_entry(vmas, struct i915_vma, exec_list);
  587. obj = vma->obj;
  588. entry = vma->exec_entry;
  589. if (!has_fenced_gpu_access)
  590. entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
  591. need_fence =
  592. entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
  593. obj->tiling_mode != I915_TILING_NONE;
  594. need_mappable = need_fence || need_reloc_mappable(vma);
  595. if (need_mappable) {
  596. entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
  597. list_move(&vma->exec_list, &ordered_vmas);
  598. } else
  599. list_move_tail(&vma->exec_list, &ordered_vmas);
  600. obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
  601. obj->base.pending_write_domain = 0;
  602. }
  603. list_splice(&ordered_vmas, vmas);
  604. /* Attempt to pin all of the buffers into the GTT.
  605. * This is done in 3 phases:
  606. *
  607. * 1a. Unbind all objects that do not match the GTT constraints for
  608. * the execbuffer (fenceable, mappable, alignment etc).
  609. * 1b. Increment pin count for already bound objects.
  610. * 2. Bind new objects.
  611. * 3. Decrement pin count.
  612. *
  613. * This avoids unnecessary unbinding of later objects in order to make
  614. * room for the earlier objects *unless* we need to defragment.
  615. */
  616. retry = 0;
  617. do {
  618. int ret = 0;
  619. /* Unbind any ill-fitting objects or pin. */
  620. list_for_each_entry(vma, vmas, exec_list) {
  621. if (!drm_mm_node_allocated(&vma->node))
  622. continue;
  623. if (eb_vma_misplaced(vma))
  624. ret = i915_vma_unbind(vma);
  625. else
  626. ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
  627. if (ret)
  628. goto err;
  629. }
  630. /* Bind fresh objects */
  631. list_for_each_entry(vma, vmas, exec_list) {
  632. if (drm_mm_node_allocated(&vma->node))
  633. continue;
  634. ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
  635. if (ret)
  636. goto err;
  637. }
  638. err:
  639. if (ret != -ENOSPC || retry++)
  640. return ret;
  641. /* Decrement pin count for bound objects */
  642. list_for_each_entry(vma, vmas, exec_list)
  643. i915_gem_execbuffer_unreserve_vma(vma);
  644. ret = i915_gem_evict_vm(vm, true);
  645. if (ret)
  646. return ret;
  647. } while (1);
  648. }
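/*
 * Slow relocation path, entered when the atomic fast path faulted: all
 * buffers are unreserved and struct_mutex dropped so the relocation
 * arrays can be copied from userspace with faults allowed, the presumed
 * offsets in userspace are poisoned, and after retaking the mutex the
 * objects are looked up and reserved again and the copied relocations
 * replayed.
 */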
  649. static int
  650. i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
  651. struct drm_i915_gem_execbuffer2 *args,
  652. struct drm_file *file,
  653. struct intel_engine_cs *ring,
  654. struct eb_vmas *eb,
  655. struct drm_i915_gem_exec_object2 *exec)
  656. {
  657. struct drm_i915_gem_relocation_entry *reloc;
  658. struct i915_address_space *vm;
  659. struct i915_vma *vma;
  660. bool need_relocs;
  661. int *reloc_offset;
  662. int i, total, ret;
  663. unsigned count = args->buffer_count;
  664. vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
  665. /* We may process another execbuffer during the unlock... */
  666. while (!list_empty(&eb->vmas)) {
  667. vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
  668. list_del_init(&vma->exec_list);
  669. i915_gem_execbuffer_unreserve_vma(vma);
  670. drm_gem_object_unreference(&vma->obj->base);
  671. }
  672. mutex_unlock(&dev->struct_mutex);
  673. total = 0;
  674. for (i = 0; i < count; i++)
  675. total += exec[i].relocation_count;
  676. reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
  677. reloc = drm_malloc_ab(total, sizeof(*reloc));
  678. if (reloc == NULL || reloc_offset == NULL) {
  679. drm_free_large(reloc);
  680. drm_free_large(reloc_offset);
  681. mutex_lock(&dev->struct_mutex);
  682. return -ENOMEM;
  683. }
  684. total = 0;
  685. for (i = 0; i < count; i++) {
  686. struct drm_i915_gem_relocation_entry __user *user_relocs;
  687. u64 invalid_offset = (u64)-1;
  688. int j;
  689. user_relocs = to_user_ptr(exec[i].relocs_ptr);
  690. if (copy_from_user(reloc+total, user_relocs,
  691. exec[i].relocation_count * sizeof(*reloc))) {
  692. ret = -EFAULT;
  693. mutex_lock(&dev->struct_mutex);
  694. goto err;
  695. }
  696. /* As we do not update the known relocation offsets after
  697. * relocating (due to the complexities in lock handling),
  698. * we need to mark them as invalid now so that we force the
  699. * relocation processing next time. Just in case the target
  700. * object is evicted and then rebound into its old
  701. * presumed_offset before the next execbuffer - if that
  702. * happened we would make the mistake of assuming that the
  703. * relocations were valid.
  704. */
  705. for (j = 0; j < exec[i].relocation_count; j++) {
  706. if (__copy_to_user(&user_relocs[j].presumed_offset,
  707. &invalid_offset,
  708. sizeof(invalid_offset))) {
  709. ret = -EFAULT;
  710. mutex_lock(&dev->struct_mutex);
  711. goto err;
  712. }
  713. }
  714. reloc_offset[i] = total;
  715. total += exec[i].relocation_count;
  716. }
  717. ret = i915_mutex_lock_interruptible(dev);
  718. if (ret) {
  719. mutex_lock(&dev->struct_mutex);
  720. goto err;
  721. }
  722. /* reacquire the objects */
  723. eb_reset(eb);
  724. ret = eb_lookup_vmas(eb, exec, args, vm, file);
  725. if (ret)
  726. goto err;
  727. need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
  728. ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
  729. if (ret)
  730. goto err;
  731. list_for_each_entry(vma, &eb->vmas, exec_list) {
  732. int offset = vma->exec_entry - exec;
  733. ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
  734. reloc + reloc_offset[offset]);
  735. if (ret)
  736. goto err;
  737. }
  738. /* Leave the user relocations as they are; this is the painfully slow path,
  739. * and we want to avoid the complication of dropping the lock whilst
  740. * having buffers reserved in the aperture and so causing spurious
  741. * ENOSPC for random operations.
  742. */
  743. err:
  744. drm_free_large(reloc);
  745. drm_free_large(reloc_offset);
  746. return ret;
  747. }
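/*
 * Prepare every object for execution on @ring: synchronise against
 * rendering on other rings, flush CPU write domains (clflush plus an
 * optional chipset flush), order GTT writes with a wmb(), and finally
 * invalidate the GPU caches before the batch runs.
 */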
  748. static int
  749. i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
  750. struct list_head *vmas)
  751. {
  752. struct i915_vma *vma;
  753. uint32_t flush_domains = 0;
  754. bool flush_chipset = false;
  755. int ret;
  756. list_for_each_entry(vma, vmas, exec_list) {
  757. struct drm_i915_gem_object *obj = vma->obj;
  758. ret = i915_gem_object_sync(obj, ring);
  759. if (ret)
  760. return ret;
  761. if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
  762. flush_chipset |= i915_gem_clflush_object(obj, false);
  763. flush_domains |= obj->base.write_domain;
  764. }
  765. if (flush_chipset)
  766. i915_gem_chipset_flush(ring->dev);
  767. if (flush_domains & I915_GEM_DOMAIN_GTT)
  768. wmb();
  769. /* Unconditionally invalidate gpu caches and ensure that we do flush
  770. * any residual writes from the previous batch.
  771. */
  772. return intel_ring_invalidate_all_caches(ring);
  773. }
  774. static bool
  775. i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
  776. {
  777. if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
  778. return false;
  779. return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
  780. }
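/*
 * Validate the user-supplied exec object array before any locks are
 * taken: reject unknown per-object flags, guard against overflow of the
 * worst-case single relocation allocation, and check (and optionally
 * prefault) each relocation pointer for write access.
 */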
  781. static int
  782. validate_exec_list(struct drm_device *dev,
  783. struct drm_i915_gem_exec_object2 *exec,
  784. int count)
  785. {
  786. unsigned relocs_total = 0;
  787. unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
  788. unsigned invalid_flags;
  789. int i;
  790. invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
  791. if (USES_FULL_PPGTT(dev))
  792. invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
  793. for (i = 0; i < count; i++) {
  794. char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
  795. int length; /* limited by fault_in_pages_readable() */
  796. if (exec[i].flags & invalid_flags)
  797. return -EINVAL;
  798. /* First check for malicious input causing overflow in
  799. * the worst case where we need to allocate the entire
  800. * relocation tree as a single array.
  801. */
  802. if (exec[i].relocation_count > relocs_max - relocs_total)
  803. return -EINVAL;
  804. relocs_total += exec[i].relocation_count;
  805. length = exec[i].relocation_count *
  806. sizeof(struct drm_i915_gem_relocation_entry);
  807. /*
  808. * We must check that the entire relocation array is safe
  809. * to read, but since we may need to update the presumed
  810. * offsets during execution, check for full write access.
  811. */
  812. if (!access_ok(VERIFY_WRITE, ptr, length))
  813. return -EFAULT;
  814. if (likely(!i915.prefault_disable)) {
  815. if (fault_in_multipages_readable(ptr, length))
  816. return -EFAULT;
  817. }
  818. }
  819. return 0;
  820. }
  821. static struct intel_context *
  822. i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
  823. struct intel_engine_cs *ring, const u32 ctx_id)
  824. {
  825. struct intel_context *ctx = NULL;
  826. struct i915_ctx_hang_stats *hs;
  827. if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
  828. return ERR_PTR(-EINVAL);
  829. ctx = i915_gem_context_get(file->driver_priv, ctx_id);
  830. if (IS_ERR(ctx))
  831. return ctx;
  832. hs = &ctx->hang_stats;
  833. if (hs->banned) {
  834. DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
  835. return ERR_PTR(-EIO);
  836. }
  837. if (i915.enable_execlists && !ctx->engine[ring->id].state) {
  838. int ret = intel_lr_context_deferred_create(ctx, ring);
  839. if (ret) {
  840. DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
  841. return ERR_PTR(ret);
  842. }
  843. }
  844. return ctx;
  845. }
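/*
 * After submission, fold the pending read/write domains into each
 * object, mark its VMA active on @ring, record the request for the last
 * write and fence, and bump fenced objects in the fence LRU.
 */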
  846. void
  847. i915_gem_execbuffer_move_to_active(struct list_head *vmas,
  848. struct intel_engine_cs *ring)
  849. {
  850. struct drm_i915_gem_request *req = intel_ring_get_request(ring);
  851. struct i915_vma *vma;
  852. list_for_each_entry(vma, vmas, exec_list) {
  853. struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
  854. struct drm_i915_gem_object *obj = vma->obj;
  855. u32 old_read = obj->base.read_domains;
  856. u32 old_write = obj->base.write_domain;
  857. obj->base.write_domain = obj->base.pending_write_domain;
  858. if (obj->base.write_domain == 0)
  859. obj->base.pending_read_domains |= obj->base.read_domains;
  860. obj->base.read_domains = obj->base.pending_read_domains;
  861. i915_vma_move_to_active(vma, ring);
  862. if (obj->base.write_domain) {
  863. obj->dirty = 1;
  864. i915_gem_request_assign(&obj->last_write_req, req);
  865. intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
  866. /* update for the implicit flush after a batch */
  867. obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
  868. }
  869. if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
  870. i915_gem_request_assign(&obj->last_fenced_req, req);
  871. if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
  872. struct drm_i915_private *dev_priv = to_i915(ring->dev);
  873. list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
  874. &dev_priv->mm.fence_list);
  875. }
  876. }
  877. trace_i915_gem_object_change_domain(obj, old_read, old_write);
  878. }
  879. }
  880. void
  881. i915_gem_execbuffer_retire_commands(struct drm_device *dev,
  882. struct drm_file *file,
  883. struct intel_engine_cs *ring,
  884. struct drm_i915_gem_object *obj)
  885. {
  886. /* Unconditionally force add_request to emit a full flush. */
  887. ring->gpu_caches_dirty = true;
  888. /* Add a breadcrumb for the completion of the batch buffer */
  889. (void)__i915_add_request(ring, file, obj);
  890. }
  891. static int
  892. i915_reset_gen7_sol_offsets(struct drm_device *dev,
  893. struct intel_engine_cs *ring)
  894. {
  895. struct drm_i915_private *dev_priv = dev->dev_private;
  896. int ret, i;
  897. if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
  898. DRM_DEBUG("sol reset is gen7/rcs only\n");
  899. return -EINVAL;
  900. }
  901. ret = intel_ring_begin(ring, 4 * 3);
  902. if (ret)
  903. return ret;
  904. for (i = 0; i < 4; i++) {
  905. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  906. intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
  907. intel_ring_emit(ring, 0);
  908. }
  909. intel_ring_advance(ring);
  910. return 0;
  911. }
  912. static int
  913. i915_emit_box(struct intel_engine_cs *ring,
  914. struct drm_clip_rect *box,
  915. int DR1, int DR4)
  916. {
  917. int ret;
  918. if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
  919. box->y2 <= 0 || box->x2 <= 0) {
  920. DRM_ERROR("Bad box %d,%d..%d,%d\n",
  921. box->x1, box->y1, box->x2, box->y2);
  922. return -EINVAL;
  923. }
  924. if (INTEL_INFO(ring->dev)->gen >= 4) {
  925. ret = intel_ring_begin(ring, 4);
  926. if (ret)
  927. return ret;
  928. intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
  929. intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
  930. intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
  931. intel_ring_emit(ring, DR4);
  932. } else {
  933. ret = intel_ring_begin(ring, 6);
  934. if (ret)
  935. return ret;
  936. intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
  937. intel_ring_emit(ring, DR1);
  938. intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
  939. intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
  940. intel_ring_emit(ring, DR4);
  941. intel_ring_emit(ring, 0);
  942. }
  943. intel_ring_advance(ring);
  944. return 0;
  945. }
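/*
 * Run the command parser over the userspace batch: the contents are
 * copied into a shadow object taken from the batch pool, which is then
 * pinned into the ggtt and spliced into the eb list so the normal
 * unreserve path unpins it and marks it purgeable again.  The shadow is
 * returned as the batch to execute; a chained batch that the parser
 * cannot handle falls back to the original object.
 */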
  946. static struct drm_i915_gem_object*
  947. i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
  948. struct drm_i915_gem_exec_object2 *shadow_exec_entry,
  949. struct eb_vmas *eb,
  950. struct drm_i915_gem_object *batch_obj,
  951. u32 batch_start_offset,
  952. u32 batch_len,
  953. bool is_master)
  954. {
  955. struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
  956. struct drm_i915_gem_object *shadow_batch_obj;
  957. struct i915_vma *vma;
  958. int ret;
  959. shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
  960. PAGE_ALIGN(batch_len));
  961. if (IS_ERR(shadow_batch_obj))
  962. return shadow_batch_obj;
  963. ret = i915_parse_cmds(ring,
  964. batch_obj,
  965. shadow_batch_obj,
  966. batch_start_offset,
  967. batch_len,
  968. is_master);
  969. if (ret)
  970. goto err;
  971. ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
  972. if (ret)
  973. goto err;
  974. memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
  975. vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
  976. vma->exec_entry = shadow_exec_entry;
  977. vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
  978. drm_gem_object_reference(&shadow_batch_obj->base);
  979. list_add_tail(&vma->exec_list, &eb->vmas);
  980. shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
  981. return shadow_batch_obj;
  982. err:
  983. if (ret == -EACCES) /* unhandled chained batch */
  984. return batch_obj;
  985. else
  986. return ERR_PTR(ret);
  987. }
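/*
 * Ringbuffer (legacy) submission backend: flush/sync the objects for the
 * GPU, switch contexts, emit any constants-mode, SOL-reset and cliprect
 * state, dispatch the batch, and finally move the objects to the active
 * lists and add the request that retires them.
 */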
  988. int
  989. i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
  990. struct intel_engine_cs *ring,
  991. struct intel_context *ctx,
  992. struct drm_i915_gem_execbuffer2 *args,
  993. struct list_head *vmas,
  994. struct drm_i915_gem_object *batch_obj,
  995. u64 exec_start, u32 dispatch_flags)
  996. {
  997. struct drm_clip_rect *cliprects = NULL;
  998. struct drm_i915_private *dev_priv = dev->dev_private;
  999. u64 exec_len;
  1000. int instp_mode;
  1001. u32 instp_mask;
  1002. int i, ret = 0;
  1003. if (args->num_cliprects != 0) {
  1004. if (ring != &dev_priv->ring[RCS]) {
  1005. DRM_DEBUG("clip rectangles are only valid with the render ring\n");
  1006. return -EINVAL;
  1007. }
  1008. if (INTEL_INFO(dev)->gen >= 5) {
  1009. DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
  1010. return -EINVAL;
  1011. }
  1012. if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
  1013. DRM_DEBUG("execbuf with %u cliprects\n",
  1014. args->num_cliprects);
  1015. return -EINVAL;
  1016. }
  1017. cliprects = kcalloc(args->num_cliprects,
  1018. sizeof(*cliprects),
  1019. GFP_KERNEL);
  1020. if (cliprects == NULL) {
  1021. ret = -ENOMEM;
  1022. goto error;
  1023. }
  1024. if (copy_from_user(cliprects,
  1025. to_user_ptr(args->cliprects_ptr),
  1026. sizeof(*cliprects)*args->num_cliprects)) {
  1027. ret = -EFAULT;
  1028. goto error;
  1029. }
  1030. } else {
  1031. if (args->DR4 == 0xffffffff) {
  1032. DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
  1033. args->DR4 = 0;
  1034. }
  1035. if (args->DR1 || args->DR4 || args->cliprects_ptr) {
  1036. DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
  1037. return -EINVAL;
  1038. }
  1039. }
  1040. ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
  1041. if (ret)
  1042. goto error;
  1043. ret = i915_switch_context(ring, ctx);
  1044. if (ret)
  1045. goto error;
  1046. if (ctx->ppgtt)
  1047. WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
  1048. "%s didn't clear reload\n", ring->name);
  1049. else if (dev_priv->mm.aliasing_ppgtt)
  1050. WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
  1051. (1<<ring->id), "%s didn't clear reload\n", ring->name);
  1052. instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
  1053. instp_mask = I915_EXEC_CONSTANTS_MASK;
  1054. switch (instp_mode) {
  1055. case I915_EXEC_CONSTANTS_REL_GENERAL:
  1056. case I915_EXEC_CONSTANTS_ABSOLUTE:
  1057. case I915_EXEC_CONSTANTS_REL_SURFACE:
  1058. if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
  1059. DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
  1060. ret = -EINVAL;
  1061. goto error;
  1062. }
  1063. if (instp_mode != dev_priv->relative_constants_mode) {
  1064. if (INTEL_INFO(dev)->gen < 4) {
  1065. DRM_DEBUG("no rel constants on pre-gen4\n");
  1066. ret = -EINVAL;
  1067. goto error;
  1068. }
  1069. if (INTEL_INFO(dev)->gen > 5 &&
  1070. instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
  1071. DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
  1072. ret = -EINVAL;
  1073. goto error;
  1074. }
  1075. /* The HW changed the meaning on this bit on gen6 */
  1076. if (INTEL_INFO(dev)->gen >= 6)
  1077. instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
  1078. }
  1079. break;
  1080. default:
  1081. DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
  1082. ret = -EINVAL;
  1083. goto error;
  1084. }
  1085. if (ring == &dev_priv->ring[RCS] &&
  1086. instp_mode != dev_priv->relative_constants_mode) {
  1087. ret = intel_ring_begin(ring, 4);
  1088. if (ret)
  1089. goto error;
  1090. intel_ring_emit(ring, MI_NOOP);
  1091. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  1092. intel_ring_emit(ring, INSTPM);
  1093. intel_ring_emit(ring, instp_mask << 16 | instp_mode);
  1094. intel_ring_advance(ring);
  1095. dev_priv->relative_constants_mode = instp_mode;
  1096. }
  1097. if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
  1098. ret = i915_reset_gen7_sol_offsets(dev, ring);
  1099. if (ret)
  1100. goto error;
  1101. }
  1102. exec_len = args->batch_len;
  1103. if (cliprects) {
  1104. for (i = 0; i < args->num_cliprects; i++) {
  1105. ret = i915_emit_box(ring, &cliprects[i],
  1106. args->DR1, args->DR4);
  1107. if (ret)
  1108. goto error;
  1109. ret = ring->dispatch_execbuffer(ring,
  1110. exec_start, exec_len,
  1111. dispatch_flags);
  1112. if (ret)
  1113. goto error;
  1114. }
  1115. } else {
  1116. ret = ring->dispatch_execbuffer(ring,
  1117. exec_start, exec_len,
  1118. dispatch_flags);
  1119. if (ret)
  1120. return ret;
  1121. }
  1122. trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
  1123. i915_gem_execbuffer_move_to_active(vmas, ring);
  1124. i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
  1125. error:
  1126. kfree(cliprects);
  1127. return ret;
  1128. }
  1129. /**
  1130. * Find one BSD ring to dispatch the corresponding BSD command.
  1131. * The Ring ID is returned.
  1132. */
  1133. static int gen8_dispatch_bsd_ring(struct drm_device *dev,
  1134. struct drm_file *file)
  1135. {
  1136. struct drm_i915_private *dev_priv = dev->dev_private;
  1137. struct drm_i915_file_private *file_priv = file->driver_priv;
  1138. /* Check whether this file_priv has already been assigned a BSD ring */
  1139. if (file_priv->bsd_ring)
  1140. return file_priv->bsd_ring->id;
  1141. else {
  1142. /* If not, use the ping-pong mechanism to select one ring */
  1143. int ring_id;
  1144. mutex_lock(&dev->struct_mutex);
  1145. if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
  1146. ring_id = VCS;
  1147. dev_priv->mm.bsd_ring_dispatch_index = 1;
  1148. } else {
  1149. ring_id = VCS2;
  1150. dev_priv->mm.bsd_ring_dispatch_index = 0;
  1151. }
  1152. file_priv->bsd_ring = &dev_priv->ring[ring_id];
  1153. mutex_unlock(&dev->struct_mutex);
  1154. return ring_id;
  1155. }
  1156. }
  1157. static struct drm_i915_gem_object *
  1158. eb_get_batch(struct eb_vmas *eb)
  1159. {
  1160. struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
  1161. /*
  1162. * SNA is doing fancy tricks with compressing batch buffers, which leads
  1163. * to negative relocation deltas. Usually that works out ok since the
  1164. * relocate address is still positive, except when the batch is placed
  1165. * very low in the GTT. Ensure this doesn't happen.
  1166. *
  1167. * Note that actual hangs have only been observed on gen7, but for
  1168. * paranoia do it everywhere.
  1169. */
  1170. vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
  1171. return vma->obj;
  1172. }
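/*
 * Common execbuffer path shared by both ioctls: validate the arguments,
 * pick the target ring and context, look up and reserve every object,
 * apply relocations (falling back to the slow path on a fault), run the
 * command parser and secure-dispatch fixups on the batch, and hand the
 * result to the ring-specific submission backend.
 */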
  1173. static int
  1174. i915_gem_do_execbuffer(struct drm_device *dev, void *data,
  1175. struct drm_file *file,
  1176. struct drm_i915_gem_execbuffer2 *args,
  1177. struct drm_i915_gem_exec_object2 *exec)
  1178. {
  1179. struct drm_i915_private *dev_priv = dev->dev_private;
  1180. struct eb_vmas *eb;
  1181. struct drm_i915_gem_object *batch_obj;
  1182. struct drm_i915_gem_exec_object2 shadow_exec_entry;
  1183. struct intel_engine_cs *ring;
  1184. struct intel_context *ctx;
  1185. struct i915_address_space *vm;
  1186. const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
  1187. u64 exec_start = args->batch_start_offset;
  1188. u32 dispatch_flags;
  1189. int ret;
  1190. bool need_relocs;
  1191. if (!i915_gem_check_execbuffer(args))
  1192. return -EINVAL;
  1193. ret = validate_exec_list(dev, exec, args->buffer_count);
  1194. if (ret)
  1195. return ret;
  1196. dispatch_flags = 0;
  1197. if (args->flags & I915_EXEC_SECURE) {
  1198. if (!file->is_master || !capable(CAP_SYS_ADMIN))
  1199. return -EPERM;
  1200. dispatch_flags |= I915_DISPATCH_SECURE;
  1201. }
  1202. if (args->flags & I915_EXEC_IS_PINNED)
  1203. dispatch_flags |= I915_DISPATCH_PINNED;
  1204. if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
  1205. DRM_DEBUG("execbuf with unknown ring: %d\n",
  1206. (int)(args->flags & I915_EXEC_RING_MASK));
  1207. return -EINVAL;
  1208. }
  1209. if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
  1210. ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
  1211. DRM_DEBUG("execbuf with non bsd ring but with invalid "
  1212. "bsd dispatch flags: %d\n", (int)(args->flags));
  1213. return -EINVAL;
  1214. }
  1215. if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
  1216. ring = &dev_priv->ring[RCS];
  1217. else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
  1218. if (HAS_BSD2(dev)) {
  1219. int ring_id;
  1220. switch (args->flags & I915_EXEC_BSD_MASK) {
  1221. case I915_EXEC_BSD_DEFAULT:
  1222. ring_id = gen8_dispatch_bsd_ring(dev, file);
  1223. ring = &dev_priv->ring[ring_id];
  1224. break;
  1225. case I915_EXEC_BSD_RING1:
  1226. ring = &dev_priv->ring[VCS];
  1227. break;
  1228. case I915_EXEC_BSD_RING2:
  1229. ring = &dev_priv->ring[VCS2];
  1230. break;
  1231. default:
  1232. DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
  1233. (int)(args->flags & I915_EXEC_BSD_MASK));
  1234. return -EINVAL;
  1235. }
  1236. } else
  1237. ring = &dev_priv->ring[VCS];
  1238. } else
  1239. ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
  1240. if (!intel_ring_initialized(ring)) {
  1241. DRM_DEBUG("execbuf with invalid ring: %d\n",
  1242. (int)(args->flags & I915_EXEC_RING_MASK));
  1243. return -EINVAL;
  1244. }
  1245. if (args->buffer_count < 1) {
  1246. DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
  1247. return -EINVAL;
  1248. }
  1249. intel_runtime_pm_get(dev_priv);
  1250. ret = i915_mutex_lock_interruptible(dev);
  1251. if (ret)
  1252. goto pre_mutex_err;
  1253. ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
  1254. if (IS_ERR(ctx)) {
  1255. mutex_unlock(&dev->struct_mutex);
  1256. ret = PTR_ERR(ctx);
  1257. goto pre_mutex_err;
  1258. }
  1259. i915_gem_context_reference(ctx);
  1260. if (ctx->ppgtt)
  1261. vm = &ctx->ppgtt->base;
  1262. else
  1263. vm = &dev_priv->gtt.base;
  1264. eb = eb_create(args);
  1265. if (eb == NULL) {
  1266. i915_gem_context_unreference(ctx);
  1267. mutex_unlock(&dev->struct_mutex);
  1268. ret = -ENOMEM;
  1269. goto pre_mutex_err;
  1270. }
  1271. /* Look up object handles */
  1272. ret = eb_lookup_vmas(eb, exec, args, vm, file);
  1273. if (ret)
  1274. goto err;
  1275. /* take note of the batch buffer before we might reorder the lists */
  1276. batch_obj = eb_get_batch(eb);
  1277. /* Move the objects en-masse into the GTT, evicting if necessary. */
  1278. need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
  1279. ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
  1280. if (ret)
  1281. goto err;
  1282. /* The objects are in their final locations, apply the relocations. */
  1283. if (need_relocs)
  1284. ret = i915_gem_execbuffer_relocate(eb);
  1285. if (ret) {
  1286. if (ret == -EFAULT) {
  1287. ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
  1288. eb, exec);
  1289. BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  1290. }
  1291. if (ret)
  1292. goto err;
  1293. }
  1294. /* Set the pending read domains for the batch buffer to COMMAND */
  1295. if (batch_obj->base.pending_write_domain) {
  1296. DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
  1297. ret = -EINVAL;
  1298. goto err;
  1299. }
  1300. if (i915_needs_cmd_parser(ring) && args->batch_len) {
  1301. batch_obj = i915_gem_execbuffer_parse(ring,
  1302. &shadow_exec_entry,
  1303. eb,
  1304. batch_obj,
  1305. args->batch_start_offset,
  1306. args->batch_len,
  1307. file->is_master);
  1308. if (IS_ERR(batch_obj)) {
  1309. ret = PTR_ERR(batch_obj);
  1310. goto err;
  1311. }
  1312. /*
  1313. * Set the DISPATCH_SECURE bit to remove the NON_SECURE
  1314. * bit from MI_BATCH_BUFFER_START commands issued in the
  1315. * dispatch_execbuffer implementations. We specifically
  1316. * don't want that set when the command parser is
  1317. * enabled.
  1318. *
  1319. * FIXME: with aliasing ppgtt, buffers that should only
  1320. * be in ggtt still end up in the aliasing ppgtt. remove
  1321. * this check when that is fixed.
  1322. */
  1323. if (USES_FULL_PPGTT(dev))
  1324. dispatch_flags |= I915_DISPATCH_SECURE;
  1325. exec_start = 0;
  1326. }
  1327. batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
  1328. /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
  1329. * batch" bit. Hence we need to pin secure batches into the global gtt.
  1330. * hsw should have this fixed, but bdw mucks it up again. */
  1331. if (dispatch_flags & I915_DISPATCH_SECURE) {
  1332. /*
  1333. * So on first glance it looks freaky that we pin the batch here
  1334. * outside of the reservation loop. But:
  1335. * - The batch is already pinned into the relevant ppgtt, so we
  1336. * already have the backing storage fully allocated.
  1337. * - No other BO uses the global gtt (well contexts, but meh),
  1338. * so we don't really have issues with multiple objects not
  1339. * fitting due to fragmentation.
  1340. * So this is actually safe.
  1341. */
  1342. ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
  1343. if (ret)
  1344. goto err;
  1345. exec_start += i915_gem_obj_ggtt_offset(batch_obj);
  1346. } else
  1347. exec_start += i915_gem_obj_offset(batch_obj, vm);
  1348. ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
  1349. &eb->vmas, batch_obj, exec_start,
  1350. dispatch_flags);
  1351. /*
  1352. * FIXME: We crucially rely upon the active tracking for the (ppgtt)
  1353. * batch vma for correctness. For less ugly and less fragility this
  1354. * needs to be adjusted to also track the ggtt batch vma properly as
  1355. * active.
  1356. */
  1357. if (dispatch_flags & I915_DISPATCH_SECURE)
  1358. i915_gem_object_ggtt_unpin(batch_obj);
  1359. err:
  1360. /* the request owns the ref now */
  1361. i915_gem_context_unreference(ctx);
  1362. eb_destroy(eb);
  1363. mutex_unlock(&dev->struct_mutex);
  1364. pre_mutex_err:
  1365. /* intel_gpu_busy should also get a ref, so it will free when the device
  1366. * is really idle. */
  1367. intel_runtime_pm_put(dev_priv);
  1368. return ret;
  1369. }
  1370. /*
  1371. * Legacy execbuffer just creates an exec2 list from the original exec object
  1372. * list array and passes it to the real function.
  1373. */
  1374. int
  1375. i915_gem_execbuffer(struct drm_device *dev, void *data,
  1376. struct drm_file *file)
  1377. {
  1378. struct drm_i915_gem_execbuffer *args = data;
  1379. struct drm_i915_gem_execbuffer2 exec2;
  1380. struct drm_i915_gem_exec_object *exec_list = NULL;
  1381. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  1382. int ret, i;
  1383. if (args->buffer_count < 1) {
  1384. DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
  1385. return -EINVAL;
  1386. }
  1387. /* Copy in the exec list from userland */
  1388. exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
  1389. exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
  1390. if (exec_list == NULL || exec2_list == NULL) {
  1391. DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
  1392. args->buffer_count);
  1393. drm_free_large(exec_list);
  1394. drm_free_large(exec2_list);
  1395. return -ENOMEM;
  1396. }
  1397. ret = copy_from_user(exec_list,
  1398. to_user_ptr(args->buffers_ptr),
  1399. sizeof(*exec_list) * args->buffer_count);
  1400. if (ret != 0) {
  1401. DRM_DEBUG("copy %d exec entries failed %d\n",
  1402. args->buffer_count, ret);
  1403. drm_free_large(exec_list);
  1404. drm_free_large(exec2_list);
  1405. return -EFAULT;
  1406. }
  1407. for (i = 0; i < args->buffer_count; i++) {
  1408. exec2_list[i].handle = exec_list[i].handle;
  1409. exec2_list[i].relocation_count = exec_list[i].relocation_count;
  1410. exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
  1411. exec2_list[i].alignment = exec_list[i].alignment;
  1412. exec2_list[i].offset = exec_list[i].offset;
  1413. if (INTEL_INFO(dev)->gen < 4)
  1414. exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
  1415. else
  1416. exec2_list[i].flags = 0;
  1417. }
  1418. exec2.buffers_ptr = args->buffers_ptr;
  1419. exec2.buffer_count = args->buffer_count;
  1420. exec2.batch_start_offset = args->batch_start_offset;
  1421. exec2.batch_len = args->batch_len;
  1422. exec2.DR1 = args->DR1;
  1423. exec2.DR4 = args->DR4;
  1424. exec2.num_cliprects = args->num_cliprects;
  1425. exec2.cliprects_ptr = args->cliprects_ptr;
  1426. exec2.flags = I915_EXEC_RENDER;
  1427. i915_execbuffer2_set_context_id(exec2, 0);
  1428. ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
  1429. if (!ret) {
  1430. struct drm_i915_gem_exec_object __user *user_exec_list =
  1431. to_user_ptr(args->buffers_ptr);
  1432. /* Copy the new buffer offsets back to the user's exec list. */
  1433. for (i = 0; i < args->buffer_count; i++) {
  1434. ret = __copy_to_user(&user_exec_list[i].offset,
  1435. &exec2_list[i].offset,
  1436. sizeof(user_exec_list[i].offset));
  1437. if (ret) {
  1438. ret = -EFAULT;
  1439. DRM_DEBUG("failed to copy %d exec entries "
  1440. "back to user (%d)\n",
  1441. args->buffer_count, ret);
  1442. break;
  1443. }
  1444. }
  1445. }
  1446. drm_free_large(exec_list);
  1447. drm_free_large(exec2_list);
  1448. return ret;
  1449. }
  1450. int
  1451. i915_gem_execbuffer2(struct drm_device *dev, void *data,
  1452. struct drm_file *file)
  1453. {
  1454. struct drm_i915_gem_execbuffer2 *args = data;
  1455. struct drm_i915_gem_exec_object2 *exec2_list = NULL;
  1456. int ret;
  1457. if (args->buffer_count < 1 ||
  1458. args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
  1459. DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
  1460. return -EINVAL;
  1461. }
  1462. if (args->rsvd2 != 0) {
  1463. DRM_DEBUG("dirty rvsd2 field\n");
  1464. return -EINVAL;
  1465. }
  1466. exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
  1467. GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
  1468. if (exec2_list == NULL)
  1469. exec2_list = drm_malloc_ab(sizeof(*exec2_list),
  1470. args->buffer_count);
  1471. if (exec2_list == NULL) {
  1472. DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
  1473. args->buffer_count);
  1474. return -ENOMEM;
  1475. }
  1476. ret = copy_from_user(exec2_list,
  1477. to_user_ptr(args->buffers_ptr),
  1478. sizeof(*exec2_list) * args->buffer_count);
  1479. if (ret != 0) {
  1480. DRM_DEBUG("copy %d exec entries failed %d\n",
  1481. args->buffer_count, ret);
  1482. drm_free_large(exec2_list);
  1483. return -EFAULT;
  1484. }
  1485. ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
  1486. if (!ret) {
  1487. /* Copy the new buffer offsets back to the user's exec list. */
  1488. struct drm_i915_gem_exec_object2 __user *user_exec_list =
  1489. to_user_ptr(args->buffers_ptr);
  1490. int i;
  1491. for (i = 0; i < args->buffer_count; i++) {
  1492. ret = __copy_to_user(&user_exec_list[i].offset,
  1493. &exec2_list[i].offset,
  1494. sizeof(user_exec_list[i].offset));
  1495. if (ret) {
  1496. ret = -EFAULT;
  1497. DRM_DEBUG("failed to copy %d exec entries "
  1498. "back to user\n",
  1499. args->buffer_count);
  1500. break;
  1501. }
  1502. }
  1503. }
  1504. drm_free_large(exec2_list);
  1505. return ret;
  1506. }