i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS	(0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)
struct i915_execbuffer_params {
	struct drm_device		*dev;
	struct drm_file			*file;
	struct i915_vma			*batch;
	u32				dispatch_flags;
	u32				args_batch_start_offset;
	struct intel_engine_cs		*engine;
	struct i915_gem_context		*ctx;
	struct drm_i915_gem_request	*request;
};

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}
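
/*
 * Illustrative note (not in the original source): with 4 KiB pages and
 * 8-byte hlist_head pointers, eb_create() starts from 4096 / 8 / 2 = 256
 * hash buckets and halves that while it exceeds twice the buffer count.
 * E.g. a 10-object execbuf yields 256 -> 128 -> 64 -> 32 -> 16 buckets,
 * so eb->and == 15 and a handle lands in bucket (handle & 15).
 */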
static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocated address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				  obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_gem_object_put(vma->obj);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}
/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}
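
/*
 * Illustrative note (not in the original source), assuming
 * GEN8_HIGH_ADDRESS_BIT == 47 as defined above: bit 47 is replicated into
 * bits [63:48], e.g.
 *
 *	gen8_canonical_addr(0x0000800000000000ULL) == 0xffff800000000000ULL
 *	gen8_canonical_addr(0x00007fffffffffffULL) == 0x00007fffffffffffULL
 *
 * and gen8_noncanonical_addr() masks back to bits [47:0]:
 *
 *	gen8_noncanonical_addr(0xffff800000000000ULL) == 0x0000800000000000ULL
 */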
static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}
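
/*
 * Illustrative note (not in the original source): the (int) cast above
 * sign-extends the 32-bit delta so userspace may pass negative deltas
 * (see the SNA note in eb_get_batch()).  E.g. target_offset == 0x10000
 * with reloc->delta == 0xfffffff8 (-8) yields 0xfff8 before
 * canonicalisation.
 */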
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}
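
/*
 * Illustrative note (not in the original source): on gen8+ the relocation
 * is a full 64-bit value written as two dwords, so it may straddle a page
 * boundary.  E.g. with 4 KiB pages and reloc->offset == 4092, the low
 * dword is written at byte 4092 of one page, page_offset then wraps to 0,
 * and the high dword is written at byte 0 of the next page after
 * remapping it with kmap_atomic().
 */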
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint64_t delta = relocation_target(reloc, target_offset);
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform. */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(ggtt->mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}
static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}

static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
		       struct drm_i915_gem_relocation_entry *reloc,
		       uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	clflush_write32(vaddr + page_offset, lower_32_bits(delta));

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
	}

	kunmap_atomic(vaddr);

	return 0;
}
static bool object_is_idle(struct drm_i915_gem_object *obj)
{
	unsigned long active = i915_gem_object_get_active(obj);
	int idx;

	for_each_active(active, idx) {
		if (!i915_gem_active_is_idle(&obj->last_read[idx],
					     &obj->base.dev->struct_mutex))
			return false;
	}

	return true;
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (pagefault_disabled() && !object_is_idle(obj))
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else if (obj->map_and_fenceable)
		ret = relocate_entry_gtt(obj, reloc, target_offset);
	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
		ret = relocate_entry_clflush(obj, reloc, target_offset);
	else {
		WARN_ONCE(1, "Impossible case in relocation handling\n");
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}
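
/*
 * Illustrative note (not in the original source): with the 32-byte
 * struct drm_i915_gem_relocation_entry, N_RELOC(512) == 16, so the loop
 * above copies relocations from userspace in batches of at most 16
 * entries into the 512-byte on-stack buffer before applying them.
 */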
static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}
static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}
static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct i915_gem_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
	int retry;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_gem_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_gem_object_put(vma->obj);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}
static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
{
	unsigned int mask;

	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
	mask <<= I915_BO_ACTIVE_SHIFT;

	return mask;
}
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	const unsigned int other_rings = eb_other_engines(req);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->flags & other_rings) {
			ret = i915_gem_object_sync(obj, req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(req->engine->i915);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate GPU caches and TLBs. */
	return req->engine->emit_flush(req, EMIT_INVALIDATE);
}
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}
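
/*
 * Illustrative note (not in the original source): relocs_max protects
 * the slow path's drm_malloc_ab(total, sizeof(*reloc)) from integer
 * overflow; with 32-byte relocation entries it caps the exec list at
 * UINT_MAX / 32 (~134 million) relocations in total.
 */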
static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct i915_gem_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	obj->dirty = 1; /* be paranoid */

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_gem_object_is_active(obj))
		i915_gem_object_get(obj);
	i915_gem_object_set_active(obj, idx);
	i915_gem_active_set(&obj->last_read[idx], req);

	if (flags & EXEC_OBJECT_WRITE) {
		i915_gem_active_set(&obj->last_write, req);

		intel_fb_obj_invalidate(obj, ORIGIN_CS);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		i915_gem_active_set(&obj->last_fence, req);
		if (flags & __EXEC_OBJECT_HAS_FENCE) {
			struct drm_i915_private *dev_priv = req->i915;

			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
				       &dev_priv->mm.fence_list);
		}
	}

	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv;

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (!resv)
		return;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	ww_mutex_lock(&resv->lock, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	ww_mutex_unlock(&resv->lock);
}
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
		eb_export_fence(obj, req, vma->exec_entry->flags);
		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret, i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}
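
/*
 * Illustrative note (not in the original source): intel_ring_begin()
 * above reserves 4 * 3 dwords because each of the four
 * GEN7_SO_WRITE_OFFSET registers is cleared by a three-dword sequence:
 * MI_LOAD_REGISTER_IMM(1), the register offset, and the value 0.
 */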
static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct drm_i915_gem_object *batch_obj,
			  struct eb_vmas *eb,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(engine,
				      batch_obj,
				      shadow_batch_obj,
				      batch_start_offset,
				      batch_len,
				      is_master);
	if (ret)
		goto err;

	ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (ret)
		goto err;

	i915_gem_object_unpin_pages(shadow_batch_obj);

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_list, &eb->vmas);

	return vma;

err:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	if (ret == -EACCES) /* unhandled chained batch */
		return NULL;
	else
		return ERR_PTR(ret);
}
static int
execbuf_submit(struct i915_execbuffer_params *params,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct list_head *vmas)
{
	struct drm_i915_private *dev_priv = params->request->i915;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && params->engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev_priv)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev_priv)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning of this bit on gen6 */
			if (INTEL_INFO(dev_priv)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (params->engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		struct intel_ring *ring = params->request->ring;

		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch->node.start +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch->size;

	ret = params->engine->emit_bb_start(params->request,
					    exec_start, exec_len,
					    params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}
/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0) {
		/* If not, use the ping-pong mechanism to select one. */
		mutex_lock(&dev_priv->drm.struct_mutex);
		file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
		dev_priv->mm.bsd_engine_dispatch_index ^= 1;
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return file_priv->bsd_engine;
}
#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = &dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = &dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!intel_engine_initialized(engine)) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}
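
/*
 * Illustrative note (not in the original source): for the explicit BSD
 * selectors, "bsd_idx >>= I915_EXEC_BSD_SHIFT; bsd_idx--" maps
 * I915_EXEC_BSD_RING1 -> 0 and I915_EXEC_BSD_RING2 -> 1, i.e. the
 * argument _VCS() uses to pick between the two video engines.
 */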
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct eb_vmas *eb;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	engine = eb_select_engine(dev_priv, file, args);
	if (!engine)
		return -EINVAL;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  engine->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_get(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_put(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	params->batch = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
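	/*
	 * -EFAULT from the fast path means a relocation pointer faulted
	 * while struct_mutex was held; the slow path drops the lock,
	 * copies the relocations in from userspace and then retries.
	 */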
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}
	/* Set the pending read domains for the batch buffer to COMMAND */
	if (params->batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
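	/*
	 * Engines that require batch scanning hand the batch to the
	 * command parser here; on acceptance it may return a shadow
	 * (kernel-owned) copy which is dispatched in place of the
	 * user's buffer.
	 */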
	if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
		struct i915_vma *vma;

		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
						params->batch->obj,
						eb,
						args->batch_start_offset,
						args->batch_len,
						drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			params->batch = vma;
		}
	}
	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = params->batch->obj;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (ret)
			goto err;

		params->batch = i915_gem_obj_to_ggtt(obj);
	}
	/* Allocate a request for this batch buffer nice and early. */
	params->request = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(params->request)) {
		ret = PTR_ERR(params->request);
		goto err_batch_unpin;
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	params->request->batch_obj = params->batch->obj;

	ret = i915_gem_request_add_to_client(params->request, file);
	if (ret)
		goto err_request;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev            = dev;
	params->file           = file;
	params->engine         = engine;
	params->dispatch_flags = dispatch_flags;
	params->ctx            = ctx;

	ret = execbuf_submit(params, args, &eb->vmas);
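	/*
	 * Success or failure, we fall through the unwind labels below:
	 * the request allocated above must always be committed with
	 * __i915_add_request() so that ring bookkeeping stays consistent;
	 * its second argument (ret == 0) tells it whether a batch was
	 * actually emitted and so whether caches need flushing.
	 */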
err_request:
	__i915_add_request(params->request, ret == 0);

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. To be less ugly and less fragile, this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(params->batch);

err:
	/* the request owns the ref now */
	i915_gem_context_put(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}
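/*
 * For reference, userspace reaches i915_gem_do_execbuffer() through the
 * EXECBUFFER2 ioctl. A minimal sketch of a caller (illustrative only:
 * "fd" is assumed to be an open i915 DRM fd and "handle" a GEM buffer
 * already filled with a valid batch ending in MI_BATCH_BUFFER_END):
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = handle,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * The batch is taken to be the last entry in the buffers array (see
 * eb_get_batch()), and on success the final offset of every object is
 * written back into the user's exec list.
 */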
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;
	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}
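	/*
	 * The legacy ioctl has no per-object flags, so be conservative:
	 * chipsets before gen4 required a fence register for the GPU to
	 * access tiled buffers, hence mark every object as needing one.
	 */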
	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);
	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}
	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		int i;
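		/*
		 * Offsets are reported back in "canonical form", i.e. with
		 * bit 47 sign-extended upwards, matching how the hardware
		 * interprets 48-bit addresses.
		 */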
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}