amdgpu_gem.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
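
/*
 * GEM object free callback: tears down a prime import if present,
 * unregisters the MMU notifier and drops the reference on the
 * underlying amdgpu_bo.
 */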
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                amdgpu_mn_unregister(robj);
                amdgpu_bo_unref(&robj);
        }
}
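
/*
 * Allocate a backing BO and wrap it in a GEM object. The alignment is
 * raised to at least PAGE_SIZE and a pure VRAM request falls back to
 * VRAM|GTT if the first allocation attempt fails.
 */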
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct amdgpu_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
                /* Maximum bo size is the unpinned gtt size since we use the gtt to
                 * handle vram to system pool migrations.
                 */
                max_size = adev->mc.gtt_size - adev->gart_pin_size;
                if (size > max_size) {
                        DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                                  size >> 20, max_size >> 20);
                        return -ENOMEM;
                }
        }
retry:
        r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
                                initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;

        return 0;
}
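
/*
 * Drop all GEM handles still held by user space: walks every open DRM
 * file and releases the objects left in its handle table, warning about
 * the leftover clients and allocations.
 */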
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev->ddev;
        struct drm_file *file;

        mutex_lock(&ddev->struct_mutex);

        list_for_each_entry(file, &ddev->filelist, lhead) {
                struct drm_gem_object *gobj;
                int handle;

                WARN_ONCE(1, "Still active user space clients!\n");
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
                        drm_gem_object_unreference(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&ddev->struct_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the GEM
 * create and the GEM open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, false);
        if (r)
                return r;

        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);
        return 0;
}
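
/*
 * GEM close callback: drops the per-file bo_va reference and removes the
 * mapping from the VM once the last reference is gone.
 */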
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
        struct amdgpu_device *adev = rbo->adev;
        struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;

        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        amdgpu_vm_bo_rmv(adev, bo_va);
                }
        }
        amdgpu_bo_unreserve(rbo);
}
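
/*
 * Translate -EDEADLK into a GPU reset and return -EAGAIN so user space
 * can retry; all other error codes are passed through unchanged.
 */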
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
        if (r == -EDEADLK) {
                r = amdgpu_gpu_reset(adev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
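
/*
 * GEM create ioctl: GDS/GWS/OA sizes are scaled by their respective
 * shifts, the size is rounded up to a page multiple, the BO is created
 * and a handle to it is returned.
 */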
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_create *args = data;
        uint64_t size = args->in.bo_size;
        struct drm_gem_object *gobj;
        uint32_t handle;
        bool kernel = false;
        int r;

        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
            AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
                kernel = true;
                if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
                        size = size << AMDGPU_GDS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
                        size = size << AMDGPU_GWS_SHIFT;
                else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
                        size = size << AMDGPU_OA_SHIFT;
                else {
                        r = -EINVAL;
                        goto error_unlock;
                }
        }
        size = roundup(size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
                                     args->in.domain_flags,
                                     kernel, &gobj);
        if (r)
                goto error_unlock;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto error_unlock;

        memset(args, 0, sizeof(*args));
        args->out.handle = handle;
        return 0;

error_unlock:
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}
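
/*
 * Userptr ioctl: wraps an existing user memory range in a GEM object.
 * Writable mappings require anonymous memory and an MMU notifier; with
 * AMDGPU_GEM_USERPTR_VALIDATE the pages are bound to GTT immediately.
 */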
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
            AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
            AMDGPU_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
             !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
             !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
                /* if we want to write to it we must require anonymous
                   memory and install an MMU notifier */
                return -EACCES;
        }

        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_CPU, 0,
                                     0, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_amdgpu_bo(gobj);
        bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
        r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
                r = amdgpu_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);

                r = amdgpu_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                amdgpu_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        r = amdgpu_gem_handle_lockup(adev, r);

        return r;
}
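
/*
 * Look up the mmap offset for a handle. Userptr BOs and BOs created with
 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS are rejected with -EPERM.
 */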
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}
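
/*
 * GEM mmap ioctl: thin wrapper around amdgpu_mode_dumb_mmap() that
 * returns the mmap offset in the ioctl output.
 */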
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        union drm_amdgpu_gem_mmap *args = data;
        uint32_t handle = args->in.handle;

        memset(args, 0, sizeof(*args));
        return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
        unsigned long timeout_jiffies;
        ktime_t timeout;

        /* clamp timeout if it's too large */
        if (((int64_t)timeout_ns) < 0)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(timeout) < 0)
                return 0;

        timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
        /* clamp timeout to avoid unsigned -> signed overflow */
        if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies;
}
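
/*
 * Wait-idle ioctl: waits on the buffer's reservation object until all
 * fences have signaled or the timeout expires; a zero timeout only polls
 * the current state.
 */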
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        uint32_t handle = args->in.handle;
        unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
        int r = 0;
        long ret;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
        if (timeout == 0)
                ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        else
                ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

        /* ret == 0 means not signaled,
         * ret > 0 means signaled
         * ret < 0 means interrupted before timeout
         */
        if (ret >= 0) {
                memset(args, 0, sizeof(*args));
                args->out.status = (ret == 0);
        } else
                r = ret;

        drm_gem_object_unreference_unlocked(gobj);
        r = amdgpu_gem_handle_lockup(adev, r);
        return r;
}
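
/*
 * Metadata ioctl: gets or sets the tiling flags and the opaque metadata
 * blob attached to a buffer object, depending on args->op.
 */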
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *filp)
{
        struct drm_amdgpu_gem_metadata *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r = -1;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r != 0))
                goto out;

        if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
                amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
                r = amdgpu_bo_get_metadata(robj, args->data.data,
                                           sizeof(args->data.data),
                                           &args->data.data_size_bytes,
                                           &args->data.flags);
        } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
                if (args->data.data_size_bytes > sizeof(args->data.data)) {
                        r = -EINVAL;
                        goto unreserve;
                }
                r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
                if (!r)
                        r = amdgpu_bo_set_metadata(robj, args->data.data,
                                                   args->data.data_size_bytes,
                                                   args->data.flags);
        }

unreserve:
        amdgpu_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: the VA operation that was requested
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry vm_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

        /* Provide duplicates to avoid -EALREADY */
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r)
                goto error_print;

        amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                   just abort and wait for the next CS */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }
        list_for_each_entry(entry, &duplicates, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                   just abort and wait for the next CS */
                if (domain == AMDGPU_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
                goto error_unreserve;

        if (operation == AMDGPU_VA_OP_MAP)
                r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_print:
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
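
/*
 * VA ioctl: maps or unmaps a buffer object in the per-process GPU virtual
 * address space and, unless AMDGPU_VM_DELAY_UPDATE is set, updates the
 * page tables right away via amdgpu_gem_va_update_vm().
 */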
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *rbo;
        struct amdgpu_bo_va *bo_va;
        struct ttm_validate_buffer tv, tv_pd;
        struct ww_acquire_ctx ticket;
        struct list_head list, duplicates;
        uint32_t invalid_flags, va_flags = 0;
        int r = 0;

        if (!adev->vm_manager.enabled)
                return -ENOTTY;

        if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "va_address 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->va_address,
                        AMDGPU_VA_RESERVED_SIZE);
                return -EINVAL;
        }

        invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
                          AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                return -EINVAL;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
        case AMDGPU_VA_OP_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_amdgpu_bo(gobj);
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
        tv.bo = &rbo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        if (args->operation == AMDGPU_VA_OP_MAP) {
                tv_pd.bo = &fpriv->vm.page_directory->tbo;
                tv_pd.shared = true;
                list_add(&tv_pd.head, &list);
        }
        r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
        if (r) {
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }

        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
                if (args->flags & AMDGPU_VM_PAGE_READABLE)
                        va_flags |= AMDGPU_PTE_READABLE;
                if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
                        va_flags |= AMDGPU_PTE_WRITEABLE;
                if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
                        va_flags |= AMDGPU_PTE_EXECUTABLE;
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
                break;
        case AMDGPU_VA_OP_UNMAP:
                r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
                break;
        default:
                break;
        }
        ttm_eu_backoff_reservation(&ticket, &list);
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
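
/*
 * GEM op ioctl: either copies the buffer's creation info back to user
 * space or updates its preferred placement domains; changing the
 * placement of userptr BOs is not allowed.
 */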
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);

        r = amdgpu_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
                struct drm_amdgpu_gem_create_in info;
                void __user *out = (void __user *)(long)args->value;

                info.bo_size = robj->gem_base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->prefered_domains;
                info.domain_flags = robj->flags;
                amdgpu_bo_unreserve(robj);
                if (copy_to_user(out, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case AMDGPU_GEM_OP_SET_PLACEMENT:
                if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
                        r = -EPERM;
                        amdgpu_bo_unreserve(robj);
                        break;
                }
                robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                        AMDGPU_GEM_DOMAIN_GTT |
                                                        AMDGPU_GEM_DOMAIN_CPU);
                robj->allowed_domains = robj->prefered_domains;
                if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                        robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
                amdgpu_bo_unreserve(robj);
                break;
        default:
                amdgpu_bo_unreserve(robj);
                r = -EINVAL;
        }

out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}
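
/*
 * Create a dumb buffer: pitch and size are derived from width, height and
 * bpp, a CPU accessible VRAM BO is allocated and a handle is returned.
 */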
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = amdgpu_gem_object_create(adev, args->size, 0,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                     ttm_bo_type_device,
                                     &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
        struct drm_gem_object *gobj = ptr;
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
        struct seq_file *m = data;
        unsigned domain;
        const char *placement;
        unsigned pin_count;

        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";
                break;
        case AMDGPU_GEM_DOMAIN_GTT:
                placement = " GTT";
                break;
        case AMDGPU_GEM_DOMAIN_CPU:
        default:
                placement = " CPU";
                break;
        }
        seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
                   id, amdgpu_bo_size(bo), placement,
                   amdgpu_bo_gpu_offset(bo));

        pin_count = ACCESS_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
        seq_printf(m, "\n");

        return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_file *file;
        int r;

        r = mutex_lock_interruptible(&dev->struct_mutex);
        if (r)
                return r;

        list_for_each_entry(file, &dev->filelist, lhead) {
                struct task_struct *task;

                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
                 * still alive (e.g. get_pid(current) => fork() => exit()).
                 * Therefore, we need to protect this ->comm access using RCU.
                 */
                rcu_read_lock();
                task = pid_task(file->pid, PIDTYPE_PID);
                seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
                           task ? task->comm : "<unknown>");
                rcu_read_unlock();

                spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
                spin_unlock(&file->table_lock);
        }

        mutex_unlock(&dev->struct_mutex);
        return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
        {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
        return 0;
}