msm_gem.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                        priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
                int npages)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        dma_addr_t paddr;
        struct page **p;
        int ret, i;

        p = drm_malloc_ab(npages, sizeof(struct page *));
        if (!p)
                return ERR_PTR(-ENOMEM);

        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
        if (ret) {
                drm_free_large(p);
                return ERR_PTR(ret);
        }

        paddr = physaddr(obj);
        for (i = 0; i < npages; i++) {
                p[i] = phys_to_page(paddr);
                paddr += PAGE_SIZE;
        }

        return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p;
                int npages = obj->size >> PAGE_SHIFT;

                if (use_pages(obj))
                        p = drm_gem_get_pages(obj);
                else
                        p = get_pages_vram(obj, npages);

                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }

                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt\n");
                        return ERR_CAST(msm_obj->sgt);
                }

                msm_obj->pages = p;

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}
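
/* Inverse of get_pages(): unmap and free the backing pages and the
 * sg table:
 */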
static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

                sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else {
                        drm_mm_remove_node(msm_obj->vram_node);
                        drm_free_large(msm_obj->pages);
                }

                msm_obj->pages = NULL;
        }
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct page **p;

        mutex_lock(&dev->struct_mutex);
        p = get_pages(obj);
        mutex_unlock(&dev->struct_mutex);

        return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
        /* when we start tracking the pin count, then do something here */
}
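
/* Set up the vma according to the buffer's cache mode (write-combined,
 * uncached, or cached via the object's own shmem file):
 */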
int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);
                vma->vm_pgoff = 0;
                vma->vm_file = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
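
/* Page fault handler: pin the backing pages (allocating them on first
 * fault) and insert the faulting page's pfn into the vma:
 */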
int msm_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /* This should only happen if userspace tries to pass a mmap'd
         * but unfaulted gem bo vaddr into submit ioctl, triggering
         * a page fault while struct_mutex is already held.  This is
         * not a valid use-case so just bail.
         */
        if (priv->struct_mutex_task == current)
                return VM_FAULT_SIGBUS;

        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
        mutex_unlock(&dev->struct_mutex);
out:
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret) {
                dev_err(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;

        mutex_lock(&obj->dev->struct_mutex);
        offset = mmap_offset(obj);
        mutex_unlock(&obj->dev->struct_mutex);

        return offset;
}
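
/* Unmap the object from all address spaces it has been mapped into: */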
static void
put_iova(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
                if (!priv->aspace[id])
                        continue;
                msm_gem_unmap_vma(priv->aspace[id],
                                &msm_obj->domain[id], msm_obj->sgt);
        }
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (!msm_obj->domain[id].iova) {
                struct msm_drm_private *priv = obj->dev->dev_private;
                struct page **pages = get_pages(obj);

                if (IS_ERR(pages))
                        return PTR_ERR(pages);

                if (iommu_present(&platform_bus_type)) {
                        ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
                                        msm_obj->sgt, obj->size >> PAGE_SHIFT);
                } else {
                        msm_obj->domain[id].iova = physaddr(obj);
                }
        }

        if (!ret)
                *iova = msm_obj->domain[id].iova;

        return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        /* this is safe right now because we don't unmap until the
         * bo is deleted:
         */
        if (msm_obj->domain[id].iova) {
                *iova = msm_obj->domain[id].iova;
                return 0;
        }

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_iova_locked(obj, id, iova);
        mutex_unlock(&obj->dev->struct_mutex);

        return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!msm_obj->domain[id].iova);
        return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
        // normally unmap here, but instead just mark that it could be
        // unmapped (if the iova refcnt drops to zero), but then later
        // if another _get_iova_locked() fails we can start unmapping
        // things that are no longer needed..
}
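
/* Dumb buffers are used for unaccelerated scanout, so allocate them
 * scanout-capable and write-combined:
 */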
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto fail;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

fail:
        return ret;
}
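
/* Map the object into kernel address space (vmap'ing the backing pages
 * on first use) and increment the vmap count:
 */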
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);
                if (IS_ERR(pages))
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
                if (msm_obj->vaddr == NULL)
                        return ERR_PTR(-ENOMEM);
        }

        msm_obj->vmap_count++;
        return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
        void *ret;

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);

        return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->vmap_count < 1);
        msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
        mutex_lock(&obj->dev->struct_mutex);
        msm_gem_put_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        if (msm_obj->madv != __MSM_MADV_PURGED)
                msm_obj->madv = madv;

        return (msm_obj->madv != __MSM_MADV_PURGED);
}
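
/* Drop all backing storage of a purgeable object and return its memory
 * to the system:
 */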
void msm_gem_purge(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        WARN_ON(!is_purgeable(msm_obj));
        WARN_ON(obj->import_attach);

        put_iova(obj);

        msm_gem_vunmap(obj);

        put_pages(obj);

        msm_obj->madv = __MSM_MADV_PURGED;

        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
        drm_gem_free_mmap_offset(obj);

        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
                        0, (loff_t)-1);
}
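
/* Drop the kernel-side vmap'ing, once the vmap count has dropped to
 * zero:
 */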
void msm_gem_vunmap(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
                return;

        vunmap(msm_obj->vaddr);
        msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        int i, ret;

        if (!exclusive) {
                /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
                 * which makes this a slightly strange place to call it.  OTOH this
                 * is a convenient can-fail point to hook it in.  (And similar to
                 * how etnaviv and nouveau handle this.)
                 */
                ret = reservation_object_reserve_shared(msm_obj->resv);
                if (ret)
                        return ret;
        }

        fobj = reservation_object_get_list(msm_obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = reservation_object_get_excl(msm_obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        if (!exclusive || !fobj)
                return 0;

        for (i = 0; i < fobj->shared_count; i++) {
                fence = rcu_dereference_protected(fobj->shared[i],
                                reservation_object_held(msm_obj->resv));
                if (fence->context != fctx->context) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
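
/* Attach a fence to the object and move it to the GPU's active list: */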
void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
        msm_obj->gpu = gpu;
        if (exclusive)
                reservation_object_add_excl_fence(msm_obj->resv, fence);
        else
                reservation_object_add_shared_fence(msm_obj->resv, fence);
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        msm_obj->gpu = NULL;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
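
/* Wait for any pending GPU access to the object to complete before CPU
 * access (or just poll, in the MSM_PREP_NOSYNC case):
 */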
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        bool write = !!(op & MSM_PREP_WRITE);
        unsigned long remain =
                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
        long ret;

        ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
                        true, remain);
        if (ret == 0)
                return remain == 0 ? -EBUSY : -ETIMEDOUT;
        else if (ret < 0)
                return ret;

        /* TODO cache maintenance */

        return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
        /* TODO cache maintenance */
        return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
                struct seq_file *m)
{
        if (!dma_fence_is_signaled(fence))
                seq_printf(m, "\t%9s: %s %s seq %u\n", type,
                                fence->ops->get_driver_name(fence),
                                fence->ops->get_timeline_name(fence),
                                fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object *robj = msm_obj->resv;
        struct reservation_object_list *fobj;
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct dma_fence *fence;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
        const char *madv;
        unsigned id;

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        switch (msm_obj->madv) {
        case __MSM_MADV_PURGED:
                madv = " purged";
                break;
        case MSM_MADV_DONTNEED:
                madv = " purgeable";
                break;
        case MSM_MADV_WILLNEED:
        default:
                madv = "";
                break;
        }

        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, msm_obj->vaddr);

        for (id = 0; id < priv->num_aspaces; id++)
                seq_printf(m, " %08llx", msm_obj->domain[id].iova);

        seq_printf(m, " %zu%s\n", obj->size, madv);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_printf(m, " ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        put_iova(obj);

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        drm_free_large(msm_obj->pages);

                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
                msm_gem_vunmap(obj);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle)
{
        struct drm_gem_object *obj;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        obj = msm_gem_new(dev, size, flags);

        mutex_unlock(&dev->struct_mutex);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
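
/* Common setup shared by allocated and imported objects: validate the
 * cache flags, allocate the msm_gem_object, and initialize its
 * reservation object and lists:
 */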
static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct reservation_object *resv,
                struct drm_gem_object **obj)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        bool use_vram = false;

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }

        if (!iommu_present(&platform_bus_type))
                use_vram = true;
        else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
                use_vram = true;

        if (WARN_ON(use_vram && !priv->vram.size))
                return -EINVAL;

        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;

        if (use_vram)
                msm_obj->vram_node = &msm_obj->domain[0].node;

        msm_obj->flags = flags;
        msm_obj->madv = MSM_MADV_WILLNEED;

        if (resv) {
                msm_obj->resv = resv;
        } else {
                msm_obj->resv = &msm_obj->_resv;
                reservation_object_init(msm_obj->resv);
        }

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        *obj = &msm_obj->base;

        return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        size = PAGE_ALIGN(size);

        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
        if (ret)
                goto fail;

        if (use_pages(obj)) {
                ret = drm_gem_object_init(dev, obj, size);
                if (ret)
                        goto fail;
        } else {
                drm_gem_private_object_init(dev, obj, size);
        }

        return obj;

fail:
        drm_gem_object_unreference(obj);
        return ERR_PTR(ret);
}
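
/* Import a dmabuf: wrap the exporter's sg table in a private GEM
 * object backed by the exporter's pages:
 */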
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt)
{
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj = NULL;
        uint32_t size;
        int ret, npages;

        /* if we don't have IOMMU, don't bother pretending we can import: */
        if (!iommu_present(&platform_bus_type)) {
                dev_err(dev->dev, "cannot import without IOMMU\n");
                return ERR_PTR(-EINVAL);
        }

        size = PAGE_ALIGN(dmabuf->size);

        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
        if (ret)
                goto fail;

        drm_gem_private_object_init(dev, obj, size);

        npages = size / PAGE_SIZE;

        msm_obj = to_msm_bo(obj);
        msm_obj->sgt = sgt;
        msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!msm_obj->pages) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
        if (ret)
                goto fail;

        return obj;

fail:
        drm_gem_object_unreference_unlocked(obj);
        return ERR_PTR(ret);
}