msm_gem.c

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                        priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;
        dma_addr_t paddr;
        struct page **p;
        int ret, i;

        p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        spin_lock(&priv->vram.lock);
        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
        spin_unlock(&priv->vram.lock);
        if (ret) {
                kvfree(p);
                return ERR_PTR(ret);
        }

        paddr = physaddr(obj);
        for (i = 0; i < npages; i++) {
                p[i] = phys_to_page(paddr);
                paddr += PAGE_SIZE;
        }

        return p;
}
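
/* Lazily attach backing pages on first use: shmem pages in the common
 * case, or a VRAM carveout allocation when there is no IOMMU.  Also
 * builds the sg_table and, for WC/uncached buffers, does the initial
 * dma_map_sg() so the new pages are clean for non-coherent devices.
 */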
static struct page **get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p;
                int npages = obj->size >> PAGE_SHIFT;

                if (use_pages(obj))
                        p = drm_gem_get_pages(obj);
                else
                        p = get_pages_vram(obj, npages);

                if (IS_ERR(p)) {
                        dev_err(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }

                msm_obj->pages = p;

                msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);

                        dev_err(dev->dev, "failed to allocate sgt\n");
                        msm_obj->sgt = NULL;
                        return ptr;
                }

                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;

        spin_lock(&priv->vram.lock);
        drm_mm_remove_node(msm_obj->vram_node);
        spin_unlock(&priv->vram.lock);

        kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (msm_obj->pages) {
                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

                if (msm_obj->sgt)
                        sg_free_table(msm_obj->sgt);
                kfree(msm_obj->sgt);

                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
                else
                        put_pages_vram(obj);

                msm_obj->pages = NULL;
        }
}
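
/* Return the object's backing pages, allocating them on first use.
 * Fails with -EBUSY if the object is no longer MSM_MADV_WILLNEED.
 */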
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct page **p;

        mutex_lock(&msm_obj->lock);

        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                mutex_unlock(&msm_obj->lock);
                return ERR_PTR(-EBUSY);
        }

        p = get_pages(obj);
        mutex_unlock(&msm_obj->lock);
        return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
        /* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
                struct vm_area_struct *vma)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        if (msm_obj->flags & MSM_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        } else if (msm_obj->flags & MSM_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(obj->filp);
                vma->vm_pgoff = 0;
                vma->vm_file = obj->filp;

                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        }

        return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
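
/* Page fault handler for userspace mmaps: make sure the backing pages
 * exist, then insert the single faulting page with vm_insert_mixed().
 */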
int msm_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;

        /*
         * vm_ops.open/drm_gem_mmap_obj and close get and put
         * a reference on obj. So, we don't need to hold one here.
         */
        ret = mutex_lock_interruptible(&msm_obj->lock);
        if (ret)
                goto out;

        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                mutex_unlock(&msm_obj->lock);
                return VM_FAULT_SIGBUS;
        }

        /* make sure we have pages attached now */
        pages = get_pages(obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_unlock;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);

        ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
        mutex_unlock(&msm_obj->lock);
out:
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);

        if (ret) {
                dev_err(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }

        return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
        uint64_t offset;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock(&msm_obj->lock);
        offset = mmap_offset(obj);
        mutex_unlock(&msm_obj->lock);

        return offset;
}
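
/* Each object keeps a list of msm_gem_vma entries, one per address
 * space it is mapped into; add_vma()/lookup_vma()/del_vma() manage
 * that list under msm_obj->lock.
 */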
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (!vma)
                return ERR_PTR(-ENOMEM);

        vma->aspace = aspace;

        list_add_tail(&vma->list, &msm_obj->vmas);

        return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        list_for_each_entry(vma, &msm_obj->vmas, list) {
                if (vma->aspace == aspace)
                        return vma;
        }

        return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
        if (!vma)
                return;

        list_del(&vma->list);
        kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma, *tmp;

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
                msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
                del_vma(vma);
        }
}

/* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
        int ret = 0;

        mutex_lock(&msm_obj->lock);

        if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                mutex_unlock(&msm_obj->lock);
                return -EBUSY;
        }

        vma = lookup_vma(obj, aspace);

        if (!vma) {
                struct page **pages;

                vma = add_vma(obj, aspace);
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto unlock;
                }

                pages = get_pages(obj);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
                }

                ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
                                obj->size >> PAGE_SHIFT);
                if (ret)
                        goto fail;
        }

        *iova = vma->iova;

        mutex_unlock(&msm_obj->lock);
        return 0;

fail:
        del_vma(vma);
unlock:
        mutex_unlock(&msm_obj->lock);
        return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        mutex_lock(&msm_obj->lock);
        vma = lookup_vma(obj, aspace);
        mutex_unlock(&msm_obj->lock);
        WARN_ON(!vma);

        return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
{
        // XXX TODO ..
        // NOTE: probably don't need a _locked() version.. we wouldn't
        // normally unmap here, but instead just mark that it could be
        // unmapped (if the iova refcnt drops to zero), but then later
        // if another _get_iova_locked() fails we can start unmapping
        // things that are no longer needed..
}
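
/* Dumb-buffer support for KMS: scanout-capable, write-combined BOs. */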
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args)
{
        args->pitch = align_pitch(args->width, args->bpp);
        args->size = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
                        MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        /* GEM does all our handle to object mapping */
        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL) {
                ret = -ENOENT;
                goto fail;
        }

        *offset = msm_gem_mmap_offset(obj);

        drm_gem_object_put_unlocked(obj);

fail:
        return ret;
}
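
/* Map the object into the kernel with vmap() (write-combined), creating
 * the mapping on first use and tracking users via vmap_count.
 */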
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        mutex_lock(&msm_obj->lock);

        if (WARN_ON(msm_obj->madv > madv)) {
                dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
                        msm_obj->madv, madv);
                mutex_unlock(&msm_obj->lock);
                return ERR_PTR(-EBUSY);
        }

        /* increment vmap_count *before* vmap() call, so shrinker can
         * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
         * This guarantees that we won't try to msm_gem_vunmap() this
         * same object from within the vmap() call (while we already
         * hold msm_obj->lock)
         */
        msm_obj->vmap_count++;

        if (!msm_obj->vaddr) {
                struct page **pages = get_pages(obj);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
                }
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
                if (msm_obj->vaddr == NULL) {
                        ret = -ENOMEM;
                        goto fail;
                }
        }

        mutex_unlock(&msm_obj->lock);
        return msm_obj->vaddr;

fail:
        msm_obj->vmap_count--;
        mutex_unlock(&msm_obj->lock);
        return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
        return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
        return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock(&msm_obj->lock);
        WARN_ON(msm_obj->vmap_count < 1);
        msm_obj->vmap_count--;
        mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock(&msm_obj->lock);

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        if (msm_obj->madv != __MSM_MADV_PURGED)
                msm_obj->madv = madv;

        madv = msm_obj->madv;

        mutex_unlock(&msm_obj->lock);

        return (madv != __MSM_MADV_PURGED);
}
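
/* Drop all backing storage for a purgeable object: unmap its iovas and
 * kernel vmap, release the pages, and truncate the shmem backing store
 * so the memory is returned to the system immediately.
 */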
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
        WARN_ON(!is_purgeable(msm_obj));
        WARN_ON(obj->import_attach);

        mutex_lock_nested(&msm_obj->lock, subclass);

        put_iova(obj);

        msm_gem_vunmap_locked(obj);

        put_pages(obj);

        msm_obj->madv = __MSM_MADV_PURGED;

        drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
        drm_gem_free_mmap_offset(obj);

        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

        invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
                        0, (loff_t)-1);

        mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&msm_obj->lock));

        if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
                return;

        vunmap(msm_obj->vaddr);
        msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        mutex_lock_nested(&msm_obj->lock, subclass);
        msm_gem_vunmap_locked(obj);
        mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        int i, ret;

        fobj = reservation_object_get_list(msm_obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
                fence = reservation_object_get_excl(msm_obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        if (!exclusive || !fobj)
                return 0;

        for (i = 0; i < fobj->shared_count; i++) {
                fence = rcu_dereference_protected(fobj->shared[i],
                                reservation_object_held(msm_obj->resv));
                if (fence->context != fctx->context) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
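
/* Attach the fence to the object's reservation object (exclusive or
 * shared) and move it onto the GPU's active list.
 */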
void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
        msm_obj->gpu = gpu;
        if (exclusive)
                reservation_object_add_excl_fence(msm_obj->resv, fence);
        else
                reservation_object_add_shared_fence(msm_obj->resv, fence);
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        msm_obj->gpu = NULL;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
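
/* Wait for any pending GPU access before CPU access begins; with
 * MSM_PREP_NOSYNC this is a non-blocking check that returns -EBUSY
 * if the object is still busy.
 */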
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        bool write = !!(op & MSM_PREP_WRITE);
        unsigned long remain =
                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
        long ret;

        ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
                        true, remain);
        if (ret == 0)
                return remain == 0 ? -EBUSY : -ETIMEDOUT;
        else if (ret < 0)
                return ret;

        /* TODO cache maintenance */

        return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
        /* TODO cache maintenance */
        return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
                struct seq_file *m)
{
        if (!dma_fence_is_signaled(fence))
                seq_printf(m, "\t%9s: %s %s seq %u\n", type,
                                fence->ops->get_driver_name(fence),
                                fence->ops->get_timeline_name(fence),
                                fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct reservation_object *robj = msm_obj->resv;
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        struct msm_gem_vma *vma;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
        const char *madv;

        mutex_lock(&msm_obj->lock);

        switch (msm_obj->madv) {
        case __MSM_MADV_PURGED:
                madv = " purged";
                break;
        case MSM_MADV_DONTNEED:
                madv = " purgeable";
                break;
        case MSM_MADV_WILLNEED:
        default:
                madv = "";
                break;
        }

        seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, msm_obj->vaddr);

        /* FIXME: we need to print the address space here too */
        list_for_each_entry(vma, &msm_obj->vmas, list)
                seq_printf(m, " %08llx", vma->iova);

        seq_printf(m, " %zu%s\n", obj->size, madv);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();

        mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
        struct msm_gem_object *msm_obj;
        int count = 0;
        size_t size = 0;

        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_printf(m, " ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
        }

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        /* object should not be on active list: */
        WARN_ON(is_active(msm_obj));

        list_del(&msm_obj->mm_list);

        mutex_lock(&msm_obj->lock);

        put_iova(obj);

        if (obj->import_attach) {
                if (msm_obj->vaddr)
                        dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
                 */
                if (msm_obj->pages)
                        kvfree(msm_obj->pages);

                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
                msm_gem_vunmap_locked(obj);
                put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
                reservation_object_fini(msm_obj->resv);

        drm_gem_object_release(obj);

        mutex_unlock(&msm_obj->lock);
        kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                uint32_t size, uint32_t flags, uint32_t *handle)
{
        struct drm_gem_object *obj;
        int ret;

        obj = msm_gem_new(dev, size, flags);

        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(obj);

        return ret;
}
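
/* Common allocation path shared by msm_gem_new() and msm_gem_import():
 * validates the cache flags, sets up the reservation object and lists,
 * and adds the object to the inactive list.
 */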
static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
                struct reservation_object *resv,
                struct drm_gem_object **obj,
                bool struct_mutex_locked)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;

        switch (flags & MSM_BO_CACHE_MASK) {
        case MSM_BO_UNCACHED:
        case MSM_BO_CACHED:
        case MSM_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }

        msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
        if (!msm_obj)
                return -ENOMEM;

        mutex_init(&msm_obj->lock);

        msm_obj->flags = flags;
        msm_obj->madv = MSM_MADV_WILLNEED;

        if (resv) {
                msm_obj->resv = resv;
        } else {
                msm_obj->resv = &msm_obj->_resv;
                reservation_object_init(msm_obj->resv);
        }

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        INIT_LIST_HEAD(&msm_obj->vmas);

        if (struct_mutex_locked) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));
                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
        } else {
                mutex_lock(&dev->struct_mutex);
                list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
                mutex_unlock(&dev->struct_mutex);
        }

        *obj = &msm_obj->base;

        return 0;
}
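
/* Allocate a new GEM object, backed either by shmem pages (the normal
 * case) or by the VRAM carveout when there is no IOMMU or the buffer
 * is marked MSM_BO_STOLEN.
 */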
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_gem_object *obj = NULL;
        bool use_vram = false;
        int ret;

        size = PAGE_ALIGN(size);

        if (!iommu_present(&platform_bus_type))
                use_vram = true;
        else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
                use_vram = true;

        if (WARN_ON(use_vram && !priv->vram.size))
                return ERR_PTR(-EINVAL);

        /* Disallow zero sized objects as they make the underlying
         * infrastructure grumpy
         */
        if (size == 0)
                return ERR_PTR(-EINVAL);

        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
        if (ret)
                goto fail;

        if (use_vram) {
                struct msm_gem_vma *vma;
                struct page **pages;
                struct msm_gem_object *msm_obj = to_msm_bo(obj);

                mutex_lock(&msm_obj->lock);

                vma = add_vma(obj, NULL);
                mutex_unlock(&msm_obj->lock);
                if (IS_ERR(vma)) {
                        ret = PTR_ERR(vma);
                        goto fail;
                }

                to_msm_bo(obj)->vram_node = &vma->node;

                drm_gem_private_object_init(dev, obj, size);

                pages = get_pages(obj);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
                }

                vma->iova = physaddr(obj);
        } else {
                ret = drm_gem_object_init(dev, obj, size);
                if (ret)
                        goto fail;
        }

        return obj;

fail:
        drm_gem_object_put_unlocked(obj);
        return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags)
{
        return _msm_gem_new(dev, size, flags, false);
}
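
/* Wrap an imported dma-buf's sg_table in a GEM object. Import is only
 * supported when an IOMMU is present.
 */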
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt)
{
        struct msm_gem_object *msm_obj;
        struct drm_gem_object *obj;
        uint32_t size;
        int ret, npages;

        /* if we don't have IOMMU, don't bother pretending we can import: */
        if (!iommu_present(&platform_bus_type)) {
                dev_err(dev->dev, "cannot import without IOMMU\n");
                return ERR_PTR(-EINVAL);
        }

        size = PAGE_ALIGN(dmabuf->size);

        ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
        if (ret)
                goto fail;

        drm_gem_private_object_init(dev, obj, size);

        npages = size / PAGE_SIZE;

        msm_obj = to_msm_bo(obj);
        mutex_lock(&msm_obj->lock);
        msm_obj->sgt = sgt;
        msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!msm_obj->pages) {
                mutex_unlock(&msm_obj->lock);
                ret = -ENOMEM;
                goto fail;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
        if (ret) {
                mutex_unlock(&msm_obj->lock);
                goto fail;
        }

        mutex_unlock(&msm_obj->lock);
        return obj;

fail:
        drm_gem_object_put_unlocked(obj);
        return ERR_PTR(ret);
}
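
/* Convenience helper for kernel-internal buffers: allocate a BO, map it
 * into the given address space, and return its kernel vaddr (with the
 * GEM object and iova returned through *bo and *iova).
 */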
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
                uint32_t flags, struct msm_gem_address_space *aspace,
                struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
        void *vaddr;
        struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
        int ret;

        if (IS_ERR(obj))
                return ERR_CAST(obj);

        if (iova) {
                ret = msm_gem_get_iova(obj, aspace, iova);
                if (ret) {
                        drm_gem_object_put(obj);
                        return ERR_PTR(ret);
                }
        }

        vaddr = msm_gem_get_vaddr(obj);
        if (IS_ERR(vaddr)) {
                msm_gem_put_iova(obj, aspace);
                drm_gem_object_put(obj);
                return ERR_CAST(vaddr);
        }

        if (bo)
                *bo = obj;

        return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
                uint32_t flags, struct msm_gem_address_space *aspace,
                struct drm_gem_object **bo, uint64_t *iova)
{
        return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
                uint32_t flags, struct msm_gem_address_space *aspace,
                struct drm_gem_object **bo, uint64_t *iova)
{
        return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}