msm_gem.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085
  1. /*
  2. * Copyright (C) 2013 Red Hat
  3. * Author: Rob Clark <robdclark@gmail.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include <linux/spinlock.h>
  18. #include <linux/shmem_fs.h>
  19. #include <linux/dma-buf.h>
  20. #include <linux/pfn_t.h>
  21. #include "msm_drv.h"
  22. #include "msm_fence.h"
  23. #include "msm_gem.h"
  24. #include "msm_gpu.h"
  25. #include "msm_mmu.h"
  26. static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
  27. static dma_addr_t physaddr(struct drm_gem_object *obj)
  28. {
  29. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  30. struct msm_drm_private *priv = obj->dev->dev_private;
  31. return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  32. priv->vram.paddr;
  33. }
  34. static bool use_pages(struct drm_gem_object *obj)
  35. {
  36. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  37. return !msm_obj->vram_node;
  38. }
  39. /* allocate pages from VRAM carveout, used when no IOMMU: */
  40. static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
  41. {
  42. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  43. struct msm_drm_private *priv = obj->dev->dev_private;
  44. dma_addr_t paddr;
  45. struct page **p;
  46. int ret, i;
  47. p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
  48. if (!p)
  49. return ERR_PTR(-ENOMEM);
  50. spin_lock(&priv->vram.lock);
  51. ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
  52. spin_unlock(&priv->vram.lock);
  53. if (ret) {
  54. kvfree(p);
  55. return ERR_PTR(ret);
  56. }
  57. paddr = physaddr(obj);
  58. for (i = 0; i < npages; i++) {
  59. p[i] = phys_to_page(paddr);
  60. paddr += PAGE_SIZE;
  61. }
  62. return p;
  63. }
/* Lazily attach backing pages (shmem or VRAM carveout) and build the
 * scatter/gather table.  Caller must hold msm_obj->lock.  Returns the
 * cached page array, or an ERR_PTR on failure.
 *
 * NOTE(review): if drm_prime_pages_to_sg() fails, msm_obj->pages stays
 * set while msm_obj->sgt is NULL — later teardown paths must tolerate
 * a NULL sgt.
 */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		/* shmem-backed vs VRAM-carveout allocation: */
		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
  97. static void put_pages_vram(struct drm_gem_object *obj)
  98. {
  99. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  100. struct msm_drm_private *priv = obj->dev->dev_private;
  101. spin_lock(&priv->vram.lock);
  102. drm_mm_remove_node(msm_obj->vram_node);
  103. spin_unlock(&priv->vram.lock);
  104. kvfree(msm_obj->pages);
  105. }
  106. static void put_pages(struct drm_gem_object *obj)
  107. {
  108. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  109. if (msm_obj->pages) {
  110. /* For non-cached buffers, ensure the new pages are clean
  111. * because display controller, GPU, etc. are not coherent:
  112. */
  113. if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
  114. dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
  115. msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  116. if (msm_obj->sgt)
  117. sg_free_table(msm_obj->sgt);
  118. kfree(msm_obj->sgt);
  119. if (use_pages(obj))
  120. drm_gem_put_pages(obj, msm_obj->pages, true, false);
  121. else
  122. put_pages_vram(obj);
  123. msm_obj->pages = NULL;
  124. }
  125. }
  126. struct page **msm_gem_get_pages(struct drm_gem_object *obj)
  127. {
  128. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  129. struct page **p;
  130. mutex_lock(&msm_obj->lock);
  131. if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
  132. mutex_unlock(&msm_obj->lock);
  133. return ERR_PTR(-EBUSY);
  134. }
  135. p = get_pages(obj);
  136. mutex_unlock(&msm_obj->lock);
  137. return p;
  138. }
/* Counterpart of msm_gem_get_pages(); currently a no-op since pages
 * are only released on purge/free.
 */
void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
/* Set up a userspace mapping of the object: choose the page protection
 * from the object's cache flags and, for cached objects, redirect the
 * vma to the shmem file.  Returns 0 (cannot currently fail).
 */
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* pages are inserted individually via the fault handler: */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
  167. int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
  168. {
  169. int ret;
  170. ret = drm_gem_mmap(filp, vma);
  171. if (ret) {
  172. DBG("mmap failed: %d", ret);
  173. return ret;
  174. }
  175. return msm_gem_mmap_obj(vma->vm_private_data, vma);
  176. }
/* Page-fault handler for mmap'd GEM objects: attach backing pages on
 * demand and insert the faulting page's pfn into the vma.  Translates
 * internal errno values into VM_FAULT_* codes for the mm core.
 */
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we dont need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	/* purged objects must not be faulted back in: */
	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	/* map errno -> VM_FAULT code expected by the mm core: */
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
  229. /** get mmap offset */
  230. static uint64_t mmap_offset(struct drm_gem_object *obj)
  231. {
  232. struct drm_device *dev = obj->dev;
  233. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  234. int ret;
  235. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  236. /* Make it mmapable */
  237. ret = drm_gem_create_mmap_offset(obj);
  238. if (ret) {
  239. dev_err(dev->dev, "could not allocate mmap offset\n");
  240. return 0;
  241. }
  242. return drm_vma_node_offset_addr(&obj->vma_node);
  243. }
  244. uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
  245. {
  246. uint64_t offset;
  247. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  248. mutex_lock(&msm_obj->lock);
  249. offset = mmap_offset(obj);
  250. mutex_unlock(&msm_obj->lock);
  251. return offset;
  252. }
  253. static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
  254. struct msm_gem_address_space *aspace)
  255. {
  256. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  257. struct msm_gem_vma *vma;
  258. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  259. vma = kzalloc(sizeof(*vma), GFP_KERNEL);
  260. if (!vma)
  261. return ERR_PTR(-ENOMEM);
  262. vma->aspace = aspace;
  263. list_add_tail(&vma->list, &msm_obj->vmas);
  264. return vma;
  265. }
  266. static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
  267. struct msm_gem_address_space *aspace)
  268. {
  269. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  270. struct msm_gem_vma *vma;
  271. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  272. list_for_each_entry(vma, &msm_obj->vmas, list) {
  273. if (vma->aspace == aspace)
  274. return vma;
  275. }
  276. return NULL;
  277. }
  278. static void del_vma(struct msm_gem_vma *vma)
  279. {
  280. if (!vma)
  281. return;
  282. list_del(&vma->list);
  283. kfree(vma);
  284. }
/* Called with msm_obj->lock locked */
/* Unmap and free every vma on the object, tearing down its iommu
 * mappings.  Used on purge and final free.
 */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* _safe variant because del_vma() removes entries as we walk: */
	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}
/* get iova, taking a reference. Should have a matching put */
/* On first use for a given address space this allocates a vma, pins
 * the backing pages and maps them through the iommu; subsequent calls
 * return the cached iova.  Fails with -EBUSY on purgeable objects.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	/* only the vma created above is unwound; pages stay attached: */
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}
  336. /* get iova without taking a reference, used in places where you have
  337. * already done a 'msm_gem_get_iova()'.
  338. */
  339. uint64_t msm_gem_iova(struct drm_gem_object *obj,
  340. struct msm_gem_address_space *aspace)
  341. {
  342. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  343. struct msm_gem_vma *vma;
  344. mutex_lock(&msm_obj->lock);
  345. vma = lookup_vma(obj, aspace);
  346. mutex_unlock(&msm_obj->lock);
  347. WARN_ON(!vma);
  348. return vma ? vma->iova : 0;
  349. }
/* Counterpart of msm_gem_get_iova(); intentionally a no-op for now —
 * mappings live until purge/free.
 */
void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
  360. int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  361. struct drm_mode_create_dumb *args)
  362. {
  363. args->pitch = align_pitch(args->width, args->bpp);
  364. args->size = PAGE_ALIGN(args->pitch * args->height);
  365. return msm_gem_new_handle(dev, file, args->size,
  366. MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
  367. }
  368. int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
  369. uint32_t handle, uint64_t *offset)
  370. {
  371. struct drm_gem_object *obj;
  372. int ret = 0;
  373. /* GEM does all our handle to object mapping */
  374. obj = drm_gem_object_lookup(file, handle);
  375. if (obj == NULL) {
  376. ret = -ENOENT;
  377. goto fail;
  378. }
  379. *offset = msm_gem_mmap_offset(obj);
  380. drm_gem_object_unreference_unlocked(obj);
  381. fail:
  382. return ret;
  383. }
/* Return a kernel virtual mapping of the object, creating it on first
 * use.  @madv is the maximum madvise state the caller tolerates
 * (WILLNEED for normal use, __MSM_MADV_PURGED for crash dumping).
 * Each successful call bumps vmap_count; pair with msm_gem_put_vaddr().
 */
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		/* always map write-combined regardless of BO flags: */
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	/* undo the optimistic refcount bump from above: */
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}
  422. void *msm_gem_get_vaddr(struct drm_gem_object *obj)
  423. {
  424. return get_vaddr(obj, MSM_MADV_WILLNEED);
  425. }
/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, were the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	/* __MSM_MADV_PURGED as the limit accepts any madvise state: */
	return get_vaddr(obj, __MSM_MADV_PURGED);
}
  436. void msm_gem_put_vaddr(struct drm_gem_object *obj)
  437. {
  438. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  439. mutex_lock(&msm_obj->lock);
  440. WARN_ON(msm_obj->vmap_count < 1);
  441. msm_obj->vmap_count--;
  442. mutex_unlock(&msm_obj->lock);
  443. }
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	/* caller is expected to hold struct_mutex as well: */
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	/* a purged object stays purged; the new state is otherwise
	 * recorded and the (possibly unchanged) state reported back:
	 */
	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
/* Purge a DONTNEED object's backing store (shrinker path): drop iommu
 * mappings, kernel vmap, pages, the mmap offset, and force shmem to
 * release its pages immediately.  @subclass is the lockdep nesting
 * level for msm_obj->lock.
 */
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	/* zap any userspace mappings so faults can't bring pages back: */
	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}
  482. static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
  483. {
  484. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  485. WARN_ON(!mutex_is_locked(&msm_obj->lock));
  486. if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
  487. return;
  488. vunmap(msm_obj->vaddr);
  489. msm_obj->vaddr = NULL;
  490. }
  491. void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
  492. {
  493. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  494. mutex_lock_nested(&msm_obj->lock, subclass);
  495. msm_gem_vunmap_locked(obj);
  496. mutex_unlock(&msm_obj->lock);
  497. }
/* must be called before _move_to_active().. */
/* Wait (interruptibly) for outstanding fences on the object's
 * reservation that belong to other fence contexts.  For a shared
 * (read) access only the exclusive fence is waited on; for exclusive
 * access all shared fences are waited on too.
 */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		/* safe without rcu_read_lock: reservation is held */
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/* Mark the object busy on @gpu: record the fence in its reservation
 * (exclusive for writes, shared for reads) and move it to the GPU's
 * active list.
 */
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* a purgeable/purged object must never be submitted: */
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
  542. void msm_gem_move_to_inactive(struct drm_gem_object *obj)
  543. {
  544. struct drm_device *dev = obj->dev;
  545. struct msm_drm_private *priv = dev->dev_private;
  546. struct msm_gem_object *msm_obj = to_msm_bo(obj);
  547. WARN_ON(!mutex_is_locked(&dev->struct_mutex));
  548. msm_obj->gpu = NULL;
  549. list_del_init(&msm_obj->mm_list);
  550. list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
  551. }
/* Prepare for CPU access: wait for GPU fences on the object.  With
 * MSM_PREP_NOSYNC the wait is a poll (zero timeout) and -EBUSY is
 * returned if the object is still busy; otherwise a timeout yields
 * -ETIMEDOUT.
 */
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
  568. int msm_gem_cpu_fini(struct drm_gem_object *obj)
  569. {
  570. /* TODO cache maintenance */
  571. return 0;
  572. }
  573. #ifdef CONFIG_DEBUG_FS
  574. static void describe_fence(struct dma_fence *fence, const char *type,
  575. struct seq_file *m)
  576. {
  577. if (!dma_fence_is_signaled(fence))
  578. seq_printf(m, "\t%9s: %s %s seq %u\n", type,
  579. fence->ops->get_driver_name(fence),
  580. fence->ops->get_timeline_name(fence),
  581. fence->seqno);
  582. }
/* Dump one object's state (flags, refcount, iovas, size, madvise
 * state and pending fences) to a debugfs seq file.
 */
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	/* FIXME: we need to print the address space here too */
	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	/* fences are only read, so rcu read lock suffices here: */
	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}
  628. void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
  629. {
  630. struct msm_gem_object *msm_obj;
  631. int count = 0;
  632. size_t size = 0;
  633. list_for_each_entry(msm_obj, list, mm_list) {
  634. struct drm_gem_object *obj = &msm_obj->base;
  635. seq_printf(m, " ");
  636. msm_gem_describe(obj, m);
  637. count++;
  638. size += obj->size;
  639. }
  640. seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
  641. }
  642. #endif
/* Final destructor, called when the GEM refcount hits zero.  Tears
 * down iommu mappings, then either releases the imported dma-buf
 * attachment or the native shmem/vram backing, and frees the object.
 * Caller holds struct_mutex.
 */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		/* vaddr of an import came from dma_buf_vmap(): */
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	/* only destroy the embedded resv; an imported one is not ours: */
	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);
	mutex_unlock(&msm_obj->lock);

	kfree(msm_obj);
}
  672. /* convenience method to construct a GEM buffer object, and userspace handle */
  673. int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
  674. uint32_t size, uint32_t flags, uint32_t *handle)
  675. {
  676. struct drm_gem_object *obj;
  677. int ret;
  678. obj = msm_gem_new(dev, size, flags);
  679. if (IS_ERR(obj))
  680. return PTR_ERR(obj);
  681. ret = drm_gem_handle_create(file, obj, handle);
  682. /* drop reference from allocate - handle holds it now */
  683. drm_gem_object_unreference_unlocked(obj);
  684. return ret;
  685. }
/* Common allocation path for native and imported objects: validates
 * the cache flags, allocates and initializes the msm_gem_object
 * (lock, madvise state, reservation, lists) and puts it on the
 * inactive list.  @resv may be an externally-owned reservation (for
 * imports); otherwise the embedded one is used.  Does NOT initialize
 * the drm_gem_object itself — callers do that.
 */
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	/* the inactive list is protected by struct_mutex: */
	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}
/* Allocate a new native GEM object of @size bytes.  Without an IOMMU
 * (or for MSM_BO_STOLEN with a configured carveout) the object is
 * backed by the VRAM carveout and its pages are allocated eagerly;
 * otherwise it is shmem-backed and pages are attached lazily.
 * Returns the object or an ERR_PTR.
 */
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		/* carveout objects live in the NULL address space: */
		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		/* allocate the carveout range up front: */
		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		/* without iommu, the "iova" is just the physical address: */
		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
  780. struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
  781. uint32_t size, uint32_t flags)
  782. {
  783. return _msm_gem_new(dev, size, flags, true);
  784. }
  785. struct drm_gem_object *msm_gem_new(struct drm_device *dev,
  786. uint32_t size, uint32_t flags)
  787. {
  788. return _msm_gem_new(dev, size, flags, false);
  789. }
  790. struct drm_gem_object *msm_gem_import(struct drm_device *dev,
  791. struct dma_buf *dmabuf, struct sg_table *sgt)
  792. {
  793. struct msm_gem_object *msm_obj;
  794. struct drm_gem_object *obj;
  795. uint32_t size;
  796. int ret, npages;
  797. /* if we don't have IOMMU, don't bother pretending we can import: */
  798. if (!iommu_present(&platform_bus_type)) {
  799. dev_err(dev->dev, "cannot import without IOMMU\n");
  800. return ERR_PTR(-EINVAL);
  801. }
  802. size = PAGE_ALIGN(dmabuf->size);
  803. ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
  804. if (ret)
  805. goto fail;
  806. drm_gem_private_object_init(dev, obj, size);
  807. npages = size / PAGE_SIZE;
  808. msm_obj = to_msm_bo(obj);
  809. mutex_lock(&msm_obj->lock);
  810. msm_obj->sgt = sgt;
  811. msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
  812. if (!msm_obj->pages) {
  813. mutex_unlock(&msm_obj->lock);
  814. ret = -ENOMEM;
  815. goto fail;
  816. }
  817. ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
  818. if (ret) {
  819. mutex_unlock(&msm_obj->lock);
  820. goto fail;
  821. }
  822. mutex_unlock(&msm_obj->lock);
  823. return obj;
  824. fail:
  825. drm_gem_object_unreference_unlocked(obj);
  826. return ERR_PTR(ret);
  827. }
/* Allocate a kernel-internal buffer: create a GEM object, optionally
 * map it into @aspace (iova returned via *iova), and return its
 * kernel vmap.  On success *bo (if non-NULL) receives the object with
 * the allocation reference still held.  @locked says whether the
 * caller already holds dev->struct_mutex.
 */
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		/* currently a no-op, kept for get/put symmetry: */
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}
  854. void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
  855. uint32_t flags, struct msm_gem_address_space *aspace,
  856. struct drm_gem_object **bo, uint64_t *iova)
  857. {
  858. return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
  859. }
  860. void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
  861. uint32_t flags, struct msm_gem_address_space *aspace,
  862. struct drm_gem_object **bo, uint64_t *iova)
  863. {
  864. return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
  865. }