etnaviv_gem.c

/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

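/*
 * Pin the backing pages and build the scatterlist for the object. The
 * caller must hold etnaviv_obj->lock (enforced by the lockdep assert).
 * A sketch of the typical caller pattern, mirroring the fault handler
 * further down:
 *
 *	mutex_lock(&etnaviv_obj->lock);
 *	pages = etnaviv_gem_get_pages(etnaviv_obj);
 *	mutex_unlock(&etnaviv_obj->lock);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 */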
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

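/*
 * Note that no pages are mapped at mmap time: drm_gem_mmap() only sets
 * up the VMA, and individual pages are inserted on demand by the fault
 * handler below.
 */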
int etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

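/*
 * GPU VA mappings are tracked per (object, MMU) pair in struct
 * etnaviv_vram_mapping, refcounted via mapping->use under the object
 * lock. A mapping whose use count has dropped to zero may be reaped by
 * the MMU; a reaped mapping has mapping->mmu == NULL and can be re-used
 * for a fresh mapping instead of allocating a new one.
 */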
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_get(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

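/*
 * Double-checked locking: the unlocked fast path below is safe because
 * vaddr is only ever set once (under the object lock) and remains valid
 * until the object is released.
 */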
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

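/*
 * CPU access to a cached BO must be bracketed by a prep/fini pair so
 * the caches can be synchronised with the device. A sketch of the
 * expected calling sequence:
 *
 *	etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	... CPU reads/writes the buffer ...
 *	etnaviv_gem_cpu_fini(obj);
 *
 * etnaviv_gem_cpu_fini() warns if it sees a fini without a matching
 * prep.
 */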
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
		   etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
		   obj->name, kref_read(&obj->refcount),
		   off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

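/*
 * Final teardown: the object is taken off the global GEM list, any
 * remaining (inactive) GPU VA mappings are unmapped and freed, and the
 * per-type release hook drops the backing pages before the object
 * itself is freed.
 */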
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

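/*
 * Userptr objects wrap anonymous user memory. Pinning tries the fast
 * GUP path first; if that cannot pin every page, the remaining work is
 * punted to a workqueue and the caller sees -EAGAIN until the worker
 * has filled in etnaviv_obj->pages (or recorded an error in
 * etnaviv_obj->userptr.work).
 */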
struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;
	unsigned int flags = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	if (!etnaviv_obj->userptr.ro)
		flags |= FOLL_WRITE;

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    flags, pvec + pinned, NULL, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned);
		kvfree(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_put_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = kvmalloc_array(npages, sizeof(struct page *),
				      GFP_KERNEL);
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			kvfree(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned);
	kvfree(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_get(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}

	put_task_struct(etnaviv_obj->userptr.task);
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret)
		goto unreference;

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

unreference:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);

	return ret;
}