etnaviv_gem.c
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
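
/*
 * Lazily populate the backing pages and scatter/gather table of an
 * object.  Pages come from the per-object ->get_pages() hook (shmem or
 * userptr) on first use and stay pinned until the object is released;
 * the sg table is built on top of them and mapped for device access.
 * Must be called with etnaviv_obj->lock held.
 */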
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
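
/*
 * Apply the object's caching mode to a userspace mapping.  WC and
 * uncached objects only need their page protection bits adjusted;
 * fully cached objects are instead redirected to the underlying shmem
 * file (see the comment in the else branch below).
 */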
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}
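
/*
 * Page fault handler: look up the backing page for the faulting
 * address and insert it into the vma.  Concurrent faults on the same
 * object serialise on the object lock while the pages are attached.
 */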
int etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}
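
/*
 * A vram mapping is held alive by two things: the "use" count of
 * active users, and a GEM object reference taken for as long as a
 * caller holds the mapping.
 */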
void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_get(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
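
/*
 * Find or create the mapping of this object into the GPU's MMU.  On
 * success the mapping is returned with its use count raised and a
 * reference held on the GEM object; drop both with
 * etnaviv_gem_mapping_unreference().  A mapping whose use count has
 * dropped to zero may be reaped by the MMU in the meantime, hence the
 * re-check under the MMU lock.
 */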
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}
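
/*
 * Return a kernel virtual address for the object, creating the vmap
 * lazily on first use.  The unlocked fast path is safe because an
 * existing vaddr is never torn down before the object is released.
 */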
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
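
/* Translate an ETNA_PREP_* access mask into the DMA direction used for
 * cache maintenance.
 */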
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
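
/*
 * Prepare the object for CPU access: wait for (or, with
 * ETNA_PREP_NOSYNC, poll) outstanding GPU work via the reservation
 * object, then hand cached buffers over to the CPU with a cache sync.
 * The op is remembered so that etnaviv_gem_cpu_fini() can sync in the
 * same direction when the CPU is done with the buffer.
 */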
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
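
/*
 * Called when the last GEM reference is dropped: unlink the object
 * from the global list, tear down any leftover MMU mappings and
 * release the backing storage via the per-object ops.
 */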
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}
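
/*
 * Common part of object creation: validate the caching flags, allocate
 * the wrapper object and attach either a caller-provided reservation
 * object or the embedded one.
 */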
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	if (ret)
		goto fail;

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
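
/*
 * Pin the user pages backing a userptr object, batch by batch, with
 * get_user_pages_fast().  Only the mm that created the object may
 * populate it, and the pages are pinned writable unless the object was
 * created read-only.  Called with the object lock held.
 */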
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_sem);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = get_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			release_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);

	return ret;
}