armada_gem.c

/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
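
/*
 * Fault handler for mmap'd GEM objects.  The backing store is physically
 * contiguous, so the faulting address is converted to a PFN by offsetting
 * from the object's base physical address and inserted directly into the
 * VMA.
 */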
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault = armada_gem_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}
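
/*
 * Free a GEM object, releasing whichever backing store it has:
 * __free_pages() for small page-backed objects, removal from the linear
 * drm_mm range (plus iounmap of any kernel mapping) for linear-backed
 * objects, and dma-buf unmap/detach for imported objects.
 */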
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
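
/*
 * Provide backing store for an object that the display hardware will scan
 * out, so it must be physically contiguous.  Small objects (<= 8192 bytes,
 * typically cursors) come from the page allocator; everything else is
 * carved out of the driver's linear memory region managed by drm_mm.  The
 * comment in the body explains why CMA / dma_alloc_coherent() is
 * deliberately not used.
 */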
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handing
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with an CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}
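
/*
 * Return a kernel virtual mapping of the object, creating one with
 * ioremap_wc() on first use for linear-backed objects.  Page-backed
 * objects already have obj->addr set when they are allocated.
 */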
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}
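
/*
 * Allocate a GEM object with no shmem backing (no filp).  Used for dumb
 * buffers, whose storage comes from armada_gem_linear_back(), and for
 * imported dma-bufs, whose storage is provided by the exporter.
 */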
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
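
/*
 * Allocate a shmem-backed GEM object.  The backing pages may come from
 * highmem and are marked reclaimable; they are only gathered into a
 * scatterlist if the object is later exported via dma-buf.
 */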
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
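/*
 * Create a dumb buffer: compute the pitch and size, allocate a private
 * (non-shmem) object, give it linear backing, and return a handle to
 * userspace.  The handle owns the reference, so the allocation reference
 * is dropped on both the success and error paths.
 */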
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
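/*
 * Create a shmem-backed object of the requested size and return a handle
 * to it.  As with dumb buffers, the handle holds the only reference once
 * this returns.
 */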
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
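
/*
 * Copy data from userspace into a kernel-mapped object.  The user pointer
 * and the offset/size are validated against the object before the copy,
 * and the object's update hook (if any) is called after a successful copy.
 */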
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object; drop the lookup reference if not */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
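/*
 * dma-buf map_dma_buf callback.  Build and DMA-map a scatterlist for
 * whichever backing store the object has: the shmem pages for filp-backed
 * objects, the single allocation for page-backed objects, or the device
 * address of the linear region (which needs no mapping) for linear-backed
 * objects.
 */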
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
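
/*
 * dma-buf unmap_dma_buf callback: undo armada_gem_prime_map_dma_buf().
 * Linear-backed objects were never DMA-mapped, so only the table is freed
 * for them; shmem-backed objects also drop the page references taken in
 * the map callback.
 */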
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf = armada_gem_prime_map_dma_buf,
	.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = armada_gem_dmabuf_no_kmap,
	.unmap = armada_gem_dmabuf_no_kunmap,
	.mmap = armada_gem_dmabuf_mmap,
};
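
/*
 * Export a GEM object as a dma-buf using the dma_buf_ops above.
 */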
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}
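
/*
 * Import a dma-buf.  A buffer exported by this device is recognised by its
 * ops and simply gains another reference on the underlying GEM object; a
 * foreign buffer is attached here but only mapped later, by
 * armada_gem_map_import(), when it is actually needed.
 */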
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
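
/*
 * Map an imported dma-buf for DMA and record its device address.  Only a
 * single DMA segment is supported, so scattered or undersized imports are
 * rejected.
 */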
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}

	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}