drm_gem_cma_helper.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566
  1. /*
  2. * drm gem CMA (contiguous memory allocator) helper functions
  3. *
  4. * Copyright (C) 2012 Sascha Hauer, Pengutronix
  5. *
  6. * Based on Samsung Exynos code
  7. *
  8. * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version 2
  13. * of the License, or (at your option) any later version.
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. */
  19. #include <linux/mm.h>
  20. #include <linux/slab.h>
  21. #include <linux/mutex.h>
  22. #include <linux/export.h>
  23. #include <linux/dma-buf.h>
  24. #include <linux/dma-mapping.h>
  25. #include <drm/drmP.h>
  26. #include <drm/drm.h>
  27. #include <drm/drm_gem_cma_helper.h>
  28. #include <drm/drm_vma_manager.h>
  29. /**
  30. * DOC: cma helpers
  31. *
  32. * The Contiguous Memory Allocator reserves a pool of memory at early boot
  33. * that is used to service requests for large blocks of contiguous memory.
  34. *
  35. * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
  36. * objects that are physically contiguous in memory. This is useful for
  37. * display drivers that are unable to map scattered buffers via an IOMMU.
  38. */
  39. /**
  40. * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
  41. * @drm: DRM device
  42. * @size: size of the object to allocate
  43. *
  44. * This function creates and initializes a GEM CMA object of the given size,
  45. * but doesn't allocate any memory to back the object.
  46. *
  47. * Returns:
  48. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  49. * error code on failure.
  50. */
  51. static struct drm_gem_cma_object *
  52. __drm_gem_cma_create(struct drm_device *drm, size_t size)
  53. {
  54. struct drm_gem_cma_object *cma_obj;
  55. struct drm_gem_object *gem_obj;
  56. int ret;
  57. if (drm->driver->gem_create_object)
  58. gem_obj = drm->driver->gem_create_object(drm, size);
  59. else
  60. gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
  61. if (!gem_obj)
  62. return ERR_PTR(-ENOMEM);
  63. cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
  64. ret = drm_gem_object_init(drm, gem_obj, size);
  65. if (ret)
  66. goto error;
  67. ret = drm_gem_create_mmap_offset(gem_obj);
  68. if (ret) {
  69. drm_gem_object_release(gem_obj);
  70. goto error;
  71. }
  72. return cma_obj;
  73. error:
  74. kfree(cma_obj);
  75. return ERR_PTR(ret);
  76. }
  77. /**
  78. * drm_gem_cma_create - allocate an object with the given size
  79. * @drm: DRM device
  80. * @size: size of the object to allocate
  81. *
  82. * This function creates a CMA GEM object and allocates a contiguous chunk of
  83. * memory as backing store. The backing memory has the writecombine attribute
  84. * set.
  85. *
  86. * Returns:
  87. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  88. * error code on failure.
  89. */
  90. struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
  91. size_t size)
  92. {
  93. struct drm_gem_cma_object *cma_obj;
  94. int ret;
  95. size = round_up(size, PAGE_SIZE);
  96. cma_obj = __drm_gem_cma_create(drm, size);
  97. if (IS_ERR(cma_obj))
  98. return cma_obj;
  99. cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
  100. GFP_KERNEL | __GFP_NOWARN);
  101. if (!cma_obj->vaddr) {
  102. dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
  103. size);
  104. ret = -ENOMEM;
  105. goto error;
  106. }
  107. return cma_obj;
  108. error:
  109. drm_gem_object_put_unlocked(&cma_obj->base);
  110. return ERR_PTR(ret);
  111. }
  112. EXPORT_SYMBOL_GPL(drm_gem_cma_create);
  113. /**
  114. * drm_gem_cma_create_with_handle - allocate an object with the given size and
  115. * return a GEM handle to it
  116. * @file_priv: DRM file-private structure to register the handle for
  117. * @drm: DRM device
  118. * @size: size of the object to allocate
  119. * @handle: return location for the GEM handle
  120. *
  121. * This function creates a CMA GEM object, allocating a physically contiguous
  122. * chunk of memory as backing store. The GEM object is then added to the list
  123. * of object associated with the given file and a handle to it is returned.
  124. *
  125. * Returns:
  126. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  127. * error code on failure.
  128. */
  129. static struct drm_gem_cma_object *
  130. drm_gem_cma_create_with_handle(struct drm_file *file_priv,
  131. struct drm_device *drm, size_t size,
  132. uint32_t *handle)
  133. {
  134. struct drm_gem_cma_object *cma_obj;
  135. struct drm_gem_object *gem_obj;
  136. int ret;
  137. cma_obj = drm_gem_cma_create(drm, size);
  138. if (IS_ERR(cma_obj))
  139. return cma_obj;
  140. gem_obj = &cma_obj->base;
  141. /*
  142. * allocate a id of idr table where the obj is registered
  143. * and handle has the id what user can see.
  144. */
  145. ret = drm_gem_handle_create(file_priv, gem_obj, handle);
  146. /* drop reference from allocate - handle holds it now. */
  147. drm_gem_object_put_unlocked(gem_obj);
  148. if (ret)
  149. return ERR_PTR(ret);
  150. return cma_obj;
  151. }
  152. /**
  153. * drm_gem_cma_free_object - free resources associated with a CMA GEM object
  154. * @gem_obj: GEM object to free
  155. *
  156. * This function frees the backing memory of the CMA GEM object, cleans up the
  157. * GEM object state and frees the memory used to store the object itself.
  158. * Drivers using the CMA helpers should set this as their
  159. * &drm_driver.gem_free_object_unlocked callback.
  160. */
  161. void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
  162. {
  163. struct drm_gem_cma_object *cma_obj;
  164. cma_obj = to_drm_gem_cma_obj(gem_obj);
  165. if (cma_obj->vaddr) {
  166. dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
  167. cma_obj->vaddr, cma_obj->paddr);
  168. } else if (gem_obj->import_attach) {
  169. drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
  170. }
  171. drm_gem_object_release(gem_obj);
  172. kfree(cma_obj);
  173. }
  174. EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
  175. /**
  176. * drm_gem_cma_dumb_create_internal - create a dumb buffer object
  177. * @file_priv: DRM file-private structure to create the dumb buffer for
  178. * @drm: DRM device
  179. * @args: IOCTL data
  180. *
  181. * This aligns the pitch and size arguments to the minimum required. This is
  182. * an internal helper that can be wrapped by a driver to account for hardware
  183. * with more specific alignment requirements. It should not be used directly
  184. * as their &drm_driver.dumb_create callback.
  185. *
  186. * Returns:
  187. * 0 on success or a negative error code on failure.
  188. */
  189. int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
  190. struct drm_device *drm,
  191. struct drm_mode_create_dumb *args)
  192. {
  193. unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  194. struct drm_gem_cma_object *cma_obj;
  195. if (args->pitch < min_pitch)
  196. args->pitch = min_pitch;
  197. if (args->size < args->pitch * args->height)
  198. args->size = args->pitch * args->height;
  199. cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
  200. &args->handle);
  201. return PTR_ERR_OR_ZERO(cma_obj);
  202. }
  203. EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
  204. /**
  205. * drm_gem_cma_dumb_create - create a dumb buffer object
  206. * @file_priv: DRM file-private structure to create the dumb buffer for
  207. * @drm: DRM device
  208. * @args: IOCTL data
  209. *
  210. * This function computes the pitch of the dumb buffer and rounds it up to an
  211. * integer number of bytes per pixel. Drivers for hardware that doesn't have
  212. * any additional restrictions on the pitch can directly use this function as
  213. * their &drm_driver.dumb_create callback.
  214. *
  215. * For hardware with additional restrictions, drivers can adjust the fields
  216. * set up by userspace and pass the IOCTL data along to the
  217. * drm_gem_cma_dumb_create_internal() function.
  218. *
  219. * Returns:
  220. * 0 on success or a negative error code on failure.
  221. */
  222. int drm_gem_cma_dumb_create(struct drm_file *file_priv,
  223. struct drm_device *drm,
  224. struct drm_mode_create_dumb *args)
  225. {
  226. struct drm_gem_cma_object *cma_obj;
  227. args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  228. args->size = args->pitch * args->height;
  229. cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
  230. &args->handle);
  231. return PTR_ERR_OR_ZERO(cma_obj);
  232. }
  233. EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
/*
 * VM operations for mmap()ed CMA GEM objects: open/close only maintain the
 * GEM object reference across VMA duplication/teardown. No fault handler is
 * installed because drm_gem_cma_mmap_obj() maps the whole buffer up front.
 */
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
/*
 * drm_gem_cma_mmap_obj - map the whole backing store of @cma_obj into @vma
 *
 * Common tail for the mmap paths: fixes up the VMA set up by the GEM core
 * and hands the actual mapping to dma_mmap_wc(). Returns 0 on success or a
 * negative error code on failure.
 */
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		/* Drop the GEM reference the mmap setup took for this VMA. */
		drm_gem_vm_close(vma);

	return ret;
}
  256. /**
  257. * drm_gem_cma_mmap - memory-map a CMA GEM object
  258. * @filp: file object
  259. * @vma: VMA for the area to be mapped
  260. *
  261. * This function implements an augmented version of the GEM DRM file mmap
  262. * operation for CMA objects: In addition to the usual GEM VMA setup it
  263. * immediately faults in the entire object instead of using on-demaind
  264. * faulting. Drivers which employ the CMA helpers should use this function
  265. * as their ->mmap() handler in the DRM device file's file_operations
  266. * structure.
  267. *
  268. * Instead of directly referencing this function, drivers should use the
  269. * DEFINE_DRM_GEM_CMA_FOPS().macro.
  270. *
  271. * Returns:
  272. * 0 on success or a negative error code on failure.
  273. */
  274. int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
  275. {
  276. struct drm_gem_cma_object *cma_obj;
  277. struct drm_gem_object *gem_obj;
  278. int ret;
  279. ret = drm_gem_mmap(filp, vma);
  280. if (ret)
  281. return ret;
  282. gem_obj = vma->vm_private_data;
  283. cma_obj = to_drm_gem_cma_obj(gem_obj);
  284. return drm_gem_cma_mmap_obj(cma_obj, vma);
  285. }
  286. EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
  287. #ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used in noMMU platforms to propose address mapping
 * for a given buffer. It looks up the GEM object backing the fake mmap
 * offset @pgoff and returns the buffer's kernel virtual address, which on
 * noMMU is directly usable by userspace.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* Resolve the fake offset to a GEM object under the lookup lock. */
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	/* Enforce per-file access control on the mmap offset. */
	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put_unlocked(obj);

	/* Imported buffers have no kernel mapping here, hence the check. */
	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
  348. #endif
  349. #ifdef CONFIG_DEBUG_FS
  350. /**
  351. * drm_gem_cma_describe - describe a CMA GEM object for debugfs
  352. * @cma_obj: CMA GEM object
  353. * @m: debugfs file handle
  354. *
  355. * This function can be used to dump a human-readable representation of the
  356. * CMA GEM object into a synthetic file.
  357. */
  358. void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
  359. struct seq_file *m)
  360. {
  361. struct drm_gem_object *obj = &cma_obj->base;
  362. uint64_t off;
  363. off = drm_vma_node_start(&obj->vma_node);
  364. seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
  365. obj->name, kref_read(&obj->refcount),
  366. off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
  367. seq_printf(m, "\n");
  368. }
  369. EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
  370. #endif
  371. /**
  372. * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
  373. * pages for a CMA GEM object
  374. * @obj: GEM object
  375. *
  376. * This function exports a scatter/gather table suitable for PRIME usage by
  377. * calling the standard DMA mapping API. Drivers using the CMA helpers should
  378. * set this as their &drm_driver.gem_prime_get_sg_table callback.
  379. *
  380. * Returns:
  381. * A pointer to the scatter/gather table of pinned pages or NULL on failure.
  382. */
  383. struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
  384. {
  385. struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
  386. struct sg_table *sgt;
  387. int ret;
  388. sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
  389. if (!sgt)
  390. return NULL;
  391. ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
  392. cma_obj->paddr, obj->size);
  393. if (ret < 0)
  394. goto out;
  395. return sgt;
  396. out:
  397. kfree(sgt);
  398. return NULL;
  399. }
  400. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
  401. /**
  402. * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
  403. * driver's scatter/gather table of pinned pages
  404. * @dev: device to import into
  405. * @attach: DMA-BUF attachment
  406. * @sgt: scatter/gather table of pinned pages
  407. *
  408. * This function imports a scatter/gather table exported via DMA-BUF by
  409. * another driver. Imported buffers must be physically contiguous in memory
  410. * (i.e. the scatter/gather table must contain a single entry). Drivers that
  411. * use the CMA helpers should set this as their
  412. * &drm_driver.gem_prime_import_sg_table callback.
  413. *
  414. * Returns:
  415. * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
  416. * error code on failure.
  417. */
  418. struct drm_gem_object *
  419. drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
  420. struct dma_buf_attachment *attach,
  421. struct sg_table *sgt)
  422. {
  423. struct drm_gem_cma_object *cma_obj;
  424. if (sgt->nents != 1)
  425. return ERR_PTR(-EINVAL);
  426. /* Create a CMA GEM buffer. */
  427. cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
  428. if (IS_ERR(cma_obj))
  429. return ERR_CAST(cma_obj);
  430. cma_obj->paddr = sg_dma_address(sgt->sgl);
  431. cma_obj->sgt = sgt;
  432. DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
  433. return &cma_obj->base;
  434. }
  435. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
  436. /**
  437. * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
  438. * @obj: GEM object
  439. * @vma: VMA for the area to be mapped
  440. *
  441. * This function maps a buffer imported via DRM PRIME into a userspace
  442. * process's address space. Drivers that use the CMA helpers should set this
  443. * as their &drm_driver.gem_prime_mmap callback.
  444. *
  445. * Returns:
  446. * 0 on success or a negative error code on failure.
  447. */
  448. int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
  449. struct vm_area_struct *vma)
  450. {
  451. struct drm_gem_cma_object *cma_obj;
  452. int ret;
  453. ret = drm_gem_mmap_obj(obj, obj->size, vma);
  454. if (ret < 0)
  455. return ret;
  456. cma_obj = to_drm_gem_cma_obj(obj);
  457. return drm_gem_cma_mmap_obj(cma_obj, vma);
  458. }
  459. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
  460. /**
  461. * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
  462. * address space
  463. * @obj: GEM object
  464. *
  465. * This function maps a buffer exported via DRM PRIME into the kernel's
  466. * virtual address space. Since the CMA buffers are already mapped into the
  467. * kernel virtual address space this simply returns the cached virtual
  468. * address. Drivers using the CMA helpers should set this as their DRM
  469. * driver's &drm_driver.gem_prime_vmap callback.
  470. *
  471. * Returns:
  472. * The kernel virtual address of the CMA GEM object's backing store.
  473. */
  474. void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
  475. {
  476. struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
  477. return cma_obj->vaddr;
  478. }
  479. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 * address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their &drm_driver.gem_prime_vunmap callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do: the kernel mapping lives until the object is freed. */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);