/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
  39. /**
  40. * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
  41. * @drm: DRM device
  42. * @size: size of the object to allocate
  43. *
  44. * This function creates and initializes a GEM CMA object of the given size,
  45. * but doesn't allocate any memory to back the object.
  46. *
  47. * Returns:
  48. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  49. * error code on failure.
  50. */
  51. static struct drm_gem_cma_object *
  52. __drm_gem_cma_create(struct drm_device *drm, size_t size)
  53. {
  54. struct drm_gem_cma_object *cma_obj;
  55. struct drm_gem_object *gem_obj;
  56. int ret;
  57. if (drm->driver->gem_create_object)
  58. gem_obj = drm->driver->gem_create_object(drm, size);
  59. else
  60. gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
  61. if (!gem_obj)
  62. return ERR_PTR(-ENOMEM);
  63. cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);
  64. ret = drm_gem_object_init(drm, gem_obj, size);
  65. if (ret)
  66. goto error;
  67. ret = drm_gem_create_mmap_offset(gem_obj);
  68. if (ret) {
  69. drm_gem_object_release(gem_obj);
  70. goto error;
  71. }
  72. return cma_obj;
  73. error:
  74. kfree(cma_obj);
  75. return ERR_PTR(ret);
  76. }
  77. /**
  78. * drm_gem_cma_create - allocate an object with the given size
  79. * @drm: DRM device
  80. * @size: size of the object to allocate
  81. *
  82. * This function creates a CMA GEM object and allocates a contiguous chunk of
  83. * memory as backing store. The backing memory has the writecombine attribute
  84. * set.
  85. *
  86. * Returns:
  87. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  88. * error code on failure.
  89. */
  90. struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
  91. size_t size)
  92. {
  93. struct drm_gem_cma_object *cma_obj;
  94. int ret;
  95. size = round_up(size, PAGE_SIZE);
  96. cma_obj = __drm_gem_cma_create(drm, size);
  97. if (IS_ERR(cma_obj))
  98. return cma_obj;
  99. cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
  100. GFP_KERNEL | __GFP_NOWARN);
  101. if (!cma_obj->vaddr) {
  102. dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
  103. size);
  104. ret = -ENOMEM;
  105. goto error;
  106. }
  107. return cma_obj;
  108. error:
  109. drm_gem_object_put_unlocked(&cma_obj->base);
  110. return ERR_PTR(ret);
  111. }
  112. EXPORT_SYMBOL_GPL(drm_gem_cma_create);
  113. /**
  114. * drm_gem_cma_create_with_handle - allocate an object with the given size and
  115. * return a GEM handle to it
  116. * @file_priv: DRM file-private structure to register the handle for
  117. * @drm: DRM device
  118. * @size: size of the object to allocate
  119. * @handle: return location for the GEM handle
  120. *
  121. * This function creates a CMA GEM object, allocating a physically contiguous
  122. * chunk of memory as backing store. The GEM object is then added to the list
  123. * of object associated with the given file and a handle to it is returned.
  124. *
  125. * Returns:
  126. * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
  127. * error code on failure.
  128. */
  129. static struct drm_gem_cma_object *
  130. drm_gem_cma_create_with_handle(struct drm_file *file_priv,
  131. struct drm_device *drm, size_t size,
  132. uint32_t *handle)
  133. {
  134. struct drm_gem_cma_object *cma_obj;
  135. struct drm_gem_object *gem_obj;
  136. int ret;
  137. cma_obj = drm_gem_cma_create(drm, size);
  138. if (IS_ERR(cma_obj))
  139. return cma_obj;
  140. gem_obj = &cma_obj->base;
  141. /*
  142. * allocate a id of idr table where the obj is registered
  143. * and handle has the id what user can see.
  144. */
  145. ret = drm_gem_handle_create(file_priv, gem_obj, handle);
  146. /* drop reference from allocate - handle holds it now. */
  147. drm_gem_object_put_unlocked(gem_obj);
  148. if (ret)
  149. return ERR_PTR(ret);
  150. return cma_obj;
  151. }
  152. /**
  153. * drm_gem_cma_free_object - free resources associated with a CMA GEM object
  154. * @gem_obj: GEM object to free
  155. *
  156. * This function frees the backing memory of the CMA GEM object, cleans up the
  157. * GEM object state and frees the memory used to store the object itself.
  158. * Drivers using the CMA helpers should set this as their
  159. * &drm_driver.gem_free_object callback.
  160. */
  161. void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
  162. {
  163. struct drm_gem_cma_object *cma_obj;
  164. cma_obj = to_drm_gem_cma_obj(gem_obj);
  165. if (cma_obj->vaddr) {
  166. dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
  167. cma_obj->vaddr, cma_obj->paddr);
  168. } else if (gem_obj->import_attach) {
  169. drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
  170. }
  171. drm_gem_object_release(gem_obj);
  172. kfree(cma_obj);
  173. }
  174. EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
  175. /**
  176. * drm_gem_cma_dumb_create_internal - create a dumb buffer object
  177. * @file_priv: DRM file-private structure to create the dumb buffer for
  178. * @drm: DRM device
  179. * @args: IOCTL data
  180. *
  181. * This aligns the pitch and size arguments to the minimum required. This is
  182. * an internal helper that can be wrapped by a driver to account for hardware
  183. * with more specific alignment requirements. It should not be used directly
  184. * as their &drm_driver.dumb_create callback.
  185. *
  186. * Returns:
  187. * 0 on success or a negative error code on failure.
  188. */
  189. int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
  190. struct drm_device *drm,
  191. struct drm_mode_create_dumb *args)
  192. {
  193. unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  194. struct drm_gem_cma_object *cma_obj;
  195. if (args->pitch < min_pitch)
  196. args->pitch = min_pitch;
  197. if (args->size < args->pitch * args->height)
  198. args->size = args->pitch * args->height;
  199. cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
  200. &args->handle);
  201. return PTR_ERR_OR_ZERO(cma_obj);
  202. }
  203. EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
  204. /**
  205. * drm_gem_cma_dumb_create - create a dumb buffer object
  206. * @file_priv: DRM file-private structure to create the dumb buffer for
  207. * @drm: DRM device
  208. * @args: IOCTL data
  209. *
  210. * This function computes the pitch of the dumb buffer and rounds it up to an
  211. * integer number of bytes per pixel. Drivers for hardware that doesn't have
  212. * any additional restrictions on the pitch can directly use this function as
  213. * their &drm_driver.dumb_create callback.
  214. *
  215. * For hardware with additional restrictions, drivers can adjust the fields
  216. * set up by userspace and pass the IOCTL data along to the
  217. * drm_gem_cma_dumb_create_internal() function.
  218. *
  219. * Returns:
  220. * 0 on success or a negative error code on failure.
  221. */
  222. int drm_gem_cma_dumb_create(struct drm_file *file_priv,
  223. struct drm_device *drm,
  224. struct drm_mode_create_dumb *args)
  225. {
  226. struct drm_gem_cma_object *cma_obj;
  227. args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  228. args->size = args->pitch * args->height;
  229. cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
  230. &args->handle);
  231. return PTR_ERR_OR_ZERO(cma_obj);
  232. }
  233. EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
  234. /**
  235. * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
  236. * object
  237. * @file_priv: DRM file-private structure containing the GEM object
  238. * @drm: DRM device
  239. * @handle: GEM object handle
  240. * @offset: return location for the fake mmap offset
  241. *
  242. * This function look up an object by its handle and returns the fake mmap
  243. * offset associated with it. Drivers using the CMA helpers should set this
  244. * as their &drm_driver.dumb_map_offset callback.
  245. *
  246. * Returns:
  247. * 0 on success or a negative error code on failure.
  248. */
  249. int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
  250. struct drm_device *drm, u32 handle,
  251. u64 *offset)
  252. {
  253. struct drm_gem_object *gem_obj;
  254. gem_obj = drm_gem_object_lookup(file_priv, handle);
  255. if (!gem_obj) {
  256. dev_err(drm->dev, "failed to lookup GEM object\n");
  257. return -EINVAL;
  258. }
  259. *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
  260. drm_gem_object_put_unlocked(gem_obj);
  261. return 0;
  262. }
  263. EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
  264. const struct vm_operations_struct drm_gem_cma_vm_ops = {
  265. .open = drm_gem_vm_open,
  266. .close = drm_gem_vm_close,
  267. };
  268. EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
  269. static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
  270. struct vm_area_struct *vma)
  271. {
  272. int ret;
  273. /*
  274. * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
  275. * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
  276. * the whole buffer.
  277. */
  278. vma->vm_flags &= ~VM_PFNMAP;
  279. vma->vm_pgoff = 0;
  280. ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
  281. cma_obj->paddr, vma->vm_end - vma->vm_start);
  282. if (ret)
  283. drm_gem_vm_close(vma);
  284. return ret;
  285. }
  286. /**
  287. * drm_gem_cma_mmap - memory-map a CMA GEM object
  288. * @filp: file object
  289. * @vma: VMA for the area to be mapped
  290. *
  291. * This function implements an augmented version of the GEM DRM file mmap
  292. * operation for CMA objects: In addition to the usual GEM VMA setup it
  293. * immediately faults in the entire object instead of using on-demaind
  294. * faulting. Drivers which employ the CMA helpers should use this function
  295. * as their ->mmap() handler in the DRM device file's file_operations
  296. * structure.
  297. *
  298. * Instead of directly referencing this function, drivers should use the
  299. * DEFINE_DRM_GEM_CMA_FOPS().macro.
  300. *
  301. * Returns:
  302. * 0 on success or a negative error code on failure.
  303. */
  304. int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
  305. {
  306. struct drm_gem_cma_object *cma_obj;
  307. struct drm_gem_object *gem_obj;
  308. int ret;
  309. ret = drm_gem_mmap(filp, vma);
  310. if (ret)
  311. return ret;
  312. gem_obj = vma->vm_private_data;
  313. cma_obj = to_drm_gem_cma_obj(gem_obj);
  314. return drm_gem_cma_mmap_obj(cma_obj, vma);
  315. }
  316. EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
  317. #ifndef CONFIG_MMU
  318. /**
  319. * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
  320. * @filp: file object
  321. * @addr: memory address
  322. * @len: buffer size
  323. * @pgoff: page offset
  324. * @flags: memory flags
  325. *
  326. * This function is used in noMMU platforms to propose address mapping
  327. * for a given buffer.
  328. * It's intended to be used as a direct handler for the struct
  329. * &file_operations.get_unmapped_area operation.
  330. *
  331. * Returns:
  332. * mapping address on success or a negative error code on failure.
  333. */
  334. unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
  335. unsigned long addr,
  336. unsigned long len,
  337. unsigned long pgoff,
  338. unsigned long flags)
  339. {
  340. struct drm_gem_cma_object *cma_obj;
  341. struct drm_gem_object *obj = NULL;
  342. struct drm_file *priv = filp->private_data;
  343. struct drm_device *dev = priv->minor->dev;
  344. struct drm_vma_offset_node *node;
  345. if (drm_device_is_unplugged(dev))
  346. return -ENODEV;
  347. drm_vma_offset_lock_lookup(dev->vma_offset_manager);
  348. node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
  349. pgoff,
  350. len >> PAGE_SHIFT);
  351. if (likely(node)) {
  352. obj = container_of(node, struct drm_gem_object, vma_node);
  353. /*
  354. * When the object is being freed, after it hits 0-refcnt it
  355. * proceeds to tear down the object. In the process it will
  356. * attempt to remove the VMA offset and so acquire this
  357. * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
  358. * that matches our range, we know it is in the process of being
  359. * destroyed and will be freed as soon as we release the lock -
  360. * so we have to check for the 0-refcnted object and treat it as
  361. * invalid.
  362. */
  363. if (!kref_get_unless_zero(&obj->refcount))
  364. obj = NULL;
  365. }
  366. drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
  367. if (!obj)
  368. return -EINVAL;
  369. if (!drm_vma_node_is_allowed(node, priv)) {
  370. drm_gem_object_put_unlocked(obj);
  371. return -EACCES;
  372. }
  373. cma_obj = to_drm_gem_cma_obj(obj);
  374. drm_gem_object_put_unlocked(obj);
  375. return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
  376. }
  377. EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
  378. #endif
  379. #ifdef CONFIG_DEBUG_FS
  380. /**
  381. * drm_gem_cma_describe - describe a CMA GEM object for debugfs
  382. * @cma_obj: CMA GEM object
  383. * @m: debugfs file handle
  384. *
  385. * This function can be used to dump a human-readable representation of the
  386. * CMA GEM object into a synthetic file.
  387. */
  388. void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
  389. struct seq_file *m)
  390. {
  391. struct drm_gem_object *obj = &cma_obj->base;
  392. uint64_t off;
  393. off = drm_vma_node_start(&obj->vma_node);
  394. seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
  395. obj->name, kref_read(&obj->refcount),
  396. off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
  397. seq_printf(m, "\n");
  398. }
  399. EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
  400. #endif
  401. /**
  402. * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
  403. * pages for a CMA GEM object
  404. * @obj: GEM object
  405. *
  406. * This function exports a scatter/gather table suitable for PRIME usage by
  407. * calling the standard DMA mapping API. Drivers using the CMA helpers should
  408. * set this as their &drm_driver.gem_prime_get_sg_table callback.
  409. *
  410. * Returns:
  411. * A pointer to the scatter/gather table of pinned pages or NULL on failure.
  412. */
  413. struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
  414. {
  415. struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
  416. struct sg_table *sgt;
  417. int ret;
  418. sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
  419. if (!sgt)
  420. return NULL;
  421. ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
  422. cma_obj->paddr, obj->size);
  423. if (ret < 0)
  424. goto out;
  425. return sgt;
  426. out:
  427. kfree(sgt);
  428. return NULL;
  429. }
  430. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
  431. /**
  432. * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
  433. * driver's scatter/gather table of pinned pages
  434. * @dev: device to import into
  435. * @attach: DMA-BUF attachment
  436. * @sgt: scatter/gather table of pinned pages
  437. *
  438. * This function imports a scatter/gather table exported via DMA-BUF by
  439. * another driver. Imported buffers must be physically contiguous in memory
  440. * (i.e. the scatter/gather table must contain a single entry). Drivers that
  441. * use the CMA helpers should set this as their
  442. * &drm_driver.gem_prime_import_sg_table callback.
  443. *
  444. * Returns:
  445. * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
  446. * error code on failure.
  447. */
  448. struct drm_gem_object *
  449. drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
  450. struct dma_buf_attachment *attach,
  451. struct sg_table *sgt)
  452. {
  453. struct drm_gem_cma_object *cma_obj;
  454. if (sgt->nents != 1)
  455. return ERR_PTR(-EINVAL);
  456. /* Create a CMA GEM buffer. */
  457. cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
  458. if (IS_ERR(cma_obj))
  459. return ERR_CAST(cma_obj);
  460. cma_obj->paddr = sg_dma_address(sgt->sgl);
  461. cma_obj->sgt = sgt;
  462. DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
  463. return &cma_obj->base;
  464. }
  465. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
  466. /**
  467. * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
  468. * @obj: GEM object
  469. * @vma: VMA for the area to be mapped
  470. *
  471. * This function maps a buffer imported via DRM PRIME into a userspace
  472. * process's address space. Drivers that use the CMA helpers should set this
  473. * as their &drm_driver.gem_prime_mmap callback.
  474. *
  475. * Returns:
  476. * 0 on success or a negative error code on failure.
  477. */
  478. int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
  479. struct vm_area_struct *vma)
  480. {
  481. struct drm_gem_cma_object *cma_obj;
  482. int ret;
  483. ret = drm_gem_mmap_obj(obj, obj->size, vma);
  484. if (ret < 0)
  485. return ret;
  486. cma_obj = to_drm_gem_cma_obj(obj);
  487. return drm_gem_cma_mmap_obj(cma_obj, vma);
  488. }
  489. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
  490. /**
  491. * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
  492. * address space
  493. * @obj: GEM object
  494. *
  495. * This function maps a buffer exported via DRM PRIME into the kernel's
  496. * virtual address space. Since the CMA buffers are already mapped into the
  497. * kernel virtual address space this simply returns the cached virtual
  498. * address. Drivers using the CMA helpers should set this as their DRM
  499. * driver's &drm_driver.gem_prime_vmap callback.
  500. *
  501. * Returns:
  502. * The kernel virtual address of the CMA GEM object's backing store.
  503. */
  504. void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
  505. {
  506. struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
  507. return cma_obj->vaddr;
  508. }
  509. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
  510. /**
  511. * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
  512. * address space
  513. * @obj: GEM object
  514. * @vaddr: kernel virtual address where the CMA GEM object was mapped
  515. *
  516. * This function removes a buffer exported via DRM PRIME from the kernel's
  517. * virtual address space. This is a no-op because CMA buffers cannot be
  518. * unmapped from kernel space. Drivers using the CMA helpers should set this
  519. * as their &drm_driver.gem_prime_vunmap callback.
  520. */
  521. void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
  522. {
  523. /* Nothing to do */
  524. }
  525. EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);