drm_fb_cma_helper.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648
  1. /*
  2. * drm kms/fb cma (contiguous memory allocator) helper functions
  3. *
  4. * Copyright (C) 2012 Analog Device Inc.
  5. * Author: Lars-Peter Clausen <lars@metafoo.de>
  6. *
  7. * Based on udl_fbdev.c
  8. * Copyright (C) 2012 Red Hat
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version 2
  13. * of the License, or (at your option) any later version.
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. */
  19. #include <drm/drmP.h>
  20. #include <drm/drm_atomic.h>
  21. #include <drm/drm_crtc.h>
  22. #include <drm/drm_fb_helper.h>
  23. #include <drm/drm_crtc_helper.h>
  24. #include <drm/drm_gem_cma_helper.h>
  25. #include <drm/drm_fb_cma_helper.h>
  26. #include <linux/dma-buf.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/module.h>
  29. #include <linux/reservation.h>
  30. #define DEFAULT_FBDEFIO_DELAY_MS 50
/*
 * CMA-backed framebuffer: a drm_framebuffer plus the CMA GEM object
 * backing each plane (up to DRM's four-plane maximum).
 */
struct drm_fb_cma {
	struct drm_framebuffer fb;
	struct drm_gem_cma_object *obj[4];
};

/* fbdev emulation state: the fb helper plus the CMA framebuffer it uses. */
struct drm_fbdev_cma {
	struct drm_fb_helper fb_helper;
	struct drm_fb_cma *fb;
};
  39. /**
  40. * DOC: framebuffer cma helper functions
  41. *
  42. * Provides helper functions for creating a cma (contiguous memory allocator)
  43. * backed framebuffer.
  44. *
  45. * drm_fb_cma_create() is used in the &drm_mode_config_funcs ->fb_create
  46. * callback function to create a cma backed framebuffer.
  47. *
  48. * An fbdev framebuffer backed by cma is also available by calling
  49. * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
  50. * If the &drm_framebuffer_funcs ->dirty callback is set, fb_deferred_io
  51. * will be set up automatically. dirty() is called by
  52. * drm_fb_helper_deferred_io() in process context (struct delayed_work).
  53. *
  54. * Example fbdev deferred io code::
  55. *
  56. * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
  57. * struct drm_file *file_priv,
  58. * unsigned flags, unsigned color,
  59. * struct drm_clip_rect *clips,
  60. * unsigned num_clips)
  61. * {
  62. * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
  63. * ... push changes ...
  64. * return 0;
  65. * }
  66. *
  67. * static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
  68. * .destroy = drm_fb_cma_destroy,
  69. * .create_handle = drm_fb_cma_create_handle,
  70. * .dirty = driver_fbdev_fb_dirty,
  71. * };
  72. *
  73. * static int driver_fbdev_create(struct drm_fb_helper *helper,
  74. * struct drm_fb_helper_surface_size *sizes)
  75. * {
  76. * return drm_fbdev_cma_create_with_funcs(helper, sizes,
  77. * &driver_fbdev_fb_funcs);
  78. * }
  79. *
  80. * static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
  81. * .fb_probe = driver_fbdev_create,
  82. * };
  83. *
  84. * Initialize:
  85. * fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
  86. * dev->mode_config.num_crtc,
  87. * dev->mode_config.num_connector,
  88. * &driver_fb_helper_funcs);
  89. *
  90. */
/* Upcast from the embedded fb_helper to its containing drm_fbdev_cma. */
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
{
	return container_of(helper, struct drm_fbdev_cma, fb_helper);
}
/* Upcast from the embedded drm_framebuffer to its containing drm_fb_cma. */
static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
{
	return container_of(fb, struct drm_fb_cma, fb);
}
  99. void drm_fb_cma_destroy(struct drm_framebuffer *fb)
  100. {
  101. struct drm_fb_cma *fb_cma = to_fb_cma(fb);
  102. int i;
  103. for (i = 0; i < 4; i++) {
  104. if (fb_cma->obj[i])
  105. drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
  106. }
  107. drm_framebuffer_cleanup(fb);
  108. kfree(fb_cma);
  109. }
  110. EXPORT_SYMBOL(drm_fb_cma_destroy);
  111. int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
  112. struct drm_file *file_priv, unsigned int *handle)
  113. {
  114. struct drm_fb_cma *fb_cma = to_fb_cma(fb);
  115. return drm_gem_handle_create(file_priv,
  116. &fb_cma->obj[0]->base, handle);
  117. }
  118. EXPORT_SYMBOL(drm_fb_cma_create_handle);
/* Default framebuffer vtable: destroy/create_handle only, no dirty(). */
static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
	.destroy	= drm_fb_cma_destroy,
	.create_handle	= drm_fb_cma_create_handle,
};
  123. static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
  124. const struct drm_mode_fb_cmd2 *mode_cmd,
  125. struct drm_gem_cma_object **obj,
  126. unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
  127. {
  128. struct drm_fb_cma *fb_cma;
  129. int ret;
  130. int i;
  131. fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
  132. if (!fb_cma)
  133. return ERR_PTR(-ENOMEM);
  134. drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
  135. for (i = 0; i < num_planes; i++)
  136. fb_cma->obj[i] = obj[i];
  137. ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
  138. if (ret) {
  139. dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
  140. kfree(fb_cma);
  141. return ERR_PTR(ret);
  142. }
  143. return fb_cma;
  144. }
  145. /**
  146. * drm_fb_cma_create_with_funcs() - helper function for the
  147. * &drm_mode_config_funcs ->fb_create
  148. * callback function
  149. * @dev: DRM device
  150. * @file_priv: drm file for the ioctl call
  151. * @mode_cmd: metadata from the userspace fb creation request
  152. * @funcs: vtable to be used for the new framebuffer object
  153. *
  154. * This can be used to set &drm_framebuffer_funcs for drivers that need the
  155. * dirty() callback. Use drm_fb_cma_create() if you don't need to change
  156. * &drm_framebuffer_funcs.
  157. */
  158. struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
  159. struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
  160. const struct drm_framebuffer_funcs *funcs)
  161. {
  162. const struct drm_format_info *info;
  163. struct drm_fb_cma *fb_cma;
  164. struct drm_gem_cma_object *objs[4];
  165. struct drm_gem_object *obj;
  166. int ret;
  167. int i;
  168. info = drm_format_info(mode_cmd->pixel_format);
  169. if (!info)
  170. return ERR_PTR(-EINVAL);
  171. for (i = 0; i < info->num_planes; i++) {
  172. unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
  173. unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
  174. unsigned int min_size;
  175. obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
  176. if (!obj) {
  177. dev_err(dev->dev, "Failed to lookup GEM object\n");
  178. ret = -ENXIO;
  179. goto err_gem_object_unreference;
  180. }
  181. min_size = (height - 1) * mode_cmd->pitches[i]
  182. + width * info->cpp[i]
  183. + mode_cmd->offsets[i];
  184. if (obj->size < min_size) {
  185. drm_gem_object_unreference_unlocked(obj);
  186. ret = -EINVAL;
  187. goto err_gem_object_unreference;
  188. }
  189. objs[i] = to_drm_gem_cma_obj(obj);
  190. }
  191. fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
  192. if (IS_ERR(fb_cma)) {
  193. ret = PTR_ERR(fb_cma);
  194. goto err_gem_object_unreference;
  195. }
  196. return &fb_cma->fb;
  197. err_gem_object_unreference:
  198. for (i--; i >= 0; i--)
  199. drm_gem_object_unreference_unlocked(&objs[i]->base);
  200. return ERR_PTR(ret);
  201. }
  202. EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
  203. /**
  204. * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
  205. * @dev: DRM device
  206. * @file_priv: drm file for the ioctl call
  207. * @mode_cmd: metadata from the userspace fb creation request
  208. *
  209. * If your hardware has special alignment or pitch requirements these should be
  210. * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
  211. * you need to set &drm_framebuffer_funcs ->dirty.
  212. */
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* Delegate using the default fb funcs (destroy/create_handle only). */
	return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
					    &drm_fb_cma_funcs);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create);
  220. /**
  221. * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
  222. * @fb: The framebuffer
  223. * @plane: Which plane
  224. *
  225. * Return the CMA GEM object for given framebuffer.
  226. *
  227. * This function will usually be called from the CRTC callback functions.
  228. */
  229. struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
  230. unsigned int plane)
  231. {
  232. struct drm_fb_cma *fb_cma = to_fb_cma(fb);
  233. if (plane >= 4)
  234. return NULL;
  235. return fb_cma->obj[plane];
  236. }
  237. EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
  238. /**
  239. * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
  240. * @plane: Which plane
  241. * @state: Plane state attach fence to
  242. *
  243. * This should be put into prepare_fb hook of struct &drm_plane_helper_funcs .
  244. *
* This function checks if the plane FB has a dma-buf attached, extracts
  246. * the exclusive fence and attaches it to plane state for the atomic helper
  247. * to wait on.
  248. *
  249. * There is no need for cleanup_fb for CMA based framebuffer drivers.
  250. */
  251. int drm_fb_cma_prepare_fb(struct drm_plane *plane,
  252. struct drm_plane_state *state)
  253. {
  254. struct dma_buf *dma_buf;
  255. struct dma_fence *fence;
  256. if ((plane->state->fb == state->fb) || !state->fb)
  257. return 0;
  258. dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
  259. if (dma_buf) {
  260. fence = reservation_object_get_excl_rcu(dma_buf->resv);
  261. drm_atomic_set_fence_for_plane(state, fence);
  262. }
  263. return 0;
  264. }
  265. EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
  266. #ifdef CONFIG_DEBUG_FS
/* Dump one framebuffer's geometry and per-plane CMA objects to @m. */
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
	const struct drm_format_info *info;
	int i;

	/* %4.4s prints the fourcc pixel format as its four ASCII chars. */
	seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
			(char *)&fb->pixel_format);

	info = drm_format_info(fb->pixel_format);
	for (i = 0; i < info->num_planes; i++) {
		seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
				i, fb->offsets[i], fb->pitches[i]);
		drm_gem_cma_describe(fb_cma->obj[i], m);
	}
}
  281. /**
  282. * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
  283. * in debugfs.
  284. * @m: output file
  285. * @arg: private data for the callback
  286. */
  287. int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
  288. {
  289. struct drm_info_node *node = (struct drm_info_node *) m->private;
  290. struct drm_device *dev = node->minor->dev;
  291. struct drm_framebuffer *fb;
  292. mutex_lock(&dev->mode_config.fb_lock);
  293. drm_for_each_fb(fb, dev)
  294. drm_fb_cma_describe(fb, m);
  295. mutex_unlock(&dev->mode_config.fb_lock);
  296. return 0;
  297. }
  298. EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
  299. #endif
/*
 * fb_mmap implementation: map the whole CMA buffer into userspace with
 * write-combine attributes via the DMA mapping API.
 */
static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return dma_mmap_writecombine(info->device, vma, info->screen_base,
				     info->fix.smem_start, info->fix.smem_len);
}
/* fbdev ops for the non-deferred-io path; fb_mmap maps the CMA buffer. */
static struct fb_ops drm_fbdev_cma_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
	.fb_mmap	= drm_fb_cma_mmap,
};
/*
 * fb_mmap used when deferred io is enabled: let fb_deferred_io_mmap() set
 * up the fault-based mapping, then switch the VMA to write-combine.
 */
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
					  struct vm_area_struct *vma)
{
	fb_deferred_io_mmap(info, vma);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
/*
 * drm_fbdev_cma_defio_init() - enable fbdev deferred io on @fbi
 * @fbi: fbdev info to augment
 * @cma_obj: CMA object backing the fbdev framebuffer
 *
 * Allocates per-device fb_ops and fb_deferred_io copies, points the
 * screen buffer at the CMA vaddr and installs the deferred-io mmap.
 * Returns 0 on success or -ENOMEM.
 */
static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
				    struct drm_gem_cma_object *cma_obj)
{
	struct fb_deferred_io *fbdefio;
	struct fb_ops *fbops;

	/*
	 * Per device structures are needed because:
	 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
	 * fbdefio: individual delays
	 */
	fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
	fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
	if (!fbdefio || !fbops) {
		/* kfree(NULL) is a no-op, so freeing both is always safe. */
		kfree(fbdefio);
		kfree(fbops);
		return -ENOMEM;
	}

	/* can't be offset from vaddr since dirty() uses cma_obj */
	fbi->screen_buffer = cma_obj->vaddr;
	/* fb_deferred_io_fault() needs a physical address */
	fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));

	*fbops = *fbi->fbops;
	fbi->fbops = fbops;

	fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
	fbdefio->deferred_io = drm_fb_helper_deferred_io;
	fbi->fbdefio = fbdefio;
	fb_deferred_io_init(fbi);
	/* Installed after init so it lands in the private fb_ops copy. */
	fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;

	return 0;
}
/* Tear down deferred io and free the per-device copies made at init. */
static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
{
	/* Deferred io was only set up when the driver provided dirty(). */
	if (!fbi->fbdefio)
		return;

	fb_deferred_io_cleanup(fbi);
	kfree(fbi->fbdefio);
	kfree(fbi->fbops);
}
  358. /*
  359. * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
  360. * needs custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
  361. */
  362. int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
  363. struct drm_fb_helper_surface_size *sizes,
  364. const struct drm_framebuffer_funcs *funcs)
  365. {
  366. struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
  367. struct drm_mode_fb_cmd2 mode_cmd = { 0 };
  368. struct drm_device *dev = helper->dev;
  369. struct drm_gem_cma_object *obj;
  370. struct drm_framebuffer *fb;
  371. unsigned int bytes_per_pixel;
  372. unsigned long offset;
  373. struct fb_info *fbi;
  374. size_t size;
  375. int ret;
  376. DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
  377. sizes->surface_width, sizes->surface_height,
  378. sizes->surface_bpp);
  379. bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
  380. mode_cmd.width = sizes->surface_width;
  381. mode_cmd.height = sizes->surface_height;
  382. mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
  383. mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
  384. sizes->surface_depth);
  385. size = mode_cmd.pitches[0] * mode_cmd.height;
  386. obj = drm_gem_cma_create(dev, size);
  387. if (IS_ERR(obj))
  388. return -ENOMEM;
  389. fbi = drm_fb_helper_alloc_fbi(helper);
  390. if (IS_ERR(fbi)) {
  391. ret = PTR_ERR(fbi);
  392. goto err_gem_free_object;
  393. }
  394. fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
  395. if (IS_ERR(fbdev_cma->fb)) {
  396. dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
  397. ret = PTR_ERR(fbdev_cma->fb);
  398. goto err_fb_info_destroy;
  399. }
  400. fb = &fbdev_cma->fb->fb;
  401. helper->fb = fb;
  402. fbi->par = helper;
  403. fbi->flags = FBINFO_FLAG_DEFAULT;
  404. fbi->fbops = &drm_fbdev_cma_ops;
  405. drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
  406. drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
  407. offset = fbi->var.xoffset * bytes_per_pixel;
  408. offset += fbi->var.yoffset * fb->pitches[0];
  409. dev->mode_config.fb_base = (resource_size_t)obj->paddr;
  410. fbi->screen_base = obj->vaddr + offset;
  411. fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
  412. fbi->screen_size = size;
  413. fbi->fix.smem_len = size;
  414. if (funcs->dirty) {
  415. ret = drm_fbdev_cma_defio_init(fbi, obj);
  416. if (ret)
  417. goto err_cma_destroy;
  418. }
  419. return 0;
  420. err_cma_destroy:
  421. drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
  422. drm_fb_cma_destroy(&fbdev_cma->fb->fb);
  423. err_fb_info_destroy:
  424. drm_fb_helper_release_fbi(helper);
  425. err_gem_free_object:
  426. drm_gem_object_unreference_unlocked(&obj->base);
  427. return ret;
  428. }
  429. EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
/* Default ->fb_probe: create the fbdev framebuffer with the default funcs. */
static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
	struct drm_fb_helper_surface_size *sizes)
{
	return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
}
/* fb helper vtable used by drm_fbdev_cma_init(). */
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
	.fb_probe = drm_fbdev_cma_create,
};
  438. /**
  439. * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
  440. * @dev: DRM device
  441. * @preferred_bpp: Preferred bits per pixel for the device
  442. * @num_crtc: Number of CRTCs
  443. * @max_conn_count: Maximum number of connectors
  444. * @funcs: fb helper functions, in particular fb_probe()
  445. *
  446. * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
  447. */
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
	unsigned int preferred_bpp, unsigned int num_crtc,
	unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
{
	struct drm_fbdev_cma *fbdev_cma;
	struct drm_fb_helper *helper;
	int ret;

	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
	if (!fbdev_cma) {
		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
		return ERR_PTR(-ENOMEM);
	}

	helper = &fbdev_cma->fb_helper;

	drm_fb_helper_prepare(dev, helper, funcs);

	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
		goto err_free;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to add connectors.\n");
		goto err_drm_fb_helper_fini;
	}

	/* Triggers ->fb_probe (drm_fbdev_cma_create* via @funcs). */
	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
		goto err_drm_fb_helper_fini;
	}

	return fbdev_cma;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(helper);
err_free:
	kfree(fbdev_cma);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
  485. /**
  486. * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
  487. * @dev: DRM device
  488. * @preferred_bpp: Preferred bits per pixel for the device
  489. * @num_crtc: Number of CRTCs
  490. * @max_conn_count: Maximum number of connectors
  491. *
  492. * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
  493. */
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
	unsigned int preferred_bpp, unsigned int num_crtc,
	unsigned int max_conn_count)
{
	/* Delegate with the default helper funcs (fb_probe only). */
	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
				max_conn_count, &drm_fb_cma_helper_funcs);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
  502. /**
  503. * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
  504. * @fbdev_cma: The drm_fbdev_cma struct
  505. */
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
	/* Deferred io must be torn down while the fb_info still exists. */
	if (fbdev_cma->fb_helper.fbdev)
		drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);

	/* fb may be NULL if ->fb_probe never ran or failed early. */
	if (fbdev_cma->fb) {
		drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
	}

	drm_fb_helper_fini(&fbdev_cma->fb_helper);
	kfree(fbdev_cma);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
  520. /**
  521. * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
  522. * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
  523. *
  524. * This function is usually called from the DRM drivers lastclose callback.
  525. */
  526. void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
  527. {
  528. if (fbdev_cma)
  529. drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
  530. }
  531. EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
  532. /**
* drm_fbdev_cma_hotplug_event() - Poll for hotplug events
  534. * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
  535. *
  536. * This function is usually called from the DRM drivers output_poll_changed
  537. * callback.
  538. */
  539. void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
  540. {
  541. if (fbdev_cma)
  542. drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
  543. }
  544. EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
  545. /**
  546. * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
  547. * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
  548. * @state: desired state, zero to resume, non-zero to suspend
  549. *
  550. * Calls drm_fb_helper_set_suspend, which is a wrapper around
  551. * fb_set_suspend implemented by fbdev core.
  552. */
  553. void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
  554. {
  555. if (fbdev_cma)
  556. drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
  557. }
  558. EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);