drm_atomic.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188
  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software"),
  7. * to deal in the Software without restriction, including without limitation
  8. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9. * and/or sell copies of the Software, and to permit persons to whom the
  10. * Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice shall be included in
  13. * all copies or substantial portions of the Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21. * OTHER DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Rob Clark <robdclark@gmail.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. */
  27. #include <drm/drmP.h>
  28. #include <drm/drm_atomic.h>
  29. #include <drm/drm_atomic_uapi.h>
  30. #include <drm/drm_mode.h>
  31. #include <drm/drm_print.h>
  32. #include <drm/drm_writeback.h>
  33. #include <linux/sync_file.h>
  34. #include "drm_crtc_internal.h"
  35. #include "drm_internal.h"
  36. void __drm_crtc_commit_free(struct kref *kref)
  37. {
  38. struct drm_crtc_commit *commit =
  39. container_of(kref, struct drm_crtc_commit, ref);
  40. kfree(commit);
  41. }
  42. EXPORT_SYMBOL(__drm_crtc_commit_free);
  43. /**
  44. * drm_atomic_state_default_release -
  45. * release memory initialized by drm_atomic_state_init
  46. * @state: atomic state
  47. *
  48. * Free all the memory allocated by drm_atomic_state_init.
  49. * This should only be used by drivers which are still subclassing
  50. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  51. */
  52. void drm_atomic_state_default_release(struct drm_atomic_state *state)
  53. {
  54. kfree(state->connectors);
  55. kfree(state->crtcs);
  56. kfree(state->planes);
  57. kfree(state->private_objs);
  58. }
  59. EXPORT_SYMBOL(drm_atomic_state_default_release);
  60. /**
  61. * drm_atomic_state_init - init new atomic state
  62. * @dev: DRM device
  63. * @state: atomic state
  64. *
  65. * Default implementation for filling in a new atomic state.
  66. * This should only be used by drivers which are still subclassing
  67. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  68. */
  69. int
  70. drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
  71. {
  72. kref_init(&state->ref);
  73. /* TODO legacy paths should maybe do a better job about
  74. * setting this appropriately?
  75. */
  76. state->allow_modeset = true;
  77. state->crtcs = kcalloc(dev->mode_config.num_crtc,
  78. sizeof(*state->crtcs), GFP_KERNEL);
  79. if (!state->crtcs)
  80. goto fail;
  81. state->planes = kcalloc(dev->mode_config.num_total_plane,
  82. sizeof(*state->planes), GFP_KERNEL);
  83. if (!state->planes)
  84. goto fail;
  85. state->dev = dev;
  86. DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
  87. return 0;
  88. fail:
  89. drm_atomic_state_default_release(state);
  90. return -ENOMEM;
  91. }
  92. EXPORT_SYMBOL(drm_atomic_state_init);
  93. /**
  94. * drm_atomic_state_alloc - allocate atomic state
  95. * @dev: DRM device
  96. *
  97. * This allocates an empty atomic state to track updates.
  98. */
  99. struct drm_atomic_state *
  100. drm_atomic_state_alloc(struct drm_device *dev)
  101. {
  102. struct drm_mode_config *config = &dev->mode_config;
  103. if (!config->funcs->atomic_state_alloc) {
  104. struct drm_atomic_state *state;
  105. state = kzalloc(sizeof(*state), GFP_KERNEL);
  106. if (!state)
  107. return NULL;
  108. if (drm_atomic_state_init(dev, state) < 0) {
  109. kfree(state);
  110. return NULL;
  111. }
  112. return state;
  113. }
  114. return config->funcs->atomic_state_alloc(dev);
  115. }
  116. EXPORT_SYMBOL(drm_atomic_state_alloc);
/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	/* Destroy every duplicated connector state and drop the connector
	 * reference taken in drm_atomic_get_connector_state(). Slots with a
	 * NULL ptr were never filled in. */
	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		/* reference acquired by drm_connector_get() when the state
		 * was added; dropped last, after the state is destroyed */
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);
		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;

		/* drop the commit tracking reference, if any was attached */
		if (state->crtcs[i].commit) {
			drm_crtc_commit_put(state->crtcs[i].commit);
			state->crtcs[i].commit = NULL;
		}
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	/* Private objects are tracked in a densely-packed array, so no NULL
	 * check is needed here (every entry up to num_private_objs is valid). */
	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
  185. /**
  186. * drm_atomic_state_clear - clear state object
  187. * @state: atomic state
  188. *
  189. * When the w/w mutex algorithm detects a deadlock we need to back off and drop
  190. * all locks. So someone else could sneak in and change the current modeset
  191. * configuration. Which means that all the state assembled in @state is no
  192. * longer an atomic update to the current state, but to some arbitrary earlier
  193. * state. Which could break assumptions the driver's
  194. * &drm_mode_config_funcs.atomic_check likely relies on.
  195. *
  196. * Hence we must clear all cached state and completely start over, using this
  197. * function.
  198. */
  199. void drm_atomic_state_clear(struct drm_atomic_state *state)
  200. {
  201. struct drm_device *dev = state->dev;
  202. struct drm_mode_config *config = &dev->mode_config;
  203. if (config->funcs->atomic_state_clear)
  204. config->funcs->atomic_state_clear(state);
  205. else
  206. drm_atomic_state_default_clear(state);
  207. }
  208. EXPORT_SYMBOL(drm_atomic_state_clear);
  209. /**
  210. * __drm_atomic_state_free - free all memory for an atomic state
  211. * @ref: This atomic state to deallocate
  212. *
  213. * This frees all memory associated with an atomic state, including all the
  214. * per-object state for planes, crtcs and connectors.
  215. */
  216. void __drm_atomic_state_free(struct kref *ref)
  217. {
  218. struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
  219. struct drm_mode_config *config = &state->dev->mode_config;
  220. drm_atomic_state_clear(state);
  221. DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
  222. if (config->funcs->atomic_state_free) {
  223. config->funcs->atomic_state_free(state);
  224. } else {
  225. drm_atomic_state_default_release(state);
  226. kfree(state);
  227. }
  228. }
  229. EXPORT_SYMBOL(__drm_atomic_state_free);
  230. /**
  231. * drm_atomic_get_crtc_state - get crtc state
  232. * @state: global atomic state object
  233. * @crtc: crtc to get state object for
  234. *
  235. * This function returns the crtc state for the given crtc, allocating it if
  236. * needed. It will also grab the relevant crtc lock to make sure that the state
  237. * is consistent.
  238. *
  239. * Returns:
  240. *
  241. * Either the allocated state or the error code encoded into the pointer. When
  242. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  243. * entire atomic sequence must be restarted. All other errors are fatal.
  244. */
  245. struct drm_crtc_state *
  246. drm_atomic_get_crtc_state(struct drm_atomic_state *state,
  247. struct drm_crtc *crtc)
  248. {
  249. int ret, index = drm_crtc_index(crtc);
  250. struct drm_crtc_state *crtc_state;
  251. WARN_ON(!state->acquire_ctx);
  252. crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
  253. if (crtc_state)
  254. return crtc_state;
  255. ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
  256. if (ret)
  257. return ERR_PTR(ret);
  258. crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
  259. if (!crtc_state)
  260. return ERR_PTR(-ENOMEM);
  261. state->crtcs[index].state = crtc_state;
  262. state->crtcs[index].old_state = crtc->state;
  263. state->crtcs[index].new_state = crtc_state;
  264. state->crtcs[index].ptr = crtc;
  265. crtc_state->state = state;
  266. DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
  267. crtc->base.id, crtc->name, crtc_state, state);
  268. return crtc_state;
  269. }
  270. EXPORT_SYMBOL(drm_atomic_get_crtc_state);
  271. static int drm_atomic_crtc_check(struct drm_crtc *crtc,
  272. struct drm_crtc_state *state)
  273. {
  274. /* NOTE: we explicitly don't enforce constraints such as primary
  275. * layer covering entire screen, since that is something we want
  276. * to allow (on hw that supports it). For hw that does not, it
  277. * should be checked in driver's crtc->atomic_check() vfunc.
  278. *
  279. * TODO: Add generic modeset state checks once we support those.
  280. */
  281. if (state->active && !state->enable) {
  282. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
  283. crtc->base.id, crtc->name);
  284. return -EINVAL;
  285. }
  286. /* The state->enable vs. state->mode_blob checks can be WARN_ON,
  287. * as this is a kernel-internal detail that userspace should never
  288. * be able to trigger. */
  289. if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
  290. WARN_ON(state->enable && !state->mode_blob)) {
  291. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
  292. crtc->base.id, crtc->name);
  293. return -EINVAL;
  294. }
  295. if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
  296. WARN_ON(!state->enable && state->mode_blob)) {
  297. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
  298. crtc->base.id, crtc->name);
  299. return -EINVAL;
  300. }
  301. /*
  302. * Reject event generation for when a CRTC is off and stays off.
  303. * It wouldn't be hard to implement this, but userspace has a track
  304. * record of happily burning through 100% cpu (or worse, crash) when the
  305. * display pipe is suspended. To avoid all that fun just reject updates
  306. * that ask for events since likely that indicates a bug in the
  307. * compositor's drawing loop. This is consistent with the vblank IOCTL
  308. * and legacy page_flip IOCTL which also reject service on a disabled
  309. * pipe.
  310. */
  311. if (state->event && !state->active && !crtc->state->active) {
  312. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
  313. crtc->base.id, crtc->name);
  314. return -EINVAL;
  315. }
  316. return 0;
  317. }
/*
 * Dump a &drm_crtc_state to @p, one tab-indented "field=value" line per
 * field, for atomic state debug output. The driver gets a chance to append
 * its own private state via &drm_crtc_funcs.atomic_print_state.
 */
static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	/* driver-private state, if the driver implements the hook */
	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
  337. static int drm_atomic_connector_check(struct drm_connector *connector,
  338. struct drm_connector_state *state)
  339. {
  340. struct drm_crtc_state *crtc_state;
  341. struct drm_writeback_job *writeback_job = state->writeback_job;
  342. if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
  343. return 0;
  344. if (writeback_job->fb && !state->crtc) {
  345. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
  346. connector->base.id, connector->name);
  347. return -EINVAL;
  348. }
  349. if (state->crtc)
  350. crtc_state = drm_atomic_get_existing_crtc_state(state->state,
  351. state->crtc);
  352. if (writeback_job->fb && !crtc_state->active) {
  353. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
  354. connector->base.id, connector->name,
  355. state->crtc->base.id);
  356. return -EINVAL;
  357. }
  358. if (writeback_job->out_fence && !writeback_job->fb) {
  359. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
  360. connector->base.id, connector->name);
  361. return -EINVAL;
  362. }
  363. return 0;
  364. }
  365. /**
  366. * drm_atomic_get_plane_state - get plane state
  367. * @state: global atomic state object
  368. * @plane: plane to get state object for
  369. *
  370. * This function returns the plane state for the given plane, allocating it if
  371. * needed. It will also grab the relevant plane lock to make sure that the state
  372. * is consistent.
  373. *
  374. * Returns:
  375. *
  376. * Either the allocated state or the error code encoded into the pointer. When
  377. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  378. * entire atomic sequence must be restarted. All other errors are fatal.
  379. */
  380. struct drm_plane_state *
  381. drm_atomic_get_plane_state(struct drm_atomic_state *state,
  382. struct drm_plane *plane)
  383. {
  384. int ret, index = drm_plane_index(plane);
  385. struct drm_plane_state *plane_state;
  386. WARN_ON(!state->acquire_ctx);
  387. /* the legacy pointers should never be set */
  388. WARN_ON(plane->fb);
  389. WARN_ON(plane->old_fb);
  390. WARN_ON(plane->crtc);
  391. plane_state = drm_atomic_get_existing_plane_state(state, plane);
  392. if (plane_state)
  393. return plane_state;
  394. ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
  395. if (ret)
  396. return ERR_PTR(ret);
  397. plane_state = plane->funcs->atomic_duplicate_state(plane);
  398. if (!plane_state)
  399. return ERR_PTR(-ENOMEM);
  400. state->planes[index].state = plane_state;
  401. state->planes[index].ptr = plane;
  402. state->planes[index].old_state = plane->state;
  403. state->planes[index].new_state = plane_state;
  404. plane_state->state = state;
  405. DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
  406. plane->base.id, plane->name, plane_state, state);
  407. if (plane_state->crtc) {
  408. struct drm_crtc_state *crtc_state;
  409. crtc_state = drm_atomic_get_crtc_state(state,
  410. plane_state->crtc);
  411. if (IS_ERR(crtc_state))
  412. return ERR_CAST(crtc_state);
  413. }
  414. return plane_state;
  415. }
  416. EXPORT_SYMBOL(drm_atomic_get_plane_state);
  417. static bool
  418. plane_switching_crtc(struct drm_atomic_state *state,
  419. struct drm_plane *plane,
  420. struct drm_plane_state *plane_state)
  421. {
  422. if (!plane->state->crtc || !plane_state->crtc)
  423. return false;
  424. if (plane->state->crtc == plane_state->crtc)
  425. return false;
  426. /* This could be refined, but currently there's no helper or driver code
  427. * to implement direct switching of active planes nor userspace to take
  428. * advantage of more direct plane switching without the intermediate
  429. * full OFF state.
  430. */
  431. return true;
  432. }
/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state: CRTC/FB pairing, CRTC
 * compatibility, pixel format/modifier support, and coordinate range and
 * bounds checks.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (state->crtc && !state->fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (state->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 state->crtc->base.id, state->crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format.
	 * state->fb is non-NULL here: the pairing check above guarantees
	 * an FB whenever a CRTC is set. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
					   state->fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(state->fb->format->format,
						     &format_name),
				 state->fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	/* src_* coordinates are in 16.16 fixed point, so scale the fb
	 * dimensions up to match */
	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		/* (x & 0xffff) * 15625 >> 10 == frac * 1000000 / 65536:
		 * prints the 16.16 fractional part as six decimal digits */
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
				 state->fb->width, state->fb->height);
		return -ENOSPC;
	}

	/* Moving an active plane between CRTCs directly is not supported. */
	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * Dump a &drm_plane_state to @p for atomic state debug output: CRTC/FB
 * binding, destination and source rectangles (source in 16.16 fixed point),
 * rotation, zpos and color properties, plus any driver-private state via
 * &drm_plane_funcs.atomic_print_state.
 */
static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	/* driver-private state, if the driver implements the hook */
	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}
  537. /**
  538. * DOC: handling driver private state
  539. *
  540. * Very often the DRM objects exposed to userspace in the atomic modeset api
  541. * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
  542. * underlying hardware. Especially for any kind of shared resources (e.g. shared
  543. * clocks, scaler units, bandwidth and fifo limits shared among a group of
  544. * planes or CRTCs, and so on) it makes sense to model these as independent
  545. * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
  547. * helpers already provide for connectors, planes and CRTCs.
  548. *
  549. * To make this easier on drivers the atomic core provides some support to track
  550. * driver private state objects using struct &drm_private_obj, with the
  551. * associated state struct &drm_private_state.
  552. *
  553. * Similar to userspace-exposed objects, private state structures can be
  554. * acquired by calling drm_atomic_get_private_obj_state(). Since this function
  555. * does not take care of locking, drivers should wrap it for each type of
  556. * private state object they have with the required call to drm_modeset_lock()
  557. * for the corresponding &drm_modeset_lock.
  558. *
  559. * All private state structures contained in a &drm_atomic_state update can be
  560. * iterated using for_each_oldnew_private_obj_in_state(),
  561. * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
  562. * Drivers are recommended to wrap these for each type of driver private state
  563. * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
  564. * least if they want to iterate over all objects of a given type.
  565. *
  566. * An earlier way to handle driver private state was by subclassing struct
  567. * &drm_atomic_state. But since that encourages non-standard ways to implement
  568. * the check/commit split atomic requires (by using e.g. "check and rollback or
  569. * commit instead" of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
  571. */
  572. /**
  573. * drm_atomic_private_obj_init - initialize private object
  574. * @obj: private object
  575. * @state: initial private object state
  576. * @funcs: pointer to the struct of function pointers that identify the object
  577. * type
  578. *
  579. * Initialize the private object, which can be embedded into any
  580. * driver private object that needs its own atomic state.
  581. */
  582. void
  583. drm_atomic_private_obj_init(struct drm_private_obj *obj,
  584. struct drm_private_state *state,
  585. const struct drm_private_state_funcs *funcs)
  586. {
  587. memset(obj, 0, sizeof(*obj));
  588. obj->state = state;
  589. obj->funcs = funcs;
  590. }
  591. EXPORT_SYMBOL(drm_atomic_private_obj_init);
/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object: destroys the current state through the
 * object's &drm_private_state_funcs.atomic_destroy_state hook.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);
  604. /**
  605. * drm_atomic_get_private_obj_state - get private object state
  606. * @state: global atomic state
  607. * @obj: private object to get the state for
  608. *
  609. * This function returns the private object state for the given private object,
  610. * allocating the state if needed. It does not grab any locks as the caller is
  611. * expected to care of any required locking.
  612. *
  613. * RETURNS:
  614. *
  615. * Either the allocated state or the error code encoded into a pointer.
  616. */
  617. struct drm_private_state *
  618. drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
  619. struct drm_private_obj *obj)
  620. {
  621. int index, num_objs, i;
  622. size_t size;
  623. struct __drm_private_objs_state *arr;
  624. struct drm_private_state *obj_state;
  625. for (i = 0; i < state->num_private_objs; i++)
  626. if (obj == state->private_objs[i].ptr)
  627. return state->private_objs[i].state;
  628. num_objs = state->num_private_objs + 1;
  629. size = sizeof(*state->private_objs) * num_objs;
  630. arr = krealloc(state->private_objs, size, GFP_KERNEL);
  631. if (!arr)
  632. return ERR_PTR(-ENOMEM);
  633. state->private_objs = arr;
  634. index = state->num_private_objs;
  635. memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
  636. obj_state = obj->funcs->atomic_duplicate_state(obj);
  637. if (!obj_state)
  638. return ERR_PTR(-ENOMEM);
  639. state->private_objs[index].state = obj_state;
  640. state->private_objs[index].old_state = obj->state;
  641. state->private_objs[index].new_state = obj_state;
  642. state->private_objs[index].ptr = obj;
  643. obj_state->state = state;
  644. state->num_private_objs = num_objs;
  645. DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
  646. obj, obj_state, state);
  647. return obj_state;
  648. }
  649. EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			  struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	/* Grow the connectors array on demand — drm_atomic_state_init()
	 * doesn't preallocate it, presumably because the connector count can
	 * change at runtime (TODO confirm: hotplug). Note: the triple-n in
	 * __drm_connnectors_state matches the struct name declared in
	 * drm_atomic.h. */
	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		/* zero only the newly added tail of the array */
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	/* Already part of this update? */
	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	/* reference dropped again in drm_atomic_state_default_clear() */
	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	/* An attached CRTC must be part of the update too. */
	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
  712. static void drm_atomic_connector_print_state(struct drm_printer *p,
  713. const struct drm_connector_state *state)
  714. {
  715. struct drm_connector *connector = state->connector;
  716. drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
  717. drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
  718. if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
  719. if (state->writeback_job && state->writeback_job->fb)
  720. drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
  721. if (connector->funcs->atomic_print_state)
  722. connector->funcs->atomic_print_state(p, state);
  723. }
  724. /**
  725. * drm_atomic_add_affected_connectors - add connectors for crtc
  726. * @state: atomic state
  727. * @crtc: DRM crtc
  728. *
  729. * This function walks the current configuration and adds all connectors
  730. * currently using @crtc to the atomic configuration @state. Note that this
  731. * function must acquire the connection mutex. This can potentially cause
  732. * unneeded seralization if the update is just for the planes on one crtc. Hence
  733. * drivers and helpers should only call this when really needed (e.g. when a
  734. * full modeset needs to happen due to some change).
  735. *
  736. * Returns:
  737. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  738. * then the w/w mutex code has detected a deadlock and the entire atomic
  739. * sequence must be restarted. All other errors are fatal.
  740. */
  741. int
  742. drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
  743. struct drm_crtc *crtc)
  744. {
  745. struct drm_mode_config *config = &state->dev->mode_config;
  746. struct drm_connector *connector;
  747. struct drm_connector_state *conn_state;
  748. struct drm_connector_list_iter conn_iter;
  749. struct drm_crtc_state *crtc_state;
  750. int ret;
  751. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  752. if (IS_ERR(crtc_state))
  753. return PTR_ERR(crtc_state);
  754. ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
  755. if (ret)
  756. return ret;
  757. DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
  758. crtc->base.id, crtc->name, state);
  759. /*
  760. * Changed connectors are already in @state, so only need to look
  761. * at the connector_mask in crtc_state.
  762. */
  763. drm_connector_list_iter_begin(state->dev, &conn_iter);
  764. drm_for_each_connector_iter(connector, &conn_iter) {
  765. if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
  766. continue;
  767. conn_state = drm_atomic_get_connector_state(state, connector);
  768. if (IS_ERR(conn_state)) {
  769. drm_connector_list_iter_end(&conn_iter);
  770. return PTR_ERR(conn_state);
  771. }
  772. }
  773. drm_connector_list_iter_end(&conn_iter);
  774. return 0;
  775. }
  776. EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  777. /**
  778. * drm_atomic_add_affected_planes - add planes for crtc
  779. * @state: atomic state
  780. * @crtc: DRM crtc
  781. *
  782. * This function walks the current configuration and adds all planes
  783. * currently used by @crtc to the atomic configuration @state. This is useful
  784. * when an atomic commit also needs to check all currently enabled plane on
  785. * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
  786. * to avoid special code to force-enable all planes.
  787. *
  788. * Since acquiring a plane state will always also acquire the w/w mutex of the
  789. * current CRTC for that plane (if there is any) adding all the plane states for
  790. * a CRTC will not reduce parallism of atomic updates.
  791. *
  792. * Returns:
  793. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  794. * then the w/w mutex code has detected a deadlock and the entire atomic
  795. * sequence must be restarted. All other errors are fatal.
  796. */
  797. int
  798. drm_atomic_add_affected_planes(struct drm_atomic_state *state,
  799. struct drm_crtc *crtc)
  800. {
  801. struct drm_plane *plane;
  802. WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
  803. DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
  804. crtc->base.id, crtc->name, state);
  805. drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
  806. struct drm_plane_state *plane_state =
  807. drm_atomic_get_plane_state(state, plane);
  808. if (IS_ERR(plane_state))
  809. return PTR_ERR(plane_state);
  810. }
  811. return 0;
  812. }
  813. EXPORT_SYMBOL(drm_atomic_add_affected_planes);
  814. /**
  815. * drm_atomic_check_only - check whether a given config would work
  816. * @state: atomic configuration to check
  817. *
  818. * Note that this function can return -EDEADLK if the driver needed to acquire
  819. * more locks but encountered a deadlock. The caller must then do the usual w/w
  820. * backoff dance and restart. All other errors are fatal.
  821. *
  822. * Returns:
  823. * 0 on success, negative error code on failure.
  824. */
  825. int drm_atomic_check_only(struct drm_atomic_state *state)
  826. {
  827. struct drm_device *dev = state->dev;
  828. struct drm_mode_config *config = &dev->mode_config;
  829. struct drm_plane *plane;
  830. struct drm_plane_state *plane_state;
  831. struct drm_crtc *crtc;
  832. struct drm_crtc_state *crtc_state;
  833. struct drm_connector *conn;
  834. struct drm_connector_state *conn_state;
  835. int i, ret = 0;
  836. DRM_DEBUG_ATOMIC("checking %p\n", state);
  837. for_each_new_plane_in_state(state, plane, plane_state, i) {
  838. ret = drm_atomic_plane_check(plane, plane_state);
  839. if (ret) {
  840. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
  841. plane->base.id, plane->name);
  842. return ret;
  843. }
  844. }
  845. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  846. ret = drm_atomic_crtc_check(crtc, crtc_state);
  847. if (ret) {
  848. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
  849. crtc->base.id, crtc->name);
  850. return ret;
  851. }
  852. }
  853. for_each_new_connector_in_state(state, conn, conn_state, i) {
  854. ret = drm_atomic_connector_check(conn, conn_state);
  855. if (ret) {
  856. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
  857. conn->base.id, conn->name);
  858. return ret;
  859. }
  860. }
  861. if (config->funcs->atomic_check) {
  862. ret = config->funcs->atomic_check(state->dev, state);
  863. if (ret) {
  864. DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
  865. state, ret);
  866. return ret;
  867. }
  868. }
  869. if (!state->allow_modeset) {
  870. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  871. if (drm_atomic_crtc_needs_modeset(crtc_state)) {
  872. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
  873. crtc->base.id, crtc->name);
  874. return -EINVAL;
  875. }
  876. }
  877. }
  878. return 0;
  879. }
  880. EXPORT_SYMBOL(drm_atomic_check_only);
  881. /**
  882. * drm_atomic_commit - commit configuration atomically
  883. * @state: atomic configuration to check
  884. *
  885. * Note that this function can return -EDEADLK if the driver needed to acquire
  886. * more locks but encountered a deadlock. The caller must then do the usual w/w
  887. * backoff dance and restart. All other errors are fatal.
  888. *
  889. * This function will take its own reference on @state.
  890. * Callers should always release their reference with drm_atomic_state_put().
  891. *
  892. * Returns:
  893. * 0 on success, negative error code on failure.
  894. */
  895. int drm_atomic_commit(struct drm_atomic_state *state)
  896. {
  897. struct drm_mode_config *config = &state->dev->mode_config;
  898. int ret;
  899. ret = drm_atomic_check_only(state);
  900. if (ret)
  901. return ret;
  902. DRM_DEBUG_ATOMIC("committing %p\n", state);
  903. return config->funcs->atomic_commit(state->dev, state, false);
  904. }
  905. EXPORT_SYMBOL(drm_atomic_commit);
  906. /**
  907. * drm_atomic_nonblocking_commit - atomic nonblocking commit
  908. * @state: atomic configuration to check
  909. *
  910. * Note that this function can return -EDEADLK if the driver needed to acquire
  911. * more locks but encountered a deadlock. The caller must then do the usual w/w
  912. * backoff dance and restart. All other errors are fatal.
  913. *
  914. * This function will take its own reference on @state.
  915. * Callers should always release their reference with drm_atomic_state_put().
  916. *
  917. * Returns:
  918. * 0 on success, negative error code on failure.
  919. */
  920. int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
  921. {
  922. struct drm_mode_config *config = &state->dev->mode_config;
  923. int ret;
  924. ret = drm_atomic_check_only(state);
  925. if (ret)
  926. return ret;
  927. DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
  928. return config->funcs->atomic_commit(state->dev, state, true);
  929. }
  930. EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  931. void drm_atomic_print_state(const struct drm_atomic_state *state)
  932. {
  933. struct drm_printer p = drm_info_printer(state->dev->dev);
  934. struct drm_plane *plane;
  935. struct drm_plane_state *plane_state;
  936. struct drm_crtc *crtc;
  937. struct drm_crtc_state *crtc_state;
  938. struct drm_connector *connector;
  939. struct drm_connector_state *connector_state;
  940. int i;
  941. DRM_DEBUG_ATOMIC("checking %p\n", state);
  942. for_each_new_plane_in_state(state, plane, plane_state, i)
  943. drm_atomic_plane_print_state(&p, plane_state);
  944. for_each_new_crtc_in_state(state, crtc, crtc_state, i)
  945. drm_atomic_crtc_print_state(&p, crtc_state);
  946. for_each_new_connector_in_state(state, connector, connector_state, i)
  947. drm_atomic_connector_print_state(&p, connector_state);
  948. }
  949. static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
  950. bool take_locks)
  951. {
  952. struct drm_mode_config *config = &dev->mode_config;
  953. struct drm_plane *plane;
  954. struct drm_crtc *crtc;
  955. struct drm_connector *connector;
  956. struct drm_connector_list_iter conn_iter;
  957. if (!drm_drv_uses_atomic_modeset(dev))
  958. return;
  959. list_for_each_entry(plane, &config->plane_list, head) {
  960. if (take_locks)
  961. drm_modeset_lock(&plane->mutex, NULL);
  962. drm_atomic_plane_print_state(p, plane->state);
  963. if (take_locks)
  964. drm_modeset_unlock(&plane->mutex);
  965. }
  966. list_for_each_entry(crtc, &config->crtc_list, head) {
  967. if (take_locks)
  968. drm_modeset_lock(&crtc->mutex, NULL);
  969. drm_atomic_crtc_print_state(p, crtc->state);
  970. if (take_locks)
  971. drm_modeset_unlock(&crtc->mutex);
  972. }
  973. drm_connector_list_iter_begin(dev, &conn_iter);
  974. if (take_locks)
  975. drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
  976. drm_for_each_connector_iter(connector, &conn_iter)
  977. drm_atomic_connector_print_state(p, connector->state);
  978. if (take_locks)
  979. drm_modeset_unlock(&dev->mode_config.connection_mutex);
  980. drm_connector_list_iter_end(&conn_iter);
  981. }
  982. /**
  983. * drm_state_dump - dump entire device atomic state
  984. * @dev: the drm device
  985. * @p: where to print the state to
  986. *
  987. * Just for debugging. Drivers might want an option to dump state
  988. * to dmesg in case of error irq's. (Hint, you probably want to
  989. * ratelimit this!)
  990. *
  991. * The caller must drm_modeset_lock_all(), or if this is called
  992. * from error irq handler, it should not be enabled by default.
  993. * (Ie. if you are debugging errors you might not care that this
  994. * is racey. But calling this without all modeset locks held is
  995. * not inherently safe.)
  996. */
  997. void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
  998. {
  999. __drm_state_dump(dev, p, false);
  1000. }
  1001. EXPORT_SYMBOL(drm_state_dump);
  1002. #ifdef CONFIG_DEBUG_FS
  1003. static int drm_state_info(struct seq_file *m, void *data)
  1004. {
  1005. struct drm_info_node *node = (struct drm_info_node *) m->private;
  1006. struct drm_device *dev = node->minor->dev;
  1007. struct drm_printer p = drm_seq_file_printer(m);
  1008. __drm_state_dump(dev, &p, true);
  1009. return 0;
  1010. }
  1011. /* any use in debugfs files to dump individual planes/crtc/etc? */
  1012. static const struct drm_info_list drm_atomic_debugfs_list[] = {
  1013. {"state", drm_state_info, 0},
  1014. };
  1015. int drm_atomic_debugfs_init(struct drm_minor *minor)
  1016. {
  1017. return drm_debugfs_create_files(drm_atomic_debugfs_list,
  1018. ARRAY_SIZE(drm_atomic_debugfs_list),
  1019. minor->debugfs_root, minor);
  1020. }
  1021. #endif