drm_atomic_uapi.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393
  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. * Copyright (C) 2018 Intel Corp.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * Authors:
  25. * Rob Clark <robdclark@gmail.com>
  26. * Daniel Vetter <daniel.vetter@ffwll.ch>
  27. */
  28. #include <drm/drm_atomic_uapi.h>
  29. #include <drm/drm_atomic.h>
  30. #include <drm/drm_print.h>
  31. #include <drm/drm_drv.h>
  32. #include <drm/drm_writeback.h>
  33. #include <drm/drm_vblank.h>
  34. #include <linux/dma-fence.h>
  35. #include <linux/uaccess.h>
  36. #include <linux/sync_file.h>
  37. #include <linux/file.h>
  38. #include "drm_crtc_internal.h"
/**
 * DOC: overview
 *
 * This file contains the marshalling and demarshalling glue for the atomic UAPI
 * in all its forms: the monster ATOMIC IOCTL itself, code for the GET_PROPERTY
 * and SET_PROPERTY IOCTLs, plus interface functions for compatibility helpers
 * and drivers which have special needs to construct their own atomic updates,
 * e.g. for load detect or similar.
 */
  48. /**
  49. * drm_atomic_set_mode_for_crtc - set mode for CRTC
  50. * @state: the CRTC whose incoming state to update
  51. * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
  52. *
  53. * Set a mode (originating from the kernel) on the desired CRTC state and update
  54. * the enable property.
  55. *
  56. * RETURNS:
  57. * Zero on success, error code on failure. Cannot return -EDEADLK.
  58. */
  59. int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
  60. const struct drm_display_mode *mode)
  61. {
  62. struct drm_crtc *crtc = state->crtc;
  63. struct drm_mode_modeinfo umode;
  64. /* Early return for no change. */
  65. if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
  66. return 0;
  67. drm_property_blob_put(state->mode_blob);
  68. state->mode_blob = NULL;
  69. if (mode) {
  70. drm_mode_convert_to_umode(&umode, mode);
  71. state->mode_blob =
  72. drm_property_create_blob(state->crtc->dev,
  73. sizeof(umode),
  74. &umode);
  75. if (IS_ERR(state->mode_blob))
  76. return PTR_ERR(state->mode_blob);
  77. drm_mode_copy(&state->mode, mode);
  78. state->enable = true;
  79. DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
  80. mode->name, crtc->base.id, crtc->name, state);
  81. } else {
  82. memset(&state->mode, 0, sizeof(state->mode));
  83. state->enable = false;
  84. DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
  85. crtc->base.id, crtc->name, state);
  86. }
  87. return 0;
  88. }
  89. EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
  90. /**
  91. * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
  92. * @state: the CRTC whose incoming state to update
  93. * @blob: pointer to blob property to use for mode
  94. *
  95. * Set a mode (originating from a blob property) on the desired CRTC state.
  96. * This function will take a reference on the blob property for the CRTC state,
  97. * and release the reference held on the state's existing mode property, if any
  98. * was set.
  99. *
  100. * RETURNS:
  101. * Zero on success, error code on failure. Cannot return -EDEADLK.
  102. */
  103. int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
  104. struct drm_property_blob *blob)
  105. {
  106. struct drm_crtc *crtc = state->crtc;
  107. if (blob == state->mode_blob)
  108. return 0;
  109. drm_property_blob_put(state->mode_blob);
  110. state->mode_blob = NULL;
  111. memset(&state->mode, 0, sizeof(state->mode));
  112. if (blob) {
  113. int ret;
  114. if (blob->length != sizeof(struct drm_mode_modeinfo)) {
  115. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
  116. crtc->base.id, crtc->name,
  117. blob->length);
  118. return -EINVAL;
  119. }
  120. ret = drm_mode_convert_umode(crtc->dev,
  121. &state->mode, blob->data);
  122. if (ret) {
  123. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
  124. crtc->base.id, crtc->name,
  125. ret, drm_get_mode_status_name(state->mode.status));
  126. drm_mode_debug_printmodeline(&state->mode);
  127. return -EINVAL;
  128. }
  129. state->mode_blob = drm_property_blob_get(blob);
  130. state->enable = true;
  131. DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
  132. state->mode.name, crtc->base.id, crtc->name,
  133. state);
  134. } else {
  135. state->enable = false;
  136. DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
  137. crtc->base.id, crtc->name, state);
  138. }
  139. return 0;
  140. }
  141. EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
  142. /**
  143. * drm_atomic_set_crtc_for_plane - set crtc for plane
  144. * @plane_state: the plane whose incoming state to update
  145. * @crtc: crtc to use for the plane
  146. *
  147. * Changing the assigned crtc for a plane requires us to grab the lock and state
  148. * for the new crtc, as needed. This function takes care of all these details
  149. * besides updating the pointer in the state object itself.
  150. *
  151. * Returns:
  152. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  153. * then the w/w mutex code has detected a deadlock and the entire atomic
  154. * sequence must be restarted. All other errors are fatal.
  155. */
  156. int
  157. drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
  158. struct drm_crtc *crtc)
  159. {
  160. struct drm_plane *plane = plane_state->plane;
  161. struct drm_crtc_state *crtc_state;
  162. /* Nothing to do for same crtc*/
  163. if (plane_state->crtc == crtc)
  164. return 0;
  165. if (plane_state->crtc) {
  166. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  167. plane_state->crtc);
  168. if (WARN_ON(IS_ERR(crtc_state)))
  169. return PTR_ERR(crtc_state);
  170. crtc_state->plane_mask &= ~drm_plane_mask(plane);
  171. }
  172. plane_state->crtc = crtc;
  173. if (crtc) {
  174. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  175. crtc);
  176. if (IS_ERR(crtc_state))
  177. return PTR_ERR(crtc_state);
  178. crtc_state->plane_mask |= drm_plane_mask(plane);
  179. }
  180. if (crtc)
  181. DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
  182. plane->base.id, plane->name, plane_state,
  183. crtc->base.id, crtc->name);
  184. else
  185. DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
  186. plane->base.id, plane->name, plane_state);
  187. return 0;
  188. }
  189. EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
  190. /**
  191. * drm_atomic_set_fb_for_plane - set framebuffer for plane
  192. * @plane_state: atomic state object for the plane
  193. * @fb: fb to use for the plane
  194. *
  195. * Changing the assigned framebuffer for a plane requires us to grab a reference
  196. * to the new fb and drop the reference to the old fb, if there is one. This
  197. * function takes care of all these details besides updating the pointer in the
  198. * state object itself.
  199. */
  200. void
  201. drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
  202. struct drm_framebuffer *fb)
  203. {
  204. struct drm_plane *plane = plane_state->plane;
  205. if (fb)
  206. DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
  207. fb->base.id, plane->base.id, plane->name,
  208. plane_state);
  209. else
  210. DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
  211. plane->base.id, plane->name, plane_state);
  212. drm_framebuffer_assign(&plane_state->fb, fb);
  213. }
  214. EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
  215. /**
  216. * drm_atomic_set_fence_for_plane - set fence for plane
  217. * @plane_state: atomic state object for the plane
  218. * @fence: dma_fence to use for the plane
  219. *
  220. * Helper to setup the plane_state fence in case it is not set yet.
  221. * By using this drivers doesn't need to worry if the user choose
  222. * implicit or explicit fencing.
  223. *
  224. * This function will not set the fence to the state if it was set
  225. * via explicit fencing interfaces on the atomic ioctl. In that case it will
  226. * drop the reference to the fence as we are not storing it anywhere.
  227. * Otherwise, if &drm_plane_state.fence is not set this function we just set it
  228. * with the received implicit fence. In both cases this function consumes a
  229. * reference for @fence.
  230. *
  231. * This way explicit fencing can be used to overrule implicit fencing, which is
  232. * important to make explicit fencing use-cases work: One example is using one
  233. * buffer for 2 screens with different refresh rates. Implicit fencing will
  234. * clamp rendering to the refresh rate of the slower screen, whereas explicit
  235. * fence allows 2 independent render and display loops on a single buffer. If a
  236. * driver allows obeys both implicit and explicit fences for plane updates, then
  237. * it will break all the benefits of explicit fencing.
  238. */
  239. void
  240. drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
  241. struct dma_fence *fence)
  242. {
  243. if (plane_state->fence) {
  244. dma_fence_put(fence);
  245. return;
  246. }
  247. plane_state->fence = fence;
  248. }
  249. EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_crtc_state *crtc_state;

	/* No change: nothing to do. */
	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		/*
		 * NOTE(review): the old CRTC's new state is used without a
		 * NULL check — presumably it was already added to this atomic
		 * state when the connector got attached; confirm against
		 * callers.
		 */
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		/* Detach: clear this connector's bit in the old CRTC's mask. */
		crtc_state->connector_mask &=
			~drm_connector_mask(conn_state->connector);

		/* Drop the reference the old binding held on the connector. */
		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Attach: set this connector's bit in the new CRTC's mask. */
		crtc_state->connector_mask |=
			drm_connector_mask(conn_state->connector);

		/* The new binding holds a reference on the connector. */
		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
				 connector->base.id, connector->name,
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
				 connector->base.id, connector->name,
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
  299. static void set_out_fence_for_crtc(struct drm_atomic_state *state,
  300. struct drm_crtc *crtc, s32 __user *fence_ptr)
  301. {
  302. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
  303. }
  304. static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
  305. struct drm_crtc *crtc)
  306. {
  307. s32 __user *fence_ptr;
  308. fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
  309. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
  310. return fence_ptr;
  311. }
  312. static int set_out_fence_for_connector(struct drm_atomic_state *state,
  313. struct drm_connector *connector,
  314. s32 __user *fence_ptr)
  315. {
  316. unsigned int index = drm_connector_index(connector);
  317. if (!fence_ptr)
  318. return 0;
  319. if (put_user(-1, fence_ptr))
  320. return -EFAULT;
  321. state->connectors[index].out_fence_ptr = fence_ptr;
  322. return 0;
  323. }
  324. static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
  325. struct drm_connector *connector)
  326. {
  327. unsigned int index = drm_connector_index(connector);
  328. s32 __user *fence_ptr;
  329. fence_ptr = state->connectors[index].out_fence_ptr;
  330. state->connectors[index].out_fence_ptr = NULL;
  331. return fence_ptr;
  332. }
  333. static int
  334. drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
  335. struct drm_property_blob **blob,
  336. uint64_t blob_id,
  337. ssize_t expected_size,
  338. ssize_t expected_elem_size,
  339. bool *replaced)
  340. {
  341. struct drm_property_blob *new_blob = NULL;
  342. if (blob_id != 0) {
  343. new_blob = drm_property_lookup_blob(dev, blob_id);
  344. if (new_blob == NULL)
  345. return -EINVAL;
  346. if (expected_size > 0 &&
  347. new_blob->length != expected_size) {
  348. drm_property_blob_put(new_blob);
  349. return -EINVAL;
  350. }
  351. if (expected_elem_size > 0 &&
  352. new_blob->length % expected_elem_size != 0) {
  353. drm_property_blob_put(new_blob);
  354. return -EINVAL;
  355. }
  356. }
  357. *replaced |= drm_property_replace_blob(blob, new_blob);
  358. drm_property_blob_put(new_blob);
  359. return 0;
  360. }
/*
 * Apply one property value to a CRTC's atomic state. Dispatches on the
 * standard CRTC properties, falling back to the driver's
 * &drm_crtc_funcs.atomic_set_property hook for driver-private ones.
 * Returns 0 on success or a negative errno.
 */
static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		/* val is a blob id; a 0/unknown id yields NULL = disable. */
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		/* set_mode_prop took its own reference; drop the lookup's. */
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		/* Any number of drm_color_lut entries is accepted (-1 size). */
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		/* CTM blob must be exactly one drm_color_ctm. */
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm), -1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		/* Seed the user's slot with -1 before the commit fills it. */
		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property) {
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	} else {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
				 crtc->base.id, crtc->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
  418. static int
  419. drm_atomic_crtc_get_property(struct drm_crtc *crtc,
  420. const struct drm_crtc_state *state,
  421. struct drm_property *property, uint64_t *val)
  422. {
  423. struct drm_device *dev = crtc->dev;
  424. struct drm_mode_config *config = &dev->mode_config;
  425. if (property == config->prop_active)
  426. *val = state->active;
  427. else if (property == config->prop_mode_id)
  428. *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
  429. else if (property == config->degamma_lut_property)
  430. *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
  431. else if (property == config->ctm_property)
  432. *val = (state->ctm) ? state->ctm->base.id : 0;
  433. else if (property == config->gamma_lut_property)
  434. *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
  435. else if (property == config->prop_out_fence_ptr)
  436. *val = 0;
  437. else if (crtc->funcs->atomic_get_property)
  438. return crtc->funcs->atomic_get_property(crtc, state, property, val);
  439. else
  440. return -EINVAL;
  441. return 0;
  442. }
/*
 * Apply one property value to a plane's atomic state. Handles the standard
 * plane properties and falls back to the driver's
 * &drm_plane_funcs.atomic_set_property hook for driver-private ones.
 * Returns 0 on success or a negative errno.
 */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		/* A failed lookup yields NULL, i.e. [NOFB]. */
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		/* An in-fence may only be set once per state. */
		if (state->fence)
			return -EINVAL;

		/* -1 means "no fence". */
		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		/* CRTC coordinates are signed. */
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->alpha_property) {
		state->alpha = val;
	} else if (property == plane->blend_mode_property) {
		state->pixel_blend_mode = val;
	} else if (property == plane->rotation_property) {
		/* Exactly one rotation angle bit must be set. */
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
					 plane->base.id, plane->name, val);
			return -EINVAL;
		}
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (property == plane->color_encoding_property) {
		state->color_encoding = val;
	} else if (property == plane->color_range_property) {
		state->color_range = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
				 plane->base.id, plane->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * Read one property value out of a plane's atomic state. Object-backed
 * properties (fb, crtc) report the object id or 0; IN_FENCE_FD always reads
 * back as -1 since fences are consumed by the commit. Driver-private
 * properties are delegated to &drm_plane_funcs.atomic_get_property.
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		/* In-fences are write-only; always reads as "none". */
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		/* Signed CRTC coordinates round-trip through I642U64. */
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->alpha_property) {
		*val = state->alpha;
	} else if (property == plane->blend_mode_property) {
		*val = state->pixel_blend_mode;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (property == plane->color_encoding_property) {
		*val = state->color_encoding;
	} else if (property == plane->color_range_property) {
		*val = state->color_range;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
  557. static struct drm_writeback_job *
  558. drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
  559. {
  560. WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
  561. if (!conn_state->writeback_job)
  562. conn_state->writeback_job =
  563. kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
  564. return conn_state->writeback_job;
  565. }
  566. static int drm_atomic_set_writeback_fb_for_connector(
  567. struct drm_connector_state *conn_state,
  568. struct drm_framebuffer *fb)
  569. {
  570. struct drm_writeback_job *job =
  571. drm_atomic_get_writeback_job(conn_state);
  572. if (!job)
  573. return -ENOMEM;
  574. drm_framebuffer_assign(&job->fb, fb);
  575. if (fb)
  576. DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
  577. fb->base.id, conn_state);
  578. else
  579. DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
  580. conn_state);
  581. return 0;
  582. }
/*
 * Apply one property value to a connector's atomic state. Dispatches on the
 * standard connector properties, falling back to the driver's
 * &drm_connector_funcs.atomic_set_property hook for driver-private ones.
 * Returns 0 on success or a negative errno.
 */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us.  Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, driver
		 * silently rejects it and returns a 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == config->content_type_property) {
		state->content_type = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (property == connector->content_protection_property) {
		/* Userspace may only request DESIRED/UNDESIRED; ENABLED is
		 * reported by the driver once protection is actually up.
		 */
		if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
			return -EINVAL;
		}
		state->content_protection = val;
	} else if (property == config->writeback_fb_id_property) {
		/* A failed lookup yields NULL, i.e. detach the writeback fb. */
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
		return ret;
	} else if (property == config->writeback_out_fence_ptr_property) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		return set_out_fence_for_connector(state->state, connector,
						   fence_ptr);
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
				 connector->base.id, connector->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * Read a single connector property out of @state into *val.
 *
 * Core properties are decoded directly from the atomic connector state;
 * unrecognized properties are handed to the driver's
 * ->atomic_get_property() hook if one exists, otherwise -EINVAL is
 * returned.
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		/* DPMS is tracked on the connector itself, not in the state. */
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (property == config->link_status_property) {
		*val = state->link_status;
	} else if (property == config->aspect_ratio_property) {
		*val = state->picture_aspect_ratio;
	} else if (property == config->content_type_property) {
		*val = state->content_type;
	} else if (property == connector->scaling_mode_property) {
		*val = state->scaling_mode;
	} else if (property == connector->content_protection_property) {
		*val = state->content_protection;
	} else if (property == config->writeback_fb_id_property) {
		/* Writeback framebuffer is one-shot, write and forget */
		*val = 0;
	} else if (property == config->writeback_out_fence_ptr_property) {
		/* One-shot as well; always reads back as 0. */
		*val = 0;
	} else if (connector->funcs->atomic_get_property) {
		/* Driver-private property. */
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
/**
 * drm_atomic_get_property - helper to read an atomic property
 * @obj: drm mode object whose property to read
 * @property: the property to read
 * @val: the read value, returned to the caller
 *
 * Dispatches on the mode object type (connector, CRTC or plane) and reads
 * @property from the object's *current* atomic state.  The WARN_ON()s
 * document that the caller must already hold the lock protecting that
 * state.
 *
 * Returns 0 on success, negative error code on failure (-EINVAL for an
 * object type that has no atomic properties).
 */
int drm_atomic_get_property(struct drm_mode_object *obj,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);

		/* connector->state is protected by the connection mutex. */
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);

		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);

		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		/* Object type has no atomic properties. */
		ret = -EINVAL;
		break;
	}

	return ret;
}
  760. /*
  761. * The big monster ioctl
  762. */
  763. static struct drm_pending_vblank_event *create_vblank_event(
  764. struct drm_crtc *crtc, uint64_t user_data)
  765. {
  766. struct drm_pending_vblank_event *e = NULL;
  767. e = kzalloc(sizeof *e, GFP_KERNEL);
  768. if (!e)
  769. return NULL;
  770. e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
  771. e->event.base.length = sizeof(e->event);
  772. e->event.vbl.crtc_id = crtc->base.id;
  773. e->event.vbl.user_data = user_data;
  774. return e;
  775. }
/**
 * drm_atomic_connector_commit_dpms - commit a legacy DPMS change via atomic
 * @state: atomic state to use (must have a valid acquire context)
 * @connector: connector whose DPMS property is being changed
 * @mode: requested DPMS mode; anything but DPMS_ON is treated as DPMS_OFF
 *
 * Implements the legacy DPMS property on top of atomic: the CRTC feeding
 * @connector is kept active iff at least one connector on that CRTC still
 * has DPMS_ON set.  On any failure the connector's previous DPMS value is
 * restored.
 *
 * Returns 0 on success, negative error code otherwise (-EDEADLK means the
 * caller must back off and retry).
 */
int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
				     struct drm_connector *connector,
				     int mode)
{
	struct drm_connector *tmp_connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret, old_mode = connector->dpms;
	bool active = false;

	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	/* Atomic only knows on/off; collapse standby/suspend to off. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;
	connector->dpms = mode;

	crtc = connector->state->crtc;
	if (!crtc)
		goto out;	/* connector not on a CRTC; nothing to commit */

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	/* The CRTC stays active as long as any of its connectors is on. */
	for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
		if (new_conn_state->crtc != crtc)
			continue;

		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
			active = true;
			break;
		}
	}

	crtc_state->active = active;
	ret = drm_atomic_commit(state);
out:
	/* Roll the cached DPMS value back on any failure. */
	if (ret != 0)
		connector->dpms = old_mode;
	return ret;
}
/**
 * drm_atomic_set_property - set a property on a mode object in atomic state
 * @state: atomic state the update is staged in
 * @obj: connector, CRTC or plane the property belongs to
 * @prop: property to set
 * @prop_value: raw property value from userspace
 *
 * Validates @prop_value, duplicates the object's state into @state and
 * dispatches to the per-object-type set_property helper.  The
 * drm_property_change_valid_get()/_put() pair keeps any mode object
 * referenced by the value (e.g. a framebuffer) alive across the call.
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_atomic_set_property(struct drm_atomic_state *state,
		struct drm_mode_object *obj,
		struct drm_property *prop,
		uint64_t prop_value)
{
	struct drm_mode_object *ref;
	int ret;

	if (!drm_property_change_valid_get(prop, prop_value, &ref))
		return -EINVAL;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		struct drm_connector_state *connector_state;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state)) {
			ret = PTR_ERR(connector_state);
			break;
		}

		ret = drm_atomic_connector_set_property(connector,
				connector_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	/* Drop the reference taken by drm_property_change_valid_get(). */
	drm_property_change_valid_put(prop, ref);

	return ret;
}
  872. /**
  873. * DOC: explicit fencing properties
  874. *
  875. * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A fence or a group of fences are transferred to/from
  877. * userspace using Sync File fds and there are two DRM properties for that.
  878. * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
  879. * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
  880. *
  881. * As a contrast, with implicit fencing the kernel keeps track of any
  882. * ongoing rendering, and automatically ensures that the atomic update waits
  883. * for any pending rendering to complete. For shared buffers represented with
  884. * a &struct dma_buf this is tracked in &struct reservation_object.
  885. * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
  886. * whereas explicit fencing is what Android wants.
  887. *
 * "IN_FENCE_FD":
  889. * Use this property to pass a fence that DRM should wait on before
  890. * proceeding with the Atomic Commit request and show the framebuffer for
  891. * the plane on the screen. The fence can be either a normal fence or a
  892. * merged one, the sync_file framework will handle both cases and use a
  893. * fence_array if a merged fence is received. Passing -1 here means no
  894. * fences to wait on.
  895. *
  896. * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
  897. * it will only check if the Sync File is a valid one.
  898. *
  899. * On the driver side the fence is stored on the @fence parameter of
  900. * &struct drm_plane_state. Drivers which also support implicit fencing
  901. * should set the implicit fence using drm_atomic_set_fence_for_plane(),
  902. * to make sure there's consistent behaviour between drivers in precedence
  903. * of implicit vs. explicit fencing.
  904. *
 * "OUT_FENCE_PTR":
  906. * Use this property to pass a file descriptor pointer to DRM. Once the
  907. * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
  908. * the file descriptor number of a Sync File. This Sync File contains the
  909. * CRTC fence that will be signaled when all framebuffers present on the
 * Atomic Commit request for that given CRTC are scanned out on the
  911. * screen.
  912. *
 * The Atomic Commit request fails if an invalid pointer is passed. If the
  914. * Atomic Commit request fails for any other reason the out fence fd
 * returned will be -1. On an Atomic Commit with the
  916. * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
  917. *
  918. * Note that out-fences don't have a special interface to drivers and are
  919. * internally represented by a &struct drm_pending_vblank_event in struct
  920. * &drm_crtc_state, which is also used by the nonblocking atomic commit
  921. * helpers and for the DRM event handling for existing userspace.
  922. */
/*
 * Per-out-fence bookkeeping kept while an atomic ioctl is in flight.
 * On success the reserved fd is installed; on failure the fd is released
 * and -1 is written back through out_fence_ptr (see complete_signaling()).
 */
struct drm_out_fence_state {
	s32 __user *out_fence_ptr;	/* userspace slot that receives the fd */
	struct sync_file *sync_file;	/* sync_file wrapping the out-fence */
	int fd;				/* reserved fd, not yet installed */
};
  928. static int setup_out_fence(struct drm_out_fence_state *fence_state,
  929. struct dma_fence *fence)
  930. {
  931. fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
  932. if (fence_state->fd < 0)
  933. return fence_state->fd;
  934. if (put_user(fence_state->fd, fence_state->out_fence_ptr))
  935. return -EFAULT;
  936. fence_state->sync_file = sync_file_create(fence);
  937. if (!fence_state->sync_file)
  938. return -ENOMEM;
  939. return 0;
  940. }
/*
 * Set up every userspace-visible completion signal for an atomic commit:
 * DRM_EVENT_FLIP_COMPLETE events, per-CRTC out-fences and writeback
 * out-fences.  *fence_state/*num_fences are grown as fences are added so
 * that, on error, complete_signaling() can unwind whatever was already
 * set up; this function itself only bails out at the point of failure.
 */
static int prepare_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, c = 0, ret;

	/* TEST_ONLY commits never signal anything. */
	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		/*
		 * A vblank event is needed both for the page-flip event
		 * itself and as the carrier of the out-fence.
		 */
		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(crtc, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			/* Grow the fence bookkeeping array by one entry. */
			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			/* The event carries the fence reference from here. */
			crtc_state->event->base.fence = fence;
		}

		c++;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		struct drm_writeback_connector *wb_conn;
		struct drm_writeback_job *job;
		struct drm_out_fence_state *f;
		struct dma_fence *fence;
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_connector(state, conn);
		if (!fence_ptr)
			continue;

		job = drm_atomic_get_writeback_job(conn_state);
		if (!job)
			return -ENOMEM;

		f = krealloc(*fence_state, sizeof(**fence_state) *
			     (*num_fences + 1), GFP_KERNEL);
		if (!f)
			return -ENOMEM;

		memset(&f[*num_fences], 0, sizeof(*f));

		f[*num_fences].out_fence_ptr = fence_ptr;
		*fence_state = f;

		wb_conn = drm_connector_to_writeback(conn);
		fence = drm_writeback_get_out_fence(wb_conn);
		if (!fence)
			return -ENOMEM;

		ret = setup_out_fence(&f[(*num_fences)++], fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}

		/* The writeback job owns this fence reference now. */
		job->out_fence = fence;
	}

	/*
	 * Having this flag means user mode pends on event which will never
	 * reach due to lack of at least one CRTC for signaling
	 */
	if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	return 0;
}
/*
 * Counterpart of prepare_signaling().  With @install_fds set (the commit
 * succeeded) the reserved out-fence fds are published to userspace and
 * the bookkeeping freed.  Otherwise the events created by this ioctl are
 * cancelled, reserved fds and sync_files are released, and the user's
 * out-fence slots are overwritten with -1.
 */
static void complete_signaling(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       struct drm_out_fence_state *fence_state,
			       unsigned int num_fences,
			       bool install_fds)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (install_fds) {
		for (i = 0; i < num_fences; i++)
			fd_install(fence_state[i].fd,
				   fence_state[i].sync_file->file);

		kfree(fence_state);
		return;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_pending_vblank_event *event = crtc_state->event;
		/*
		 * Free the allocated event. drm_atomic_helper_setup_commit
		 * can allocate an event too, so only free it if it's ours
		 * to prevent a double free in drm_atomic_state_clear.
		 */
		if (event && (event->base.fence || event->base.file_priv)) {
			drm_event_cancel_free(dev, &event->base);
			crtc_state->event = NULL;
		}
	}

	if (!fence_state)
		return;

	for (i = 0; i < num_fences; i++) {
		if (fence_state[i].sync_file)
			fput(fence_state[i].sync_file->file);
		if (fence_state[i].fd >= 0)
			put_unused_fd(fence_state[i].fd);

		/* If this fails log error to the user */
		if (fence_state[i].out_fence_ptr &&
		    put_user(-1, fence_state[i].out_fence_ptr))
			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
	}

	kfree(fence_state);
}
/*
 * DRM_IOCTL_MODE_ATOMIC: the big monster ioctl.  Decodes the userspace
 * object/property arrays into a drm_atomic_state, sets up event/fence
 * signaling, then checks or commits the state.  -EDEADLK from any step
 * triggers the standard drop-locks-and-retry dance via the retry label.
 */
int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_out_fence_state *fence_state;
	int ret = 0;
	unsigned int i, j, num_fences;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EOPNOTSUPP;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
			!dev->mode_config.async_page_flip)
		return -EINVAL;

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	/* Reset per-attempt bookkeeping; an -EDEADLK retry starts over. */
	copied_objs = 0;
	copied_props = 0;
	fence_state = NULL;
	num_fences = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_put(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_put(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_put(obj);
				ret = -ENOENT;
				goto out;
			}

			/* 64-bit value, so copy_from_user not get_user. */
			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			ret = drm_atomic_set_property(state, obj, prop,
						      prop_value);
			if (ret) {
				drm_mode_object_put(obj);
				goto out;
			}

			copied_props++;
		}

		drm_mode_object_put(obj);
	}

	ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
				&num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		if (unlikely(drm_debug & DRM_UT_STATE))
			drm_atomic_print_state(state);

		ret = drm_atomic_commit(state);
	}

out:
	/* Install the out-fence fds on success, tear them down on failure. */
	complete_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}