/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"

static void crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}

void drm_crtc_commit_put(struct drm_crtc_commit *commit)
{
	kref_put(&commit->ref, crtc_commit_free);
}
EXPORT_SYMBOL(drm_crtc_commit_put);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state;

	if (!config->funcs->atomic_state_alloc) {
		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);
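
/*
 * Illustrative sketch (not part of this file): a typical caller allocates a
 * state, attaches an acquire context and drops its reference when done. The
 * surrounding locking and commit code is elided and the variable names are
 * hypothetical.
 *
 *	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
 *
 *	if (!state)
 *		return -ENOMEM;
 *	state->acquire_ctx = &ctx;
 *	// ... build up and commit the update ...
 *	drm_atomic_state_put(state);
 */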
/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		drm_connector_unreference(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		if (state->crtcs[i].commit) {
			kfree(state->crtcs[i].commit->event);
			state->crtcs[i].commit->event = NULL;
			drm_crtc_commit_put(state->crtcs[i].commit);
		}

		state->crtcs[i].commit = NULL;
		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's ->atomic_check likely
 * relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);
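
/*
 * Illustrative sketch (not part of this file): __drm_atomic_state_free() is
 * normally reached through the reference-counting wrappers declared in
 * <drm/drm_atomic.h> rather than being called directly; the pairing below is
 * only meant to show that relationship.
 *
 *	drm_atomic_state_get(state);	// take an extra reference
 *	// ... hand the state to asynchronous commit machinery ...
 *	drm_atomic_state_put(state);	// the last put ends up in __drm_atomic_state_free()
 */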
/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
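
/*
 * Illustrative sketch (not part of this file): because the state-acquisition
 * helpers in this file can return -EDEADLK, callers typically wrap them in a
 * backoff-and-retry loop. The variable names and surrounding code are
 * hypothetical.
 *
 *	struct drm_crtc_state *crtc_state;
 *	int ret;
 *
 * retry:
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state)) {
 *		ret = PTR_ERR(crtc_state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_modeset_backoff(state->acquire_ctx);
 *			goto retry;
 *		}
 *		return ret;
 *	}
 */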
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s64 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s64 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state. Does
 * not change any other state properties, including enable, active, or
 * mode_changed.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 struct drm_display_mode *mode)
{
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 mode->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);

/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	if (blob == state->mode_blob)
		return 0;

	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		state->mode_blob = drm_property_reference_blob(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
/**
 * drm_atomic_replace_property_blob - replace a blob property
 * @blob: a pointer to the member blob to be replaced
 * @new_blob: the new blob to replace with
 * @replaced: whether the blob has been replaced
 *
 * Replaces *@blob with @new_blob, adjusting the blob references accordingly
 * and setting *@replaced when the pointer actually changed.
 */
static void
drm_atomic_replace_property_blob(struct drm_property_blob **blob,
				 struct drm_property_blob *new_blob,
				 bool *replaced)
{
	struct drm_property_blob *old_blob = *blob;

	if (old_blob == new_blob)
		return;

	drm_property_unreference_blob(old_blob);
	if (new_blob)
		drm_property_reference_blob(new_blob);
	*blob = new_blob;
	*replaced = true;

	return;
}

static int
drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 && expected_size != new_blob->length) {
			drm_property_unreference_blob(new_blob);
			return -EINVAL;
		}
	}

	drm_atomic_replace_property_blob(blob, new_blob, replaced);
	drm_property_unreference_blob(new_blob);

	return 0;
}
/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling crtc->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_unreference_blob(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s64 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get a property value for
 * @state: the state object to get the property value from
 * @property: the property to look up
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
				 crtc->base.id);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
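
/*
 * Illustrative sketch (not part of this file): a minimal plane update built on
 * the helpers in this file, with error and -EDEADLK handling elided. The
 * variables plane, crtc and fb are hypothetical.
 *
 *	struct drm_plane_state *plane_state;
 *
 *	plane_state = drm_atomic_get_plane_state(state, plane);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);
 *
 *	drm_atomic_set_crtc_for_plane(plane_state, crtc);
 *	drm_atomic_set_fb_for_plane(plane_state, fb);
 *	plane_state->crtc_w = fb->width;
 *	plane_state->crtc_h = fb->height;
 *	plane_state->src_w = fb->width << 16;
 *	plane_state->src_h = fb->height << 16;
 */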
/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling plane->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_unreference(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->rotation_property) {
		if (!is_power_of_2(val & DRM_ROTATE_MASK))
			return -EINVAL;
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to get a property value for
 * @state: the state object to get the property value from
 * @property: the property to look up
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
				 drm_get_format_name(state->fb->format->format,
						     &format_name));
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src  = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb) {
		struct drm_framebuffer *fb = state->fb;
		int i, n = fb->format->num_planes;
		struct drm_format_name_buf format_name;

		drm_printf(p, "\t\tformat=%s\n",
			   drm_get_format_name(fb->format->format, &format_name));
		drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
		drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
		drm_printf(p, "\t\tlayers:\n");
		for (i = 0; i < n; i++) {
			drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
			drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
		}
	}
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}
/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_reference(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
			 connector->base.id, connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling connector->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get a property value for
 * @state: the state object to get the property value from
 * @property: the property to look up
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
int drm_atomic_get_property(struct drm_mode_object *obj,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
				 fb->base.id, plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
				 plane_state);

	drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
 * drm_atomic_set_fence_for_plane - set fence for plane
 * @plane_state: atomic state object for the plane
 * @fence: dma_fence to use for the plane
 *
 * Helper to set up the plane_state fence in case it is not set yet.
 * By using this, drivers don't need to worry whether the user chose
 * implicit or explicit fencing.
 *
 * This function will not set the fence to the state if it was already set
 * via the explicit fencing interfaces on the atomic ioctl. In that case it
 * will simply drop the reference to the passed fence, as it is not stored
 * anywhere.
 *
 * Otherwise, if plane_state->fence is not set, this function will set it
 * to the received implicit fence.
 */
void
drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
			       struct dma_fence *fence)
{
	if (plane_state->fence) {
		dma_fence_put(fence);
		return;
	}

	plane_state->fence = fence;
}
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
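
/*
 * Illustrative sketch (not part of this file): a driver's ->prepare_fb hook
 * would typically feed the implicit fence of its buffer object into the plane
 * state; "bo" and its "resv" reservation object are hypothetical
 * driver-specific names.
 *
 *	struct dma_fence *fence = reservation_object_get_excl_rcu(bo->resv);
 *
 *	drm_atomic_set_fence_for_plane(plane_state, fence);
 */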

/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
								conn_state->crtc);

		crtc_state->connector_mask &=
			~(1 << drm_connector_index(conn_state->connector));

		drm_connector_unreference(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			1 << drm_connector_index(conn_state->connector);

		drm_connector_reference(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
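
/*
 * Example (illustrative sketch, not taken from this file): re-routing a
 * connector to a different CRTC from a driver's or helper's atomic_check,
 * assuming "state", "connector" and "new_crtc" are provided by the caller.
 *
 *	struct drm_connector_state *conn_state;
 *	int ret;
 *
 *	conn_state = drm_atomic_get_connector_state(state, connector);
 *	if (IS_ERR(conn_state))
 *		return PTR_ERR(conn_state);
 *
 *	ret = drm_atomic_set_crtc_for_connector(conn_state, new_crtc);
 *	if (ret)
 *		return ret;
 */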

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	int ret;

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look at the
	 * current configuration.
	 */
	drm_connector_list_iter_get(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != crtc)
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_put(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_put(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
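
/*
 * Example (illustrative sketch, not taken from this file): pulling all
 * connectors on a CRTC into the atomic state when that CRTC needs a full
 * modeset, e.g. from a driver's atomic_check:
 *
 *	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 *		ret = drm_atomic_add_affected_connectors(state, crtc);
 *		if (ret)
 *			return ret;
 *	}
 */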

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
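
/*
 * Example (illustrative sketch, not taken from this file): when a CRTC gets a
 * full modeset, also pulling in all planes currently on it so they are
 * re-checked (and re-enabled) together with the new mode:
 *
 *	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 *		ret = drm_atomic_add_affected_planes(state, crtc);
 *		if (ret)
 *			return ret;
 *	}
 */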

/**
 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
 * @state: atomic state
 *
 * This function should be used by legacy entry points which don't understand
 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
 * the slowpath completed.
 */
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	unsigned crtc_mask = 0;
	struct drm_crtc *crtc;
	int ret;
	bool global = false;

	drm_for_each_crtc(crtc, dev) {
		if (crtc->acquire_ctx != state->acquire_ctx)
			continue;

		crtc_mask |= drm_crtc_mask(crtc);
		crtc->acquire_ctx = NULL;
	}

	if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
		global = true;

		dev->mode_config.acquire_ctx = NULL;
	}

retry:
	drm_modeset_backoff(state->acquire_ctx);

	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		goto retry;

	drm_for_each_crtc(crtc, dev)
		if (drm_crtc_mask(crtc) & crtc_mask)
			crtc->acquire_ctx = state->acquire_ctx;

	if (global)
		dev->mode_config.acquire_ctx = state->acquire_ctx;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);
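
/*
 * Example (illustrative sketch, not taken from this file): a legacy entry
 * point built on top of atomic typically retries the commit after calling
 * this function whenever it hits -EDEADLK:
 *
 *	retry:
 *		ret = drm_atomic_commit(state);
 *		if (ret == -EDEADLK) {
 *			drm_atomic_state_clear(state);
 *			drm_atomic_legacy_backoff(state);
 *			goto retry;
 *		}
 */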

/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check)
		ret = config->funcs->atomic_check(state->dev, state);

	if (!state->allow_modeset) {
		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Also note that on successful execution ownership of @state is transferred
 * from the caller of this function to the function itself. The caller must not
 * free or in any other way access @state. If the function fails then the caller
 * must clean up @state itself.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);
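
/*
 * Example (illustrative sketch, not taken from this file): the usual shape of
 * an atomic update with the w/w backoff dance, mirroring what the atomic
 * ioctl below does. "dev" is assumed to be the drm_device at hand.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *	state->acquire_ctx = &ctx;
 *
 * retry:
 *	... build the new configuration using the drm_atomic_get_*_state() and
 *	drm_atomic_set_*() functions above ...
 *
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *
 *	drm_atomic_state_put(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */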

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking configuration commit
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Also note that on successful execution ownership of @state is transferred
 * from the caller of this function to the function itself. The caller must not
 * free or in any other way access @state. If the function fails then the caller
 * must clean up @state itself.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

static void drm_atomic_print_state(const struct drm_atomic_state *state)
{
	struct drm_printer p = drm_info_printer(state->dev->dev);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(&p, plane_state);

	for_each_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(&p, crtc_state);

	for_each_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(&p, connector_state);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error IRQs. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must hold drm_modeset_lock_all(); alternatively, if this is
 * called from an error IRQ handler, it should not be enabled by default.
 * (I.e. if you are debugging errors you might not care that this
 * is racy. But calling this without all modeset locks held is
 * not inherently safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return;

	list_for_each_entry(plane, &config->plane_list, head)
		drm_atomic_plane_print_state(p, plane->state);

	list_for_each_entry(crtc, &config->crtc_list, head)
		drm_atomic_crtc_print_state(p, crtc->state);

	drm_connector_list_iter_get(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	drm_connector_list_iter_put(&conn_iter);
}
EXPORT_SYMBOL(drm_state_dump);
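
/*
 * Example (illustrative sketch, not taken from this file): dumping the whole
 * atomic state to the kernel log, the same way the debugfs "state" file below
 * does it. Remember to hold all modeset locks, and to ratelimit this if it is
 * wired up to an error interrupt.
 *
 *	struct drm_printer p = drm_info_printer(dev->dev);
 *
 *	drm_modeset_lock_all(dev);
 *	drm_state_dump(dev, &p);
 *	drm_modeset_unlock_all(dev);
 */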

#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_modeset_lock_all(dev);
	drm_state_dump(dev, &p);
	drm_modeset_unlock_all(dev);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
					ARRAY_SIZE(drm_atomic_debugfs_list),
					minor->debugfs_root, minor);
}

int drm_atomic_debugfs_cleanup(struct drm_minor *minor)
{
	return drm_debugfs_remove_files(drm_atomic_debugfs_list,
					ARRAY_SIZE(drm_atomic_debugfs_list),
					minor);
}
#endif

/*
 * The big monster ioctl
 */

static struct drm_pending_vblank_event *create_vblank_event(
		struct drm_device *dev, uint64_t user_data)
{
	struct drm_pending_vblank_event *e = NULL;

	e = kzalloc(sizeof *e, GFP_KERNEL);
	if (!e)
		return NULL;

	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	return e;
}

static int atomic_set_prop(struct drm_atomic_state *state,
		struct drm_mode_object *obj, struct drm_property *prop,
		uint64_t prop_value)
{
	struct drm_mode_object *ref;
	int ret;

	if (!drm_property_change_valid_get(prop, prop_value, &ref))
		return -EINVAL;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		struct drm_connector_state *connector_state;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state)) {
			ret = PTR_ERR(connector_state);
			break;
		}

		ret = drm_atomic_connector_set_property(connector,
				connector_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	drm_property_change_valid_put(prop, ref);
	return ret;
}

/**
 * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
 *
 * @dev: drm device to check.
 * @plane_mask: plane mask for planes that were updated.
 * @ret: return value, can be -EDEADLK for a retry.
 *
 * Before doing an update plane->old_fb is set to plane->fb,
 * but before dropping the locks old_fb needs to be set to NULL
 * and plane->fb updated. This is a common operation for each
 * atomic update, so this call is split off as a helper.
 */
void drm_atomic_clean_old_fb(struct drm_device *dev,
			     unsigned plane_mask,
			     int ret)
{
	struct drm_plane *plane;

	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
	 * locks (ie. while it is still safe to deref plane->state).  We
	 * need to do this here because the driver entry points cannot
	 * distinguish between legacy and atomic ioctls.
	 */
	drm_for_each_plane_mask(plane, dev, plane_mask) {
		if (ret == 0) {
			struct drm_framebuffer *new_fb = plane->state->fb;
			if (new_fb)
				drm_framebuffer_reference(new_fb);
			plane->fb = new_fb;
			plane->crtc = plane->state->crtc;

			if (plane->old_fb)
				drm_framebuffer_unreference(plane->old_fb);
		}
		plane->old_fb = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
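
/*
 * Example (illustrative sketch, not taken from this file): the usual pattern
 * around drm_atomic_clean_old_fb(), as also used by the atomic ioctl below.
 * Before the update each touched plane records its current fb, and after the
 * commit (successful or not) the legacy pointers are fixed up in one go:
 *
 *	plane->old_fb = plane->fb;
 *	plane_mask |= 1 << drm_plane_index(plane);
 *	...
 *	ret = drm_atomic_commit(state);
 *	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 */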

/**
 * DOC: explicit fencing properties
 *
 * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A fence or a group of fences are transferred to/from
 * userspace using Sync File fds and there are two DRM properties for that.
 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
 *
 * As a contrast, with implicit fencing the kernel keeps track of any
 * ongoing rendering, and automatically ensures that the atomic update waits
 * for any pending rendering to complete. For shared buffers represented with
 * a &struct dma_buf this is tracked in &reservation_object structures.
 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
 * whereas explicit fencing is what Android wants.
 *
 * "IN_FENCE_FD":
 *	Use this property to pass a fence that DRM should wait on before
 *	proceeding with the Atomic Commit request and show the framebuffer for
 *	the plane on the screen. The fence can be either a normal fence or a
 *	merged one, the sync_file framework will handle both cases and use a
 *	fence_array if a merged fence is received. Passing -1 here means no
 *	fences to wait on.
 *
 *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
 *	it will only check if the Sync File is a valid one.
 *
 *	On the driver side the fence is stored on the @fence parameter of
 *	&struct drm_plane_state. Drivers which also support implicit fencing
 *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
 *	to make sure there's consistent behaviour between drivers in precedence
 *	of implicit vs. explicit fencing.
 *
 * "OUT_FENCE_PTR":
 *	Use this property to pass a file descriptor pointer to DRM. Once the
 *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
 *	the file descriptor number of a Sync File. This Sync File contains the
 *	CRTC fence that will be signaled when all framebuffers present on the
 *	Atomic Commit request for that given CRTC are scanned out on the
 *	screen.
 *
 *	The Atomic Commit request fails if an invalid pointer is passed. If the
 *	Atomic Commit request fails for any other reason the out fence fd
 *	returned will be -1. On an Atomic Commit with the
 *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
 *
 * Note that out-fences don't have a special interface to drivers and are
 * internally represented by a &struct drm_pending_vblank_event in
 * &struct drm_crtc_state, which is also used by the nonblocking atomic commit
 * helpers and for the DRM event handling for existing userspace.
 */
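
/*
 * Example (illustrative userspace sketch, not taken from this file, using the
 * libdrm atomic API): "fd" is the opened DRM device, and the IN_FENCE_FD and
 * OUT_FENCE_PTR property IDs are assumed to have been looked up beforehand
 * via drmModeObjectGetProperties().
 *
 *	int in_fence_fd = ...;		// sync_file fd to wait on, or -1
 *	int64_t out_fence_fd = -1;	// filled in by the kernel on commit
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, in_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	drmModeAtomicFree(req);
 */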

struct drm_out_fence_state {
	s64 __user *out_fence_ptr;
	struct sync_file *sync_file;
	int fd;
};

static int setup_out_fence(struct drm_out_fence_state *fence_state,
			   struct dma_fence *fence)
{
	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
	if (fence_state->fd < 0)
		return fence_state->fd;

	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
		return -EFAULT;

	fence_state->sync_file = sync_file_create(fence);
	if (!fence_state->sync_file)
		return -ENOMEM;

	return 0;
}

static int prepare_crtc_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		u64 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(dev, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			crtc_state->event->base.fence = fence;
		}
	}

	return 0;
}

static void complete_crtc_signaling(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    struct drm_out_fence_state *fence_state,
				    unsigned int num_fences,
				    bool install_fds)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (install_fds) {
		for (i = 0; i < num_fences; i++)
			fd_install(fence_state[i].fd,
				   fence_state[i].sync_file->file);

		kfree(fence_state);
		return;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * TEST_ONLY and PAGE_FLIP_EVENT are mutually
		 * exclusive, if they weren't, this code should be
		 * called on success for TEST_ONLY too.
		 */
		if (crtc_state->event)
			drm_event_cancel_free(dev, &crtc_state->event->base);
	}

	if (!fence_state)
		return;

	for (i = 0; i < num_fences; i++) {
		if (fence_state[i].sync_file)
			fput(fence_state[i].sync_file->file);
		if (fence_state[i].fd >= 0)
			put_unused_fd(fence_state[i].fd);

		/* If this fails log error to the user */
		if (fence_state[i].out_fence_ptr &&
		    put_user(-1, fence_state[i].out_fence_ptr))
			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
	}

	kfree(fence_state);
}

int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_plane *plane;
	struct drm_out_fence_state *fence_state = NULL;
	unsigned plane_mask;
	int ret = 0;
	unsigned int i, j, num_fences = 0;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EINVAL;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
			!dev->mode_config.async_page_flip)
		return -EINVAL;

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	plane_mask = 0;
	copied_objs = 0;
	copied_props = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_unreference(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_unreference(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_unreference(obj);
				ret = -EFAULT;
				goto out;
			}

			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_unreference(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_unreference(obj);
				ret = -EFAULT;
				goto out;
			}

			ret = atomic_set_prop(state, obj, prop, prop_value);
			if (ret) {
				drm_mode_object_unreference(obj);
				goto out;
			}

			copied_props++;
		}

		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
		    !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
			plane = obj_to_plane(obj);
			plane_mask |= (1 << drm_plane_index(plane));
			plane->old_fb = plane->fb;
		}
		drm_mode_object_unreference(obj);
	}

	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
				     &num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		if (unlikely(drm_debug & DRM_UT_STATE))
			drm_atomic_print_state(state);

		ret = drm_atomic_commit(state);
	}

out:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}