  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software"),
  7. * to deal in the Software without restriction, including without limitation
  8. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9. * and/or sell copies of the Software, and to permit persons to whom the
  10. * Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice shall be included in
  13. * all copies or substantial portions of the Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21. * OTHER DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Rob Clark <robdclark@gmail.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. */
  27. #include <drm/drmP.h>
  28. #include <drm/drm_atomic.h>
  29. #include <drm/drm_mode.h>
  30. #include <drm/drm_plane_helper.h>
  31. #include <drm/drm_print.h>
  32. #include <linux/sync_file.h>
  33. #include "drm_crtc_internal.h"
  34. void __drm_crtc_commit_free(struct kref *kref)
  35. {
  36. struct drm_crtc_commit *commit =
  37. container_of(kref, struct drm_crtc_commit, ref);
  38. kfree(commit);
  39. }
  40. EXPORT_SYMBOL(__drm_crtc_commit_free);
  41. /**
  42. * drm_atomic_state_default_release -
  43. * release memory initialized by drm_atomic_state_init
  44. * @state: atomic state
  45. *
  46. * Free all the memory allocated by drm_atomic_state_init.
  47. * This is useful for drivers that subclass the atomic state.
  48. */
  49. void drm_atomic_state_default_release(struct drm_atomic_state *state)
  50. {
  51. kfree(state->connectors);
  52. kfree(state->crtcs);
  53. kfree(state->planes);
  54. }
  55. EXPORT_SYMBOL(drm_atomic_state_default_release);
  56. /**
  57. * drm_atomic_state_init - init new atomic state
  58. * @dev: DRM device
  59. * @state: atomic state
  60. *
  61. * Default implementation for filling in a new atomic state.
  62. * This is useful for drivers that subclass the atomic state.
  63. */
  64. int
  65. drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
  66. {
  67. kref_init(&state->ref);
  68. /* TODO legacy paths should maybe do a better job about
  69. * setting this appropriately?
  70. */
  71. state->allow_modeset = true;
  72. state->crtcs = kcalloc(dev->mode_config.num_crtc,
  73. sizeof(*state->crtcs), GFP_KERNEL);
  74. if (!state->crtcs)
  75. goto fail;
  76. state->planes = kcalloc(dev->mode_config.num_total_plane,
  77. sizeof(*state->planes), GFP_KERNEL);
  78. if (!state->planes)
  79. goto fail;
  80. state->dev = dev;
  81. DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
  82. return 0;
  83. fail:
  84. drm_atomic_state_default_release(state);
  85. return -ENOMEM;
  86. }
  87. EXPORT_SYMBOL(drm_atomic_state_init);
  88. /**
  89. * drm_atomic_state_alloc - allocate atomic state
  90. * @dev: DRM device
  91. *
  92. * This allocates an empty atomic state to track updates.
  93. */
  94. struct drm_atomic_state *
  95. drm_atomic_state_alloc(struct drm_device *dev)
  96. {
  97. struct drm_mode_config *config = &dev->mode_config;
  98. struct drm_atomic_state *state;
  99. if (!config->funcs->atomic_state_alloc) {
  100. state = kzalloc(sizeof(*state), GFP_KERNEL);
  101. if (!state)
  102. return NULL;
  103. if (drm_atomic_state_init(dev, state) < 0) {
  104. kfree(state);
  105. return NULL;
  106. }
  107. return state;
  108. }
  109. return config->funcs->atomic_state_alloc(dev);
  110. }
  111. EXPORT_SYMBOL(drm_atomic_state_alloc);
  112. /**
  113. * drm_atomic_state_default_clear - clear base atomic state
  114. * @state: atomic state
  115. *
  116. * Default implementation for clearing atomic state.
  117. * This is useful for drivers that subclass the atomic state.
  118. */
  119. void drm_atomic_state_default_clear(struct drm_atomic_state *state)
  120. {
  121. struct drm_device *dev = state->dev;
  122. struct drm_mode_config *config = &dev->mode_config;
  123. int i;
  124. DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
  125. for (i = 0; i < state->num_connector; i++) {
  126. struct drm_connector *connector = state->connectors[i].ptr;
  127. if (!connector)
  128. continue;
  129. connector->funcs->atomic_destroy_state(connector,
  130. state->connectors[i].state);
  131. state->connectors[i].ptr = NULL;
  132. state->connectors[i].state = NULL;
  133. drm_connector_put(connector);
  134. }
  135. for (i = 0; i < config->num_crtc; i++) {
  136. struct drm_crtc *crtc = state->crtcs[i].ptr;
  137. if (!crtc)
  138. continue;
  139. crtc->funcs->atomic_destroy_state(crtc,
  140. state->crtcs[i].state);
  141. if (state->crtcs[i].commit) {
  142. kfree(state->crtcs[i].commit->event);
  143. state->crtcs[i].commit->event = NULL;
  144. drm_crtc_commit_put(state->crtcs[i].commit);
  145. }
  146. state->crtcs[i].commit = NULL;
  147. state->crtcs[i].ptr = NULL;
  148. state->crtcs[i].state = NULL;
  149. }
  150. for (i = 0; i < config->num_total_plane; i++) {
  151. struct drm_plane *plane = state->planes[i].ptr;
  152. if (!plane)
  153. continue;
  154. plane->funcs->atomic_destroy_state(plane,
  155. state->planes[i].state);
  156. state->planes[i].ptr = NULL;
  157. state->planes[i].state = NULL;
  158. }
  159. }
  160. EXPORT_SYMBOL(drm_atomic_state_default_clear);
  161. /**
  162. * drm_atomic_state_clear - clear state object
  163. * @state: atomic state
  164. *
  165. * When the w/w mutex algorithm detects a deadlock we need to back off and drop
  166. * all locks. So someone else could sneak in and change the current modeset
  167. * configuration. Which means that all the state assembled in @state is no
  168. * longer an atomic update to the current state, but to some arbitrary earlier
  169. * state. Which could break assumptions the driver's
  170. * &drm_mode_config_funcs.atomic_check likely relies on.
  171. *
  172. * Hence we must clear all cached state and completely start over, using this
  173. * function.
  174. */
  175. void drm_atomic_state_clear(struct drm_atomic_state *state)
  176. {
  177. struct drm_device *dev = state->dev;
  178. struct drm_mode_config *config = &dev->mode_config;
  179. if (config->funcs->atomic_state_clear)
  180. config->funcs->atomic_state_clear(state);
  181. else
  182. drm_atomic_state_default_clear(state);
  183. }
  184. EXPORT_SYMBOL(drm_atomic_state_clear);
  185. /**
  186. * __drm_atomic_state_free - free all memory for an atomic state
  187. * @ref: This atomic state to deallocate
  188. *
  189. * This frees all memory associated with an atomic state, including all the
  190. * per-object state for planes, crtcs and connectors.
  191. */
  192. void __drm_atomic_state_free(struct kref *ref)
  193. {
  194. struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
  195. struct drm_mode_config *config = &state->dev->mode_config;
  196. drm_atomic_state_clear(state);
  197. DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
  198. if (config->funcs->atomic_state_free) {
  199. config->funcs->atomic_state_free(state);
  200. } else {
  201. drm_atomic_state_default_release(state);
  202. kfree(state);
  203. }
  204. }
  205. EXPORT_SYMBOL(__drm_atomic_state_free);
  206. /**
  207. * drm_atomic_get_crtc_state - get crtc state
  208. * @state: global atomic state object
  209. * @crtc: crtc to get state object for
  210. *
  211. * This function returns the crtc state for the given crtc, allocating it if
  212. * needed. It will also grab the relevant crtc lock to make sure that the state
  213. * is consistent.
  214. *
  215. * Returns:
  216. *
  217. * Either the allocated state or the error code encoded into the pointer. When
  218. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  219. * entire atomic sequence must be restarted. All other errors are fatal.
  220. */
  221. struct drm_crtc_state *
  222. drm_atomic_get_crtc_state(struct drm_atomic_state *state,
  223. struct drm_crtc *crtc)
  224. {
  225. int ret, index = drm_crtc_index(crtc);
  226. struct drm_crtc_state *crtc_state;
  227. WARN_ON(!state->acquire_ctx);
  228. crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
  229. if (crtc_state)
  230. return crtc_state;
  231. ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
  232. if (ret)
  233. return ERR_PTR(ret);
  234. crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
  235. if (!crtc_state)
  236. return ERR_PTR(-ENOMEM);
  237. state->crtcs[index].state = crtc_state;
  238. state->crtcs[index].old_state = crtc->state;
  239. state->crtcs[index].new_state = crtc_state;
  240. state->crtcs[index].ptr = crtc;
  241. crtc_state->state = state;
  242. DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
  243. crtc->base.id, crtc->name, crtc_state, state);
  244. return crtc_state;
  245. }
  246. EXPORT_SYMBOL(drm_atomic_get_crtc_state);
  247. static void set_out_fence_for_crtc(struct drm_atomic_state *state,
  248. struct drm_crtc *crtc, s32 __user *fence_ptr)
  249. {
  250. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
  251. }
  252. static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
  253. struct drm_crtc *crtc)
  254. {
  255. s32 __user *fence_ptr;
  256. fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
  257. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
  258. return fence_ptr;
  259. }
  260. /**
  261. * drm_atomic_set_mode_for_crtc - set mode for CRTC
  262. * @state: the CRTC whose incoming state to update
  263. * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
  264. *
  265. * Set a mode (originating from the kernel) on the desired CRTC state and update
  266. * the enable property.
  267. *
  268. * RETURNS:
  269. * Zero on success, error code on failure. Cannot return -EDEADLK.
  270. */
  271. int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
  272. struct drm_display_mode *mode)
  273. {
  274. struct drm_mode_modeinfo umode;
  275. /* Early return for no change. */
  276. if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
  277. return 0;
  278. drm_property_blob_put(state->mode_blob);
  279. state->mode_blob = NULL;
  280. if (mode) {
  281. drm_mode_convert_to_umode(&umode, mode);
  282. state->mode_blob =
  283. drm_property_create_blob(state->crtc->dev,
  284. sizeof(umode),
  285. &umode);
  286. if (IS_ERR(state->mode_blob))
  287. return PTR_ERR(state->mode_blob);
  288. drm_mode_copy(&state->mode, mode);
  289. state->enable = true;
  290. DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
  291. mode->name, state);
  292. } else {
  293. memset(&state->mode, 0, sizeof(state->mode));
  294. state->enable = false;
  295. DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
  296. state);
  297. }
  298. return 0;
  299. }
  300. EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	/* No change: the state already holds this exact blob. */
	if (blob == state->mode_blob)
		return 0;

	/* Drop the old mode before validating the new one. On any failure
	 * below the state is deliberately left with no mode blob set. */
	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		/* Reject blobs of the wrong size, or whose contents do not
		 * convert to a valid kernel-internal mode. */
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		/* Take our own reference on the new mode blob. */
		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
  340. /**
  341. * drm_atomic_replace_property_blob - replace a blob property
  342. * @blob: a pointer to the member blob to be replaced
  343. * @new_blob: the new blob to replace with
  344. * @replaced: whether the blob has been replaced
  345. *
  346. * RETURNS:
  347. * Zero on success, error code on failure
  348. */
  349. static void
  350. drm_atomic_replace_property_blob(struct drm_property_blob **blob,
  351. struct drm_property_blob *new_blob,
  352. bool *replaced)
  353. {
  354. struct drm_property_blob *old_blob = *blob;
  355. if (old_blob == new_blob)
  356. return;
  357. drm_property_blob_put(old_blob);
  358. if (new_blob)
  359. drm_property_blob_get(new_blob);
  360. *blob = new_blob;
  361. *replaced = true;
  362. return;
  363. }
  364. static int
  365. drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
  366. struct drm_property_blob **blob,
  367. uint64_t blob_id,
  368. ssize_t expected_size,
  369. bool *replaced)
  370. {
  371. struct drm_property_blob *new_blob = NULL;
  372. if (blob_id != 0) {
  373. new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
  374. if (new_blob == NULL)
  375. return -EINVAL;
  376. if (expected_size > 0 && expected_size != new_blob->length) {
  377. drm_property_blob_put(new_blob);
  378. return -EINVAL;
  379. }
  380. }
  381. drm_atomic_replace_property_blob(blob, new_blob, replaced);
  382. drm_property_blob_put(new_blob);
  383. return 0;
  384. }
/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		/* An unknown or zero blob id yields NULL here, which
		 * set_mode_prop treats as "disable the CRTC". */
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		/* set_mode_prop took its own reference; drop the lookup one. */
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		/* expected_size of -1: LUT blobs may be any length. */
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		/* The CTM blob must be exactly one drm_color_ctm. */
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);
		if (!fence_ptr)
			return 0;
		/* Pre-write -1 so userspace sees a sentinel (and we verify
		 * the pointer is writable) before the real fd lands. */
		if (put_user(-1, fence_ptr))
			return -EFAULT;
		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property)
		/* Not a core property: defer to the driver. */
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to read
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		/* Blob-backed properties report the blob object id, 0 when
		 * the blob is unset. */
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		/* Write-only property: always reads back as 0. */
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		/* Not a core property: defer to the driver. */
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	/* active implies enable: a CRTC cannot be on without being used. */
	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}
/* Dump a CRTC state to @p, for atomic state debugging (e.g. debugfs). */
static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	/* Let the driver append its subclassed state, if any. */
	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	/* Callers must have set up an acquire context for the w/w locking. */
	WARN_ON(!state->acquire_ctx);

	/* Reuse the duplicated state if this plane is already in @state. */
	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	/* May fail with -EDEADLK, which restarts the whole atomic sequence. */
	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	/* Track old (current) and new (duplicated) state side by side. */
	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	/*
	 * Updating a plane implicitly touches its CRTC, so pull the CRTC
	 * state (and therefore its lock) into the atomic state as well.
	 */
	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
		/* set_fb takes its own reference; drop the lookup one. */
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		/* An explicit fence may only be set once per state. */
		if (state->fence)
			return -EINVAL;

		/* fd -1 is the "no fence" sentinel, not an error. */
		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		/* Handles lock/state acquisition and plane_mask updates. */
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		/* CRTC coordinates are signed, src coordinates unsigned. */
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->rotation_property) {
		/* Exactly one rotation angle bit must be set. */
		if (!is_power_of_2(val & DRM_ROTATE_MASK))
			return -EINVAL;
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		/* Driver-private properties are delegated to the driver. */
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);
  686. /**
  687. * drm_atomic_plane_get_property - get property value from plane state
  688. * @plane: the drm plane to set a property on
  689. * @state: the state object to get the property value from
  690. * @property: the property to set
  691. * @val: return location for the property value
  692. *
  693. * This function handles generic/core properties and calls out to driver's
  694. * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
  695. * consistent behavior you must call this function rather than the driver hook
  696. * directly.
  697. *
  698. * RETURNS:
  699. * Zero on success, error code on failure
  700. */
  701. static int
  702. drm_atomic_plane_get_property(struct drm_plane *plane,
  703. const struct drm_plane_state *state,
  704. struct drm_property *property, uint64_t *val)
  705. {
  706. struct drm_device *dev = plane->dev;
  707. struct drm_mode_config *config = &dev->mode_config;
  708. if (property == config->prop_fb_id) {
  709. *val = (state->fb) ? state->fb->base.id : 0;
  710. } else if (property == config->prop_in_fence_fd) {
  711. *val = -1;
  712. } else if (property == config->prop_crtc_id) {
  713. *val = (state->crtc) ? state->crtc->base.id : 0;
  714. } else if (property == config->prop_crtc_x) {
  715. *val = I642U64(state->crtc_x);
  716. } else if (property == config->prop_crtc_y) {
  717. *val = I642U64(state->crtc_y);
  718. } else if (property == config->prop_crtc_w) {
  719. *val = state->crtc_w;
  720. } else if (property == config->prop_crtc_h) {
  721. *val = state->crtc_h;
  722. } else if (property == config->prop_src_x) {
  723. *val = state->src_x;
  724. } else if (property == config->prop_src_y) {
  725. *val = state->src_y;
  726. } else if (property == config->prop_src_w) {
  727. *val = state->src_w;
  728. } else if (property == config->prop_src_h) {
  729. *val = state->src_h;
  730. } else if (property == plane->rotation_property) {
  731. *val = state->rotation;
  732. } else if (property == plane->zpos_property) {
  733. *val = state->zpos;
  734. } else if (plane->funcs->atomic_get_property) {
  735. return plane->funcs->atomic_get_property(plane, state, property, val);
  736. } else {
  737. return -EINVAL;
  738. }
  739. return 0;
  740. }
  741. static bool
  742. plane_switching_crtc(struct drm_atomic_state *state,
  743. struct drm_plane *plane,
  744. struct drm_plane_state *plane_state)
  745. {
  746. if (!plane->state->crtc || !plane_state->crtc)
  747. return false;
  748. if (plane->state->crtc == plane_state->crtc)
  749. return false;
  750. /* This could be refined, but currently there's no helper or driver code
  751. * to implement direct switching of active planes nor userspace to take
  752. * advantage of more direct plane switching without the intermediate
  753. * full OFF state.
  754. */
  755. return true;
  756. }
/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
				 drm_get_format_name(state->fb->format->format,
						     &format_name));
		return ret;
	}

	/* Give drivers some help against integer overflows */
	/* crtc_x/y are signed; ensure x + w and y + h fit in an int. */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	/* Source coordinates are in 16.16 fixed point. */
	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		/* (frac * 15625) >> 10 == frac * 1000000 / 65536: scales the
		 * 16-bit fractional part to six decimal digits for display. */
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
		return -ENOSPC;
	}

	/* Core doesn't support moving an active plane between CRTCs. */
	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
  829. static void drm_atomic_plane_print_state(struct drm_printer *p,
  830. const struct drm_plane_state *state)
  831. {
  832. struct drm_plane *plane = state->plane;
  833. struct drm_rect src = drm_plane_state_src(state);
  834. struct drm_rect dest = drm_plane_state_dest(state);
  835. drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
  836. drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
  837. drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
  838. if (state->fb) {
  839. struct drm_framebuffer *fb = state->fb;
  840. int i, n = fb->format->num_planes;
  841. struct drm_format_name_buf format_name;
  842. drm_printf(p, "\t\tformat=%s\n",
  843. drm_get_format_name(fb->format->format, &format_name));
  844. drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
  845. drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
  846. drm_printf(p, "\t\tlayers:\n");
  847. for (i = 0; i < n; i++) {
  848. drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
  849. drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
  850. }
  851. }
  852. drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
  853. drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
  854. drm_printf(p, "\trotation=%x\n", state->rotation);
  855. if (plane->funcs->atomic_print_state)
  856. plane->funcs->atomic_print_state(p, state);
  857. }
/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	/* Callers must have set up an acquire context for the w/w locking. */
	WARN_ON(!state->acquire_ctx);

	/* May fail with -EDEADLK, which restarts the whole atomic sequence. */
	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	/*
	 * Unlike planes and CRTCs, connectors can be hotplugged after the
	 * atomic state was allocated, so the per-state array may need to
	 * grow. On krealloc failure the old array remains valid and owned
	 * by @state, so simply bailing out does not leak.
	 */
	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		/* Zero only the newly appended slots. */
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	/* Reuse the duplicated state if this connector is already in @state. */
	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	/* Hold a connector reference for as long as it sits in this state. */
	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	/* An enabled connector also pulls in its CRTC state (and lock). */
	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		/* Handles lock/state acquisition and connector_mask updates. */
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us.  Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		/* Analog TV properties are plain value stores. */
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, driver
		 * silently rejects it and returns a 0. This prevents userspace
		 * from accidently breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (connector->funcs->atomic_set_property) {
		/* Driver-private properties are delegated to the driver. */
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
  997. static void drm_atomic_connector_print_state(struct drm_printer *p,
  998. const struct drm_connector_state *state)
  999. {
  1000. struct drm_connector *connector = state->connector;
  1001. drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
  1002. drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
  1003. if (connector->funcs->atomic_print_state)
  1004. connector->funcs->atomic_print_state(p, state);
  1005. }
  1006. /**
  1007. * drm_atomic_connector_get_property - get property value from connector state
  1008. * @connector: the drm connector to set a property on
  1009. * @state: the state object to get the property value from
  1010. * @property: the property to set
  1011. * @val: return location for the property value
  1012. *
  1013. * This function handles generic/core properties and calls out to driver's
  1014. * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
  1015. * consistent behavior you must call this function rather than the driver hook
  1016. * directly.
  1017. *
  1018. * RETURNS:
  1019. * Zero on success, error code on failure
  1020. */
  1021. static int
  1022. drm_atomic_connector_get_property(struct drm_connector *connector,
  1023. const struct drm_connector_state *state,
  1024. struct drm_property *property, uint64_t *val)
  1025. {
  1026. struct drm_device *dev = connector->dev;
  1027. struct drm_mode_config *config = &dev->mode_config;
  1028. if (property == config->prop_crtc_id) {
  1029. *val = (state->crtc) ? state->crtc->base.id : 0;
  1030. } else if (property == config->dpms_property) {
  1031. *val = connector->dpms;
  1032. } else if (property == config->tv_select_subconnector_property) {
  1033. *val = state->tv.subconnector;
  1034. } else if (property == config->tv_left_margin_property) {
  1035. *val = state->tv.margins.left;
  1036. } else if (property == config->tv_right_margin_property) {
  1037. *val = state->tv.margins.right;
  1038. } else if (property == config->tv_top_margin_property) {
  1039. *val = state->tv.margins.top;
  1040. } else if (property == config->tv_bottom_margin_property) {
  1041. *val = state->tv.margins.bottom;
  1042. } else if (property == config->tv_mode_property) {
  1043. *val = state->tv.mode;
  1044. } else if (property == config->tv_brightness_property) {
  1045. *val = state->tv.brightness;
  1046. } else if (property == config->tv_contrast_property) {
  1047. *val = state->tv.contrast;
  1048. } else if (property == config->tv_flicker_reduction_property) {
  1049. *val = state->tv.flicker_reduction;
  1050. } else if (property == config->tv_overscan_property) {
  1051. *val = state->tv.overscan;
  1052. } else if (property == config->tv_saturation_property) {
  1053. *val = state->tv.saturation;
  1054. } else if (property == config->tv_hue_property) {
  1055. *val = state->tv.hue;
  1056. } else if (property == config->link_status_property) {
  1057. *val = state->link_status;
  1058. } else if (connector->funcs->atomic_get_property) {
  1059. return connector->funcs->atomic_get_property(connector,
  1060. state, property, val);
  1061. } else {
  1062. return -EINVAL;
  1063. }
  1064. return 0;
  1065. }
  1066. int drm_atomic_get_property(struct drm_mode_object *obj,
  1067. struct drm_property *property, uint64_t *val)
  1068. {
  1069. struct drm_device *dev = property->dev;
  1070. int ret;
  1071. switch (obj->type) {
  1072. case DRM_MODE_OBJECT_CONNECTOR: {
  1073. struct drm_connector *connector = obj_to_connector(obj);
  1074. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  1075. ret = drm_atomic_connector_get_property(connector,
  1076. connector->state, property, val);
  1077. break;
  1078. }
  1079. case DRM_MODE_OBJECT_CRTC: {
  1080. struct drm_crtc *crtc = obj_to_crtc(obj);
  1081. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  1082. ret = drm_atomic_crtc_get_property(crtc,
  1083. crtc->state, property, val);
  1084. break;
  1085. }
  1086. case DRM_MODE_OBJECT_PLANE: {
  1087. struct drm_plane *plane = obj_to_plane(obj);
  1088. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  1089. ret = drm_atomic_plane_get_property(plane,
  1090. plane->state, property, val);
  1091. break;
  1092. }
  1093. default:
  1094. ret = -EINVAL;
  1095. break;
  1096. }
  1097. return ret;
  1098. }
/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	/*
	 * Unlink from the old CRTC first: clear this plane's bit in its
	 * plane_mask. The old CRTC's state is already part of the overall
	 * atomic state (pulled in when the plane state was acquired), so
	 * failure here indicates a bug - hence the WARN_ON.
	 */
	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
	}

	plane_state->crtc = crtc;

	/* Link to the new CRTC, acquiring its state (and lock) as needed. */
	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
  1143. /**
  1144. * drm_atomic_set_fb_for_plane - set framebuffer for plane
  1145. * @plane_state: atomic state object for the plane
  1146. * @fb: fb to use for the plane
  1147. *
  1148. * Changing the assigned framebuffer for a plane requires us to grab a reference
  1149. * to the new fb and drop the reference to the old fb, if there is one. This
  1150. * function takes care of all these details besides updating the pointer in the
  1151. * state object itself.
  1152. */
  1153. void
  1154. drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
  1155. struct drm_framebuffer *fb)
  1156. {
  1157. if (fb)
  1158. DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
  1159. fb->base.id, plane_state);
  1160. else
  1161. DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
  1162. plane_state);
  1163. drm_framebuffer_assign(&plane_state->fb, fb);
  1164. }
  1165. EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
  1166. /**
  1167. * drm_atomic_set_fence_for_plane - set fence for plane
  1168. * @plane_state: atomic state object for the plane
  1169. * @fence: dma_fence to use for the plane
  1170. *
  1171. * Helper to setup the plane_state fence in case it is not set yet.
  1172. * By using this drivers doesn't need to worry if the user choose
  1173. * implicit or explicit fencing.
  1174. *
  1175. * This function will not set the fence to the state if it was set
  1176. * via explicit fencing interfaces on the atomic ioctl. In that case it will
  1177. * drop the reference to the fence as we are not storing it anywhere.
  1178. * Otherwise, if &drm_plane_state.fence is not set this function we just set it
  1179. * with the received implicit fence. In both cases this function consumes a
  1180. * reference for @fence.
  1181. */
  1182. void
  1183. drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
  1184. struct dma_fence *fence)
  1185. {
  1186. if (plane_state->fence) {
  1187. dma_fence_put(fence);
  1188. return;
  1189. }
  1190. plane_state->fence = fence;
  1191. }
  1192. EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	/* Nothing to do if the assignment doesn't change. */
	if (conn_state->crtc == crtc)
		return 0;

	/*
	 * Unlink from the old CRTC: its new state is already in the atomic
	 * state (pulled in when the connector state was acquired), so
	 * drm_atomic_get_new_crtc_state() cannot return NULL here. Also
	 * drop the connector reference taken for the old link.
	 */
	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		crtc_state->connector_mask &=
			~(1 << drm_connector_index(conn_state->connector));

		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	/*
	 * Link to the new CRTC, acquiring its state (and lock) as needed,
	 * and take a connector reference for the new link.
	 */
	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			1 << drm_connector_index(conn_state->connector);

		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded seralization if the update is just for the planes on one crtc. Hence
 * drivers and helpers should only call this when really needed (e.g. when a
 * full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	/* Make sure the CRTC's own state (and lock) is part of @state. */
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Needed so connector_mask below is stable against hotplug etc. */
	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			/* End the iteration before returning to drop its reference. */
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  1292. /**
  1293. * drm_atomic_add_affected_planes - add planes for crtc
  1294. * @state: atomic state
  1295. * @crtc: DRM crtc
  1296. *
  1297. * This function walks the current configuration and adds all planes
  1298. * currently used by @crtc to the atomic configuration @state. This is useful
  1299. * when an atomic commit also needs to check all currently enabled plane on
  1300. * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
  1301. * to avoid special code to force-enable all planes.
  1302. *
  1303. * Since acquiring a plane state will always also acquire the w/w mutex of the
  1304. * current CRTC for that plane (if there is any) adding all the plane states for
  1305. * a CRTC will not reduce parallism of atomic updates.
  1306. *
  1307. * Returns:
  1308. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1309. * then the w/w mutex code has detected a deadlock and the entire atomic
  1310. * sequence must be restarted. All other errors are fatal.
  1311. */
  1312. int
  1313. drm_atomic_add_affected_planes(struct drm_atomic_state *state,
  1314. struct drm_crtc *crtc)
  1315. {
  1316. struct drm_plane *plane;
  1317. WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
  1318. drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
  1319. struct drm_plane_state *plane_state =
  1320. drm_atomic_get_plane_state(state, plane);
  1321. if (IS_ERR(plane_state))
  1322. return PTR_ERR(plane_state);
  1323. }
  1324. return 0;
  1325. }
  1326. EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
 * @state: atomic state
 *
 * This function should be used by legacy entry points which don't understand
 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
 * the slowpath completed.
 */
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	int ret;
	bool global = false;

	/*
	 * If this context is also installed as the device-global acquire
	 * context, drop that alias while backing off (lock_all_ctx would
	 * otherwise trip over it) and restore it once relocked. The WARN_ON
	 * flags this legacy aliasing as something to clean up.
	 */
	if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
		global = true;

		dev->mode_config.acquire_ctx = NULL;
	}

retry:
	/* Drop all held locks and block on the contended one. */
	drm_modeset_backoff(state->acquire_ctx);

	/* Re-take every modeset lock; on -EDEADLK just back off again. */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		goto retry;

	if (global)
		dev->mode_config.acquire_ctx = state->acquire_ctx;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);
  1353. /**
  1354. * drm_atomic_check_only - check whether a given config would work
  1355. * @state: atomic configuration to check
  1356. *
  1357. * Note that this function can return -EDEADLK if the driver needed to acquire
  1358. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1359. * backoff dance and restart. All other errors are fatal.
  1360. *
  1361. * Returns:
  1362. * 0 on success, negative error code on failure.
  1363. */
  1364. int drm_atomic_check_only(struct drm_atomic_state *state)
  1365. {
  1366. struct drm_device *dev = state->dev;
  1367. struct drm_mode_config *config = &dev->mode_config;
  1368. struct drm_plane *plane;
  1369. struct drm_plane_state *plane_state;
  1370. struct drm_crtc *crtc;
  1371. struct drm_crtc_state *crtc_state;
  1372. int i, ret = 0;
  1373. DRM_DEBUG_ATOMIC("checking %p\n", state);
  1374. for_each_new_plane_in_state(state, plane, plane_state, i) {
  1375. ret = drm_atomic_plane_check(plane, plane_state);
  1376. if (ret) {
  1377. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
  1378. plane->base.id, plane->name);
  1379. return ret;
  1380. }
  1381. }
  1382. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1383. ret = drm_atomic_crtc_check(crtc, crtc_state);
  1384. if (ret) {
  1385. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
  1386. crtc->base.id, crtc->name);
  1387. return ret;
  1388. }
  1389. }
  1390. if (config->funcs->atomic_check)
  1391. ret = config->funcs->atomic_check(state->dev, state);
  1392. if (!state->allow_modeset) {
  1393. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1394. if (drm_atomic_crtc_needs_modeset(crtc_state)) {
  1395. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
  1396. crtc->base.id, crtc->name);
  1397. return -EINVAL;
  1398. }
  1399. }
  1400. }
  1401. return ret;
  1402. }
  1403. EXPORT_SYMBOL(drm_atomic_check_only);
  1404. /**
  1405. * drm_atomic_commit - commit configuration atomically
  1406. * @state: atomic configuration to check
  1407. *
  1408. * Note that this function can return -EDEADLK if the driver needed to acquire
  1409. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1410. * backoff dance and restart. All other errors are fatal.
  1411. *
  1412. * This function will take its own reference on @state.
  1413. * Callers should always release their reference with drm_atomic_state_put().
  1414. *
  1415. * Returns:
  1416. * 0 on success, negative error code on failure.
  1417. */
  1418. int drm_atomic_commit(struct drm_atomic_state *state)
  1419. {
  1420. struct drm_mode_config *config = &state->dev->mode_config;
  1421. int ret;
  1422. ret = drm_atomic_check_only(state);
  1423. if (ret)
  1424. return ret;
  1425. DRM_DEBUG_ATOMIC("commiting %p\n", state);
  1426. return config->funcs->atomic_commit(state->dev, state, false);
  1427. }
  1428. EXPORT_SYMBOL(drm_atomic_commit);
  1429. /**
  1430. * drm_atomic_nonblocking_commit - atomic nonblocking commit
  1431. * @state: atomic configuration to check
  1432. *
  1433. * Note that this function can return -EDEADLK if the driver needed to acquire
  1434. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1435. * backoff dance and restart. All other errors are fatal.
  1436. *
  1437. * This function will take its own reference on @state.
  1438. * Callers should always release their reference with drm_atomic_state_put().
  1439. *
  1440. * Returns:
  1441. * 0 on success, negative error code on failure.
  1442. */
  1443. int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
  1444. {
  1445. struct drm_mode_config *config = &state->dev->mode_config;
  1446. int ret;
  1447. ret = drm_atomic_check_only(state);
  1448. if (ret)
  1449. return ret;
  1450. DRM_DEBUG_ATOMIC("commiting %p nonblocking\n", state);
  1451. return config->funcs->atomic_commit(state->dev, state, true);
  1452. }
  1453. EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  1454. static void drm_atomic_print_state(const struct drm_atomic_state *state)
  1455. {
  1456. struct drm_printer p = drm_info_printer(state->dev->dev);
  1457. struct drm_plane *plane;
  1458. struct drm_plane_state *plane_state;
  1459. struct drm_crtc *crtc;
  1460. struct drm_crtc_state *crtc_state;
  1461. struct drm_connector *connector;
  1462. struct drm_connector_state *connector_state;
  1463. int i;
  1464. DRM_DEBUG_ATOMIC("checking %p\n", state);
  1465. for_each_new_plane_in_state(state, plane, plane_state, i)
  1466. drm_atomic_plane_print_state(&p, plane_state);
  1467. for_each_new_crtc_in_state(state, crtc, crtc_state, i)
  1468. drm_atomic_crtc_print_state(&p, crtc_state);
  1469. for_each_new_connector_in_state(state, connector, connector_state, i)
  1470. drm_atomic_connector_print_state(&p, connector_state);
  1471. }
  1472. /**
  1473. * drm_state_dump - dump entire device atomic state
  1474. * @dev: the drm device
  1475. * @p: where to print the state to
  1476. *
  1477. * Just for debugging. Drivers might want an option to dump state
  1478. * to dmesg in case of error irq's. (Hint, you probably want to
  1479. * ratelimit this!)
  1480. *
  1481. * The caller must drm_modeset_lock_all(), or if this is called
  1482. * from error irq handler, it should not be enabled by default.
  1483. * (Ie. if you are debugging errors you might not care that this
  1484. * is racey. But calling this without all modeset locks held is
  1485. * not inherently safe.)
  1486. */
  1487. void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
  1488. {
  1489. struct drm_mode_config *config = &dev->mode_config;
  1490. struct drm_plane *plane;
  1491. struct drm_crtc *crtc;
  1492. struct drm_connector *connector;
  1493. struct drm_connector_list_iter conn_iter;
  1494. if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
  1495. return;
  1496. list_for_each_entry(plane, &config->plane_list, head)
  1497. drm_atomic_plane_print_state(p, plane->state);
  1498. list_for_each_entry(crtc, &config->crtc_list, head)
  1499. drm_atomic_crtc_print_state(p, crtc->state);
  1500. drm_connector_list_iter_begin(dev, &conn_iter);
  1501. drm_for_each_connector_iter(connector, &conn_iter)
  1502. drm_atomic_connector_print_state(p, connector->state);
  1503. drm_connector_list_iter_end(&conn_iter);
  1504. }
  1505. EXPORT_SYMBOL(drm_state_dump);
#ifdef CONFIG_DEBUG_FS
/* debugfs "state" file: dump the full current atomic state of the device. */
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	/* drm_state_dump() requires all modeset locks to be held. */
	drm_modeset_lock_all(dev);
	drm_state_dump(dev, &p);
	drm_modeset_unlock_all(dev);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

/* Register the atomic debugfs files for @minor; returns 0 or a negative
 * error code from drm_debugfs_create_files().
 */
int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
			ARRAY_SIZE(drm_atomic_debugfs_list),
			minor->debugfs_root, minor);
}
#endif
/*
 * The big monster ioctl
 */
  1531. static struct drm_pending_vblank_event *create_vblank_event(
  1532. struct drm_device *dev, uint64_t user_data)
  1533. {
  1534. struct drm_pending_vblank_event *e = NULL;
  1535. e = kzalloc(sizeof *e, GFP_KERNEL);
  1536. if (!e)
  1537. return NULL;
  1538. e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
  1539. e->event.base.length = sizeof(e->event);
  1540. e->event.user_data = user_data;
  1541. return e;
  1542. }
  1543. static int atomic_set_prop(struct drm_atomic_state *state,
  1544. struct drm_mode_object *obj, struct drm_property *prop,
  1545. uint64_t prop_value)
  1546. {
  1547. struct drm_mode_object *ref;
  1548. int ret;
  1549. if (!drm_property_change_valid_get(prop, prop_value, &ref))
  1550. return -EINVAL;
  1551. switch (obj->type) {
  1552. case DRM_MODE_OBJECT_CONNECTOR: {
  1553. struct drm_connector *connector = obj_to_connector(obj);
  1554. struct drm_connector_state *connector_state;
  1555. connector_state = drm_atomic_get_connector_state(state, connector);
  1556. if (IS_ERR(connector_state)) {
  1557. ret = PTR_ERR(connector_state);
  1558. break;
  1559. }
  1560. ret = drm_atomic_connector_set_property(connector,
  1561. connector_state, prop, prop_value);
  1562. break;
  1563. }
  1564. case DRM_MODE_OBJECT_CRTC: {
  1565. struct drm_crtc *crtc = obj_to_crtc(obj);
  1566. struct drm_crtc_state *crtc_state;
  1567. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  1568. if (IS_ERR(crtc_state)) {
  1569. ret = PTR_ERR(crtc_state);
  1570. break;
  1571. }
  1572. ret = drm_atomic_crtc_set_property(crtc,
  1573. crtc_state, prop, prop_value);
  1574. break;
  1575. }
  1576. case DRM_MODE_OBJECT_PLANE: {
  1577. struct drm_plane *plane = obj_to_plane(obj);
  1578. struct drm_plane_state *plane_state;
  1579. plane_state = drm_atomic_get_plane_state(state, plane);
  1580. if (IS_ERR(plane_state)) {
  1581. ret = PTR_ERR(plane_state);
  1582. break;
  1583. }
  1584. ret = drm_atomic_plane_set_property(plane,
  1585. plane_state, prop, prop_value);
  1586. break;
  1587. }
  1588. default:
  1589. ret = -EINVAL;
  1590. break;
  1591. }
  1592. drm_property_change_valid_put(prop, ref);
  1593. return ret;
  1594. }
  1595. /**
  1596. * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
  1597. *
  1598. * @dev: drm device to check.
  1599. * @plane_mask: plane mask for planes that were updated.
  1600. * @ret: return value, can be -EDEADLK for a retry.
  1601. *
  1602. * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
  1603. * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
  1604. * is a common operation for each atomic update, so this call is split off as a
  1605. * helper.
  1606. */
  1607. void drm_atomic_clean_old_fb(struct drm_device *dev,
  1608. unsigned plane_mask,
  1609. int ret)
  1610. {
  1611. struct drm_plane *plane;
  1612. /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
  1613. * locks (ie. while it is still safe to deref plane->state). We
  1614. * need to do this here because the driver entry points cannot
  1615. * distinguish between legacy and atomic ioctls.
  1616. */
  1617. drm_for_each_plane_mask(plane, dev, plane_mask) {
  1618. if (ret == 0) {
  1619. struct drm_framebuffer *new_fb = plane->state->fb;
  1620. if (new_fb)
  1621. drm_framebuffer_get(new_fb);
  1622. plane->fb = new_fb;
  1623. plane->crtc = plane->state->crtc;
  1624. if (plane->old_fb)
  1625. drm_framebuffer_put(plane->old_fb);
  1626. }
  1627. plane->old_fb = NULL;
  1628. }
  1629. }
  1630. EXPORT_SYMBOL(drm_atomic_clean_old_fb);
  1631. /**
  1632. * DOC: explicit fencing properties
  1633. *
  1634. * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A Fence or a group of fences are transferred to/from
  1636. * userspace using Sync File fds and there are two DRM properties for that.
  1637. * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
  1638. * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
  1639. *
  1640. * As a contrast, with implicit fencing the kernel keeps track of any
  1641. * ongoing rendering, and automatically ensures that the atomic update waits
  1642. * for any pending rendering to complete. For shared buffers represented with
  1643. * a &struct dma_buf this is tracked in &struct reservation_object.
  1644. * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
  1645. * whereas explicit fencing is what Android wants.
  1646. *
 * "IN_FENCE_FD":
  1648. * Use this property to pass a fence that DRM should wait on before
  1649. * proceeding with the Atomic Commit request and show the framebuffer for
  1650. * the plane on the screen. The fence can be either a normal fence or a
  1651. * merged one, the sync_file framework will handle both cases and use a
  1652. * fence_array if a merged fence is received. Passing -1 here means no
  1653. * fences to wait on.
  1654. *
  1655. * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
  1656. * it will only check if the Sync File is a valid one.
  1657. *
  1658. * On the driver side the fence is stored on the @fence parameter of
  1659. * &struct drm_plane_state. Drivers which also support implicit fencing
  1660. * should set the implicit fence using drm_atomic_set_fence_for_plane(),
  1661. * to make sure there's consistent behaviour between drivers in precedence
  1662. * of implicit vs. explicit fencing.
  1663. *
 * "OUT_FENCE_PTR":
  1665. * Use this property to pass a file descriptor pointer to DRM. Once the
  1666. * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
  1667. * the file descriptor number of a Sync File. This Sync File contains the
  1668. * CRTC fence that will be signaled when all framebuffers present on the
 * Atomic Commit request for that given CRTC are scanned out on the
  1670. * screen.
  1671. *
 * The Atomic Commit request fails if an invalid pointer is passed. If the
  1673. * Atomic Commit request fails for any other reason the out fence fd
 * returned will be -1. On an Atomic Commit with the
  1675. * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
  1676. *
  1677. * Note that out-fences don't have a special interface to drivers and are
  1678. * internally represented by a &struct drm_pending_vblank_event in struct
  1679. * &drm_crtc_state, which is also used by the nonblocking atomic commit
  1680. * helpers and for the DRM event handling for existing userspace.
  1681. */
/* Book-keeping for one OUT_FENCE_PTR while an atomic ioctl is in flight:
 * tracked so the fd/sync_file can be installed on success or torn down
 * on failure (see setup_out_fence()/complete_crtc_signaling()).
 */
struct drm_out_fence_state {
	s32 __user *out_fence_ptr;	/* userspace location to write the fd to */
	struct sync_file *sync_file;	/* sync_file wrapping the CRTC out-fence */
	int fd;				/* reserved fd, installed only on success */
};
/* Reserve an fd, publish it to userspace through out_fence_ptr, and wrap
 * @fence in a sync_file. The fd is only reserved here, not installed; on
 * failure the partially-initialized @fence_state (fd and/or sync_file) is
 * cleaned up by complete_crtc_signaling(), not by this function.
 */
static int setup_out_fence(struct drm_out_fence_state *fence_state,
			   struct dma_fence *fence)
{
	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
	if (fence_state->fd < 0)
		return fence_state->fd;

	/* Write the fd to userspace before creating the sync_file, so a
	 * faulting pointer fails the ioctl before more state is built up. */
	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
		return -EFAULT;

	fence_state->sync_file = sync_file_create(fence);
	if (!fence_state->sync_file)
		return -ENOMEM;

	return 0;
}
/* Set up completion signaling (page-flip events and/or out-fences) for every
 * CRTC in @state, growing *fence_state/*num_fences as out-fences are added.
 * On error the caller must invoke complete_crtc_signaling() to undo the
 * partially-built events and fences. No-op for TEST_ONLY commits.
 */
static int prepare_crtc_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret;

	/* Test-only commits never signal anything. */
	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		/* An event is needed for userspace page-flip events and as
		 * the internal carrier for an out-fence. */
		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(dev, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			/* NOTE(review): with no file_priv (in-kernel caller)
			 * there is nobody to deliver the event to, so skip
			 * the reservation — the event stays attached to
			 * crtc_state for the commit machinery to consume. */
			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			/* Grow the fence-state array by one entry. */
			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			/* *num_fences is bumped even if setup fails partway,
			 * so complete_crtc_signaling() can clean the entry. */
			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			crtc_state->event->base.fence = fence;
		}
	}

	return 0;
}
/* Finish or unwind what prepare_crtc_signaling() built. With @install_fds
 * (successful commit) the reserved fds are installed and only the tracking
 * array is freed. Otherwise all events, sync_files, fds and userspace
 * fence pointers are rolled back.
 */
static void complete_crtc_signaling(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    struct drm_out_fence_state *fence_state,
				    unsigned int num_fences,
				    bool install_fds)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (install_fds) {
		/* Success: hand each sync_file to userspace via its fd. */
		for (i = 0; i < num_fences; i++)
			fd_install(fence_state[i].fd,
				   fence_state[i].sync_file->file);

		kfree(fence_state);
		return;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_pending_vblank_event *event = crtc_state->event;
		/*
		 * Free the allocated event. drm_atomic_helper_setup_commit
		 * can allocate an event too, so only free it if it's ours
		 * to prevent a double free in drm_atomic_state_clear.
		 */
		if (event && (event->base.fence || event->base.file_priv)) {
			drm_event_cancel_free(dev, &event->base);
			crtc_state->event = NULL;
		}
	}

	if (!fence_state)
		return;

	/* Undo each out-fence: drop the sync_file, release the reserved fd,
	 * and write -1 back to userspace per the OUT_FENCE_PTR contract. */
	for (i = 0; i < num_fences; i++) {
		if (fence_state[i].sync_file)
			fput(fence_state[i].sync_file->file);
		if (fence_state[i].fd >= 0)
			put_unused_fd(fence_state[i].fd);

		/* If this fails log error to the user */
		if (fence_state[i].out_fence_ptr &&
		    put_user(-1, fence_state[i].out_fence_ptr))
			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
	}

	kfree(fence_state);
}
  1799. int drm_atomic_remove_fb(struct drm_framebuffer *fb)
  1800. {
  1801. struct drm_modeset_acquire_ctx ctx;
  1802. struct drm_device *dev = fb->dev;
  1803. struct drm_atomic_state *state;
  1804. struct drm_plane *plane;
  1805. struct drm_connector *conn;
  1806. struct drm_connector_state *conn_state;
  1807. int i, ret = 0;
  1808. unsigned plane_mask;
  1809. state = drm_atomic_state_alloc(dev);
  1810. if (!state)
  1811. return -ENOMEM;
  1812. drm_modeset_acquire_init(&ctx, 0);
  1813. state->acquire_ctx = &ctx;
  1814. retry:
  1815. plane_mask = 0;
  1816. ret = drm_modeset_lock_all_ctx(dev, &ctx);
  1817. if (ret)
  1818. goto unlock;
  1819. drm_for_each_plane(plane, dev) {
  1820. struct drm_plane_state *plane_state;
  1821. if (plane->state->fb != fb)
  1822. continue;
  1823. plane_state = drm_atomic_get_plane_state(state, plane);
  1824. if (IS_ERR(plane_state)) {
  1825. ret = PTR_ERR(plane_state);
  1826. goto unlock;
  1827. }
  1828. if (plane_state->crtc->primary == plane) {
  1829. struct drm_crtc_state *crtc_state;
  1830. crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
  1831. ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
  1832. if (ret)
  1833. goto unlock;
  1834. crtc_state->active = false;
  1835. ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
  1836. if (ret)
  1837. goto unlock;
  1838. }
  1839. drm_atomic_set_fb_for_plane(plane_state, NULL);
  1840. ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
  1841. if (ret)
  1842. goto unlock;
  1843. plane_mask |= BIT(drm_plane_index(plane));
  1844. plane->old_fb = plane->fb;
  1845. }
  1846. for_each_connector_in_state(state, conn, conn_state, i) {
  1847. ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
  1848. if (ret)
  1849. goto unlock;
  1850. }
  1851. if (plane_mask)
  1852. ret = drm_atomic_commit(state);
  1853. unlock:
  1854. if (plane_mask)
  1855. drm_atomic_clean_old_fb(dev, plane_mask, ret);
  1856. if (ret == -EDEADLK) {
  1857. drm_modeset_backoff(&ctx);
  1858. goto retry;
  1859. }
  1860. drm_atomic_state_put(state);
  1861. drm_modeset_drop_locks(&ctx);
  1862. drm_modeset_acquire_fini(&ctx);
  1863. return ret;
  1864. }
/* IOCTL entry point for DRM_IOCTL_MODE_ATOMIC: build an atomic state from
 * the userspace object/property arrays, then test or commit it, retrying
 * the whole sequence on -EDEADLK.
 */
int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	/* The four ioctl arrays arrive as u64-encoded user pointers. */
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_plane *plane;
	struct drm_out_fence_state *fence_state = NULL;
	unsigned plane_mask;
	int ret = 0;
	unsigned int i, j, num_fences = 0;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EINVAL;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
			!dev->mode_config.async_page_flip)
		return -EINVAL;

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	/* Everything from here is redone from scratch after an -EDEADLK. */
	plane_mask = 0;
	copied_objs = 0;
	copied_props = 0;

	/* Outer loop: one mode object per entry; each object brings
	 * count_props (property, value) pairs from the flat arrays. */
	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		/* Takes a reference on obj; dropped on every exit path. */
		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_put(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_put(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			/* Property must actually be attached to this object. */
			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_put(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			/* May return -EDEADLK, handled by the retry below. */
			ret = atomic_set_prop(state, obj, prop, prop_value);
			if (ret) {
				drm_mode_object_put(obj);
				goto out;
			}

			copied_props++;
		}

		/* Remember updated planes so drm_atomic_clean_old_fb() can
		 * fix up the legacy plane->fb/crtc pointers afterwards. */
		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
		    !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
			plane = obj_to_plane(obj);
			plane_mask |= (1 << drm_plane_index(plane));
			plane->old_fb = plane->fb;
		}
		drm_mode_object_put(obj);
	}

	/* Allocate page-flip events and out-fences for the commit. */
	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
				     &num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		if (unlikely(drm_debug & DRM_UT_STATE))
			drm_atomic_print_state(state);

		ret = drm_atomic_commit(state);
	}

out:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	/* On success install the out-fence fds, otherwise unwind them. */
	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		/* Drop all object states, back off, and rebuild everything. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}