drm_atomic.c
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		if (!obj)
			continue;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
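
/*
 * Example (added for illustration, not part of the original drm_atomic.c):
 * a minimal sketch of the acquire-context and -EDEADLK backoff dance that
 * callers of drm_atomic_get_crtc_state() are expected to perform. The
 * function name and the choice to flip ->active are assumptions made only
 * for this sketch.
 */
static int __maybe_unused example_activate_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_acquire;
	}
	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	crtc_state->active = true;

	ret = drm_atomic_commit(state);
out:
	if (ret == -EDEADLK) {
		/* throw away cached state, drop the contended locks and retry */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_atomic_state_put(state);
out_acquire:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}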
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s32 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s32 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state and update
 * the enable property.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 const struct drm_display_mode *mode)
{
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 mode->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
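
/*
 * Example (added for illustration, not part of the original drm_atomic.c):
 * a small sketch of setting a kernel-internal mode on a CRTC that is being
 * pulled into an atomic update. The helper name is an assumption; -EDEADLK
 * handling is left to the caller's backoff loop.
 */
static int __maybe_unused example_set_mode(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   const struct drm_display_mode *mode)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* also updates crtc_state->enable for us */
	return drm_atomic_set_mode_for_crtc(crtc_state, mode);
}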
/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	if (blob == state->mode_blob)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 && expected_size != new_blob->length) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
	}

	*replaced |= drm_property_replace_blob(blob, new_blob);
	drm_property_blob_put(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get the property value from
 * @state: the state object to get the property value from
 * @property: the property to query
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
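
/*
 * Example (added for illustration, not part of the original drm_atomic.c):
 * a minimal sketch of pulling a plane into an atomic update that already
 * carries an acquire context, then re-routing it to a new CRTC and
 * framebuffer. The helper name is an assumption; -EDEADLK handling is left
 * to the caller's backoff loop.
 */
static int __maybe_unused example_move_plane(struct drm_atomic_state *state,
					     struct drm_plane *plane,
					     struct drm_crtc *crtc,
					     struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* also pulls in the old and new CRTC states as needed */
	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	drm_atomic_set_fb_for_plane(plane_state, fb);
	return 0;
}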
/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;
	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->rotation_property) {
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
			return -EINVAL;
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to get the property value from
 * @state: the state object to get the property value from
 * @property: the property to query
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
				 drm_get_format_name(state->fb->format->format,
						     &format_name));
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb) {
		struct drm_framebuffer *fb = state->fb;
		int i, n = fb->format->num_planes;
		struct drm_format_name_buf format_name;

		drm_printf(p, "\t\tformat=%s\n",
			   drm_get_format_name(fb->format->format, &format_name));
		drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
		drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
		drm_printf(p, "\t\tlayers:\n");
		for (i = 0; i < n; i++) {
			drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
			drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
		}
	}
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_private_obj_init - initialize private object
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	obj->state = state;
	obj->funcs = funcs;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It does not grab any locks as the caller is
 * expected to take care of any required locking.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
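
/*
 * Example (added for illustration, not part of the original drm_atomic.c):
 * a minimal sketch of a driver-private object built on top of
 * drm_private_obj/drm_private_state. All example_* names and the "level"
 * member are assumptions made only for this sketch.
 */
struct example_priv_state {
	struct drm_private_state base;
	int level;
};

static struct drm_private_state *
example_duplicate_state(struct drm_private_obj *obj)
{
	struct example_priv_state *old =
		container_of(obj->state, struct example_priv_state, base);
	struct example_priv_state *dup = kmemdup(old, sizeof(*dup), GFP_KERNEL);

	return dup ? &dup->base : NULL;
}

static void example_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(container_of(state, struct example_priv_state, base));
}

static const struct drm_private_state_funcs example_priv_funcs __maybe_unused = {
	.atomic_duplicate_state = example_duplicate_state,
	.atomic_destroy_state = example_destroy_state,
};

/*
 * Typical accessor a driver would provide: pull the private object into the
 * atomic update and upcast to the driver type. The name is illustrative only.
 */
static __maybe_unused struct example_priv_state *
example_get_priv_state(struct drm_atomic_state *state, struct drm_private_obj *obj)
{
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, obj);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return container_of(priv_state, struct example_priv_state, base);
}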
/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
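
/*
 * Example (added for illustration, not part of the original drm_atomic.c):
 * a small sketch of routing a connector to a CRTC inside an atomic update
 * that already carries an acquire context. The helper name is an
 * assumption; -EDEADLK handling is left to the caller's backoff loop.
 */
static int __maybe_unused
example_attach_connector(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_crtc *crtc)
{
	struct drm_connector_state *conn_state;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state))
		return PTR_ERR(conn_state);

	/* pulls in the new CRTC's state as needed */
	return drm_atomic_set_crtc_for_connector(conn_state, crtc);
}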
/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, driver
		 * silently rejects it and returns 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get the property value from
 * @state: the state object to get the property value from
 * @property: the property to query
 * @val: return location for the property value
  1075. *
  1076. * This function handles generic/core properties and calls out to driver's
  1077. * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
  1078. * consistent behavior you must call this function rather than the driver hook
  1079. * directly.
  1080. *
  1081. * RETURNS:
  1082. * Zero on success, error code on failure
  1083. */
  1084. static int
  1085. drm_atomic_connector_get_property(struct drm_connector *connector,
  1086. const struct drm_connector_state *state,
  1087. struct drm_property *property, uint64_t *val)
  1088. {
  1089. struct drm_device *dev = connector->dev;
  1090. struct drm_mode_config *config = &dev->mode_config;
  1091. if (property == config->prop_crtc_id) {
  1092. *val = (state->crtc) ? state->crtc->base.id : 0;
  1093. } else if (property == config->dpms_property) {
  1094. *val = connector->dpms;
  1095. } else if (property == config->tv_select_subconnector_property) {
  1096. *val = state->tv.subconnector;
  1097. } else if (property == config->tv_left_margin_property) {
  1098. *val = state->tv.margins.left;
  1099. } else if (property == config->tv_right_margin_property) {
  1100. *val = state->tv.margins.right;
  1101. } else if (property == config->tv_top_margin_property) {
  1102. *val = state->tv.margins.top;
  1103. } else if (property == config->tv_bottom_margin_property) {
  1104. *val = state->tv.margins.bottom;
  1105. } else if (property == config->tv_mode_property) {
  1106. *val = state->tv.mode;
  1107. } else if (property == config->tv_brightness_property) {
  1108. *val = state->tv.brightness;
  1109. } else if (property == config->tv_contrast_property) {
  1110. *val = state->tv.contrast;
  1111. } else if (property == config->tv_flicker_reduction_property) {
  1112. *val = state->tv.flicker_reduction;
  1113. } else if (property == config->tv_overscan_property) {
  1114. *val = state->tv.overscan;
  1115. } else if (property == config->tv_saturation_property) {
  1116. *val = state->tv.saturation;
  1117. } else if (property == config->tv_hue_property) {
  1118. *val = state->tv.hue;
  1119. } else if (property == config->link_status_property) {
  1120. *val = state->link_status;
  1121. } else if (property == config->aspect_ratio_property) {
  1122. *val = state->picture_aspect_ratio;
  1123. } else if (property == connector->scaling_mode_property) {
  1124. *val = state->scaling_mode;
  1125. } else if (connector->funcs->atomic_get_property) {
  1126. return connector->funcs->atomic_get_property(connector,
  1127. state, property, val);
  1128. } else {
  1129. return -EINVAL;
  1130. }
  1131. return 0;
  1132. }
  1133. int drm_atomic_get_property(struct drm_mode_object *obj,
  1134. struct drm_property *property, uint64_t *val)
  1135. {
  1136. struct drm_device *dev = property->dev;
  1137. int ret;
  1138. switch (obj->type) {
  1139. case DRM_MODE_OBJECT_CONNECTOR: {
  1140. struct drm_connector *connector = obj_to_connector(obj);
  1141. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  1142. ret = drm_atomic_connector_get_property(connector,
  1143. connector->state, property, val);
  1144. break;
  1145. }
  1146. case DRM_MODE_OBJECT_CRTC: {
  1147. struct drm_crtc *crtc = obj_to_crtc(obj);
  1148. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  1149. ret = drm_atomic_crtc_get_property(crtc,
  1150. crtc->state, property, val);
  1151. break;
  1152. }
  1153. case DRM_MODE_OBJECT_PLANE: {
  1154. struct drm_plane *plane = obj_to_plane(obj);
  1155. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  1156. ret = drm_atomic_plane_get_property(plane,
  1157. plane->state, property, val);
  1158. break;
  1159. }
  1160. default:
  1161. ret = -EINVAL;
  1162. break;
  1163. }
  1164. return ret;
  1165. }
  1166. /**
  1167. * drm_atomic_set_crtc_for_plane - set crtc for plane
  1168. * @plane_state: the plane whose incoming state to update
  1169. * @crtc: crtc to use for the plane
  1170. *
  1171. * Changing the assigned crtc for a plane requires us to grab the lock and state
  1172. * for the new crtc, as needed. This function takes care of all these details
  1173. * besides updating the pointer in the state object itself.
  1174. *
  1175. * Returns:
  1176. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1177. * then the w/w mutex code has detected a deadlock and the entire atomic
  1178. * sequence must be restarted. All other errors are fatal.
  1179. */
  1180. int
  1181. drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
  1182. struct drm_crtc *crtc)
  1183. {
  1184. struct drm_plane *plane = plane_state->plane;
  1185. struct drm_crtc_state *crtc_state;
  1186. if (plane_state->crtc) {
  1187. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  1188. plane_state->crtc);
  1189. if (WARN_ON(IS_ERR(crtc_state)))
  1190. return PTR_ERR(crtc_state);
  1191. crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
  1192. }
  1193. plane_state->crtc = crtc;
  1194. if (crtc) {
  1195. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  1196. crtc);
  1197. if (IS_ERR(crtc_state))
  1198. return PTR_ERR(crtc_state);
  1199. crtc_state->plane_mask |= (1 << drm_plane_index(plane));
  1200. }
  1201. if (crtc)
  1202. DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
  1203. plane_state, crtc->base.id, crtc->name);
  1204. else
  1205. DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
  1206. plane_state);
  1207. return 0;
  1208. }
  1209. EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
  1210. /**
  1211. * drm_atomic_set_fb_for_plane - set framebuffer for plane
  1212. * @plane_state: atomic state object for the plane
  1213. * @fb: fb to use for the plane
  1214. *
  1215. * Changing the assigned framebuffer for a plane requires us to grab a reference
  1216. * to the new fb and drop the reference to the old fb, if there is one. This
  1217. * function takes care of all these details besides updating the pointer in the
  1218. * state object itself.
  1219. */
  1220. void
  1221. drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
  1222. struct drm_framebuffer *fb)
  1223. {
  1224. if (fb)
  1225. DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
  1226. fb->base.id, plane_state);
  1227. else
  1228. DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
  1229. plane_state);
  1230. drm_framebuffer_assign(&plane_state->fb, fb);
  1231. }
  1232. EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
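/*
 * Example (illustrative sketch): a minimal helper that moves @plane to @crtc
 * showing @fb full-screen, inside an already allocated atomic @state. It
 * assumes @crtc and @fb are non-NULL; error handling (including the -EDEADLK
 * retry described above) is left to the caller.
 */
static int __maybe_unused example_update_plane(struct drm_atomic_state *state,
					       struct drm_plane *plane,
					       struct drm_crtc *crtc,
					       struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* Also pulls in the new crtc state and updates both plane_masks. */
	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Scan out the full framebuffer at the CRTC origin (source is 16.16). */
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	return 0;
}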
  1233. /**
  1234. * drm_atomic_set_fence_for_plane - set fence for plane
  1235. * @plane_state: atomic state object for the plane
  1236. * @fence: dma_fence to use for the plane
  1237. *
1238. * Helper to set up the plane_state fence in case it is not set yet.
1239. * By using this, drivers don't need to worry about whether the user chose
1240. * implicit or explicit fencing.
  1241. *
  1242. * This function will not set the fence to the state if it was set
  1243. * via explicit fencing interfaces on the atomic ioctl. In that case it will
  1244. * drop the reference to the fence as we are not storing it anywhere.
1245. * Otherwise, if &drm_plane_state.fence is not set, this function just sets it
1246. * to the received implicit fence. In both cases this function consumes a
  1247. * reference for @fence.
  1248. */
  1249. void
  1250. drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
  1251. struct dma_fence *fence)
  1252. {
  1253. if (plane_state->fence) {
  1254. dma_fence_put(fence);
  1255. return;
  1256. }
  1257. plane_state->fence = fence;
  1258. }
  1259. EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
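/*
 * Example (illustrative sketch): a driver's &drm_plane_helper_funcs.prepare_fb
 * hook handing over the implicit fence. example_fb_get_excl_fence() is a
 * hypothetical helper returning a referenced exclusive fence from the
 * framebuffer's backing storage, which is why the block is compiled out.
 */
#if 0
static int example_plane_prepare_fb(struct drm_plane *plane,
				    struct drm_plane_state *new_state)
{
	struct dma_fence *fence;

	if (!new_state->fb)
		return 0;

	fence = example_fb_get_excl_fence(new_state->fb);

	/*
	 * Consumes the reference either way: if userspace already supplied an
	 * explicit IN_FENCE_FD, the implicit fence is simply dropped.
	 */
	drm_atomic_set_fence_for_plane(new_state, fence);

	return 0;
}
#endif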
  1260. /**
  1261. * drm_atomic_set_crtc_for_connector - set crtc for connector
  1262. * @conn_state: atomic state object for the connector
  1263. * @crtc: crtc to use for the connector
  1264. *
  1265. * Changing the assigned crtc for a connector requires us to grab the lock and
  1266. * state for the new crtc, as needed. This function takes care of all these
  1267. * details besides updating the pointer in the state object itself.
  1268. *
  1269. * Returns:
  1270. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1271. * then the w/w mutex code has detected a deadlock and the entire atomic
  1272. * sequence must be restarted. All other errors are fatal.
  1273. */
  1274. int
  1275. drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
  1276. struct drm_crtc *crtc)
  1277. {
  1278. struct drm_crtc_state *crtc_state;
  1279. if (conn_state->crtc == crtc)
  1280. return 0;
  1281. if (conn_state->crtc) {
  1282. crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
  1283. conn_state->crtc);
  1284. crtc_state->connector_mask &=
  1285. ~(1 << drm_connector_index(conn_state->connector));
  1286. drm_connector_put(conn_state->connector);
  1287. conn_state->crtc = NULL;
  1288. }
  1289. if (crtc) {
  1290. crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
  1291. if (IS_ERR(crtc_state))
  1292. return PTR_ERR(crtc_state);
  1293. crtc_state->connector_mask |=
  1294. 1 << drm_connector_index(conn_state->connector);
  1295. drm_connector_get(conn_state->connector);
  1296. conn_state->crtc = crtc;
  1297. DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
  1298. conn_state, crtc->base.id, crtc->name);
  1299. } else {
  1300. DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
  1301. conn_state);
  1302. }
  1303. return 0;
  1304. }
  1305. EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
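/*
 * Example (illustrative sketch): routing @connector to @crtc within an atomic
 * @state, e.g. while building a modeset. Mode and enable handling are elided.
 */
static int __maybe_unused example_route_connector(struct drm_atomic_state *state,
						  struct drm_connector *connector,
						  struct drm_crtc *crtc)
{
	struct drm_connector_state *conn_state;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state))
		return PTR_ERR(conn_state);

	/* Updates connector_mask on both the old and the new crtc state. */
	return drm_atomic_set_crtc_for_connector(conn_state, crtc);
}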
  1306. /**
  1307. * drm_atomic_add_affected_connectors - add connectors for crtc
  1308. * @state: atomic state
  1309. * @crtc: DRM crtc
  1310. *
  1311. * This function walks the current configuration and adds all connectors
  1312. * currently using @crtc to the atomic configuration @state. Note that this
  1313. * function must acquire the connection mutex. This can potentially cause
1314. * unneeded serialization if the update is just for the planes on one crtc. Hence
  1315. * drivers and helpers should only call this when really needed (e.g. when a
  1316. * full modeset needs to happen due to some change).
  1317. *
  1318. * Returns:
  1319. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1320. * then the w/w mutex code has detected a deadlock and the entire atomic
  1321. * sequence must be restarted. All other errors are fatal.
  1322. */
  1323. int
  1324. drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
  1325. struct drm_crtc *crtc)
  1326. {
  1327. struct drm_mode_config *config = &state->dev->mode_config;
  1328. struct drm_connector *connector;
  1329. struct drm_connector_state *conn_state;
  1330. struct drm_connector_list_iter conn_iter;
  1331. struct drm_crtc_state *crtc_state;
  1332. int ret;
  1333. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  1334. if (IS_ERR(crtc_state))
  1335. return PTR_ERR(crtc_state);
  1336. ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
  1337. if (ret)
  1338. return ret;
  1339. DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
  1340. crtc->base.id, crtc->name, state);
  1341. /*
1342. * Changed connectors are already in @state, so we only need to look
  1343. * at the connector_mask in crtc_state.
  1344. */
  1345. drm_connector_list_iter_begin(state->dev, &conn_iter);
  1346. drm_for_each_connector_iter(connector, &conn_iter) {
  1347. if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
  1348. continue;
  1349. conn_state = drm_atomic_get_connector_state(state, connector);
  1350. if (IS_ERR(conn_state)) {
  1351. drm_connector_list_iter_end(&conn_iter);
  1352. return PTR_ERR(conn_state);
  1353. }
  1354. }
  1355. drm_connector_list_iter_end(&conn_iter);
  1356. return 0;
  1357. }
  1358. EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  1359. /**
  1360. * drm_atomic_add_affected_planes - add planes for crtc
  1361. * @state: atomic state
  1362. * @crtc: DRM crtc
  1363. *
  1364. * This function walks the current configuration and adds all planes
  1365. * currently used by @crtc to the atomic configuration @state. This is useful
1366. * when an atomic commit also needs to check all currently enabled planes on
  1367. * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
  1368. * to avoid special code to force-enable all planes.
  1369. *
  1370. * Since acquiring a plane state will always also acquire the w/w mutex of the
1371. * current CRTC for that plane (if there is any), adding all the plane states for
1372. * a CRTC will not reduce parallelism of atomic updates.
  1373. *
  1374. * Returns:
  1375. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1376. * then the w/w mutex code has detected a deadlock and the entire atomic
  1377. * sequence must be restarted. All other errors are fatal.
  1378. */
  1379. int
  1380. drm_atomic_add_affected_planes(struct drm_atomic_state *state,
  1381. struct drm_crtc *crtc)
  1382. {
  1383. struct drm_plane *plane;
  1384. WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
  1385. drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
  1386. struct drm_plane_state *plane_state =
  1387. drm_atomic_get_plane_state(state, plane);
  1388. if (IS_ERR(plane_state))
  1389. return PTR_ERR(plane_state);
  1390. }
  1391. return 0;
  1392. }
  1393. EXPORT_SYMBOL(drm_atomic_add_affected_planes);
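/*
 * Example (illustrative sketch): a driver's &drm_mode_config_funcs.atomic_check
 * hook pulling in all connectors and planes of every CRTC that undergoes a
 * full modeset, before running its own (elided) driver-specific checks.
 */
static int __maybe_unused example_atomic_check(struct drm_device *dev,
					       struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(crtc_state))
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}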
  1394. /**
  1395. * drm_atomic_check_only - check whether a given config would work
  1396. * @state: atomic configuration to check
  1397. *
  1398. * Note that this function can return -EDEADLK if the driver needed to acquire
  1399. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1400. * backoff dance and restart. All other errors are fatal.
  1401. *
  1402. * Returns:
  1403. * 0 on success, negative error code on failure.
  1404. */
  1405. int drm_atomic_check_only(struct drm_atomic_state *state)
  1406. {
  1407. struct drm_device *dev = state->dev;
  1408. struct drm_mode_config *config = &dev->mode_config;
  1409. struct drm_plane *plane;
  1410. struct drm_plane_state *plane_state;
  1411. struct drm_crtc *crtc;
  1412. struct drm_crtc_state *crtc_state;
  1413. int i, ret = 0;
  1414. DRM_DEBUG_ATOMIC("checking %p\n", state);
  1415. for_each_new_plane_in_state(state, plane, plane_state, i) {
  1416. ret = drm_atomic_plane_check(plane, plane_state);
  1417. if (ret) {
  1418. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
  1419. plane->base.id, plane->name);
  1420. return ret;
  1421. }
  1422. }
  1423. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1424. ret = drm_atomic_crtc_check(crtc, crtc_state);
  1425. if (ret) {
  1426. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
  1427. crtc->base.id, crtc->name);
  1428. return ret;
  1429. }
  1430. }
  1431. if (config->funcs->atomic_check)
  1432. ret = config->funcs->atomic_check(state->dev, state);
  1433. if (ret)
  1434. return ret;
  1435. if (!state->allow_modeset) {
  1436. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1437. if (drm_atomic_crtc_needs_modeset(crtc_state)) {
  1438. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
  1439. crtc->base.id, crtc->name);
  1440. return -EINVAL;
  1441. }
  1442. }
  1443. }
  1444. return 0;
  1445. }
  1446. EXPORT_SYMBOL(drm_atomic_check_only);
  1447. /**
  1448. * drm_atomic_commit - commit configuration atomically
1449. * @state: atomic configuration to commit
  1450. *
  1451. * Note that this function can return -EDEADLK if the driver needed to acquire
  1452. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1453. * backoff dance and restart. All other errors are fatal.
  1454. *
  1455. * This function will take its own reference on @state.
  1456. * Callers should always release their reference with drm_atomic_state_put().
  1457. *
  1458. * Returns:
  1459. * 0 on success, negative error code on failure.
  1460. */
  1461. int drm_atomic_commit(struct drm_atomic_state *state)
  1462. {
  1463. struct drm_mode_config *config = &state->dev->mode_config;
  1464. int ret;
  1465. ret = drm_atomic_check_only(state);
  1466. if (ret)
  1467. return ret;
  1468. DRM_DEBUG_ATOMIC("committing %p\n", state);
  1469. return config->funcs->atomic_commit(state->dev, state, false);
  1470. }
  1471. EXPORT_SYMBOL(drm_atomic_commit);
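/*
 * Example (illustrative sketch): the w/w backoff dance referred to throughout
 * the kerneldoc above. example_build_state() stands in for whatever updates
 * the state and may itself return -EDEADLK; it is hypothetical, which is why
 * the block is compiled out.
 */
#if 0
static int example_commit_with_backoff(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}
	state->acquire_ctx = &ctx;

retry:
	ret = example_build_state(state);
	if (!ret)
		ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	/* drm_atomic_commit() took its own reference, drop ours. */
	drm_atomic_state_put(state);
out:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}
#endif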
  1472. /**
  1473. * drm_atomic_nonblocking_commit - atomic nonblocking commit
1474. * @state: atomic configuration to commit
  1475. *
  1476. * Note that this function can return -EDEADLK if the driver needed to acquire
  1477. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1478. * backoff dance and restart. All other errors are fatal.
  1479. *
  1480. * This function will take its own reference on @state.
  1481. * Callers should always release their reference with drm_atomic_state_put().
  1482. *
  1483. * Returns:
  1484. * 0 on success, negative error code on failure.
  1485. */
  1486. int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
  1487. {
  1488. struct drm_mode_config *config = &state->dev->mode_config;
  1489. int ret;
  1490. ret = drm_atomic_check_only(state);
  1491. if (ret)
  1492. return ret;
  1493. DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
  1494. return config->funcs->atomic_commit(state->dev, state, true);
  1495. }
  1496. EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  1497. static void drm_atomic_print_state(const struct drm_atomic_state *state)
  1498. {
  1499. struct drm_printer p = drm_info_printer(state->dev->dev);
  1500. struct drm_plane *plane;
  1501. struct drm_plane_state *plane_state;
  1502. struct drm_crtc *crtc;
  1503. struct drm_crtc_state *crtc_state;
  1504. struct drm_connector *connector;
  1505. struct drm_connector_state *connector_state;
  1506. int i;
1507. DRM_DEBUG_ATOMIC("printing state %p\n", state);
  1508. for_each_new_plane_in_state(state, plane, plane_state, i)
  1509. drm_atomic_plane_print_state(&p, plane_state);
  1510. for_each_new_crtc_in_state(state, crtc, crtc_state, i)
  1511. drm_atomic_crtc_print_state(&p, crtc_state);
  1512. for_each_new_connector_in_state(state, connector, connector_state, i)
  1513. drm_atomic_connector_print_state(&p, connector_state);
  1514. }
  1515. static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
  1516. bool take_locks)
  1517. {
  1518. struct drm_mode_config *config = &dev->mode_config;
  1519. struct drm_plane *plane;
  1520. struct drm_crtc *crtc;
  1521. struct drm_connector *connector;
  1522. struct drm_connector_list_iter conn_iter;
  1523. if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
  1524. return;
  1525. list_for_each_entry(plane, &config->plane_list, head) {
  1526. if (take_locks)
  1527. drm_modeset_lock(&plane->mutex, NULL);
  1528. drm_atomic_plane_print_state(p, plane->state);
  1529. if (take_locks)
  1530. drm_modeset_unlock(&plane->mutex);
  1531. }
  1532. list_for_each_entry(crtc, &config->crtc_list, head) {
  1533. if (take_locks)
  1534. drm_modeset_lock(&crtc->mutex, NULL);
  1535. drm_atomic_crtc_print_state(p, crtc->state);
  1536. if (take_locks)
  1537. drm_modeset_unlock(&crtc->mutex);
  1538. }
  1539. drm_connector_list_iter_begin(dev, &conn_iter);
  1540. if (take_locks)
  1541. drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
  1542. drm_for_each_connector_iter(connector, &conn_iter)
  1543. drm_atomic_connector_print_state(p, connector->state);
  1544. if (take_locks)
  1545. drm_modeset_unlock(&dev->mode_config.connection_mutex);
  1546. drm_connector_list_iter_end(&conn_iter);
  1547. }
  1548. /**
  1549. * drm_state_dump - dump entire device atomic state
  1550. * @dev: the drm device
  1551. * @p: where to print the state to
  1552. *
  1553. * Just for debugging. Drivers might want an option to dump state
1554. * to dmesg in case of error IRQs. (Hint: you probably want to
1555. * ratelimit this!)
1556. *
1557. * The caller must hold all modeset locks, e.g. via drm_modeset_lock_all();
1558. * if this is instead called from an error IRQ handler, it should not be
1559. * enabled by default. (I.e. if you are debugging errors you might not care
1560. * that this is racy, but calling this without all modeset locks held is
1561. * not inherently safe.)
  1562. */
  1563. void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
  1564. {
  1565. __drm_state_dump(dev, p, false);
  1566. }
  1567. EXPORT_SYMBOL(drm_state_dump);
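/*
 * Example (illustrative sketch): dumping state from a driver's error handler,
 * ratelimited as the kerneldoc above suggests. The caller is expected to hold
 * all modeset locks, or to knowingly accept the race while debugging.
 */
static void __maybe_unused example_dump_state_on_error(struct drm_device *dev)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	if (printk_ratelimit())
		drm_state_dump(dev, &p);
}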
  1568. #ifdef CONFIG_DEBUG_FS
  1569. static int drm_state_info(struct seq_file *m, void *data)
  1570. {
  1571. struct drm_info_node *node = (struct drm_info_node *) m->private;
  1572. struct drm_device *dev = node->minor->dev;
  1573. struct drm_printer p = drm_seq_file_printer(m);
  1574. __drm_state_dump(dev, &p, true);
  1575. return 0;
  1576. }
  1577. /* any use in debugfs files to dump individual planes/crtc/etc? */
  1578. static const struct drm_info_list drm_atomic_debugfs_list[] = {
  1579. {"state", drm_state_info, 0},
  1580. };
  1581. int drm_atomic_debugfs_init(struct drm_minor *minor)
  1582. {
  1583. return drm_debugfs_create_files(drm_atomic_debugfs_list,
  1584. ARRAY_SIZE(drm_atomic_debugfs_list),
  1585. minor->debugfs_root, minor);
  1586. }
  1587. #endif
  1588. /*
1589. * The big monster ioctl
  1590. */
  1591. static struct drm_pending_vblank_event *create_vblank_event(
  1592. struct drm_device *dev, uint64_t user_data)
  1593. {
  1594. struct drm_pending_vblank_event *e = NULL;
  1595. e = kzalloc(sizeof *e, GFP_KERNEL);
  1596. if (!e)
  1597. return NULL;
  1598. e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
  1599. e->event.base.length = sizeof(e->event);
  1600. e->event.user_data = user_data;
  1601. return e;
  1602. }
  1603. int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
  1604. struct drm_connector *connector,
  1605. int mode)
  1606. {
  1607. struct drm_connector *tmp_connector;
  1608. struct drm_connector_state *new_conn_state;
  1609. struct drm_crtc *crtc;
  1610. struct drm_crtc_state *crtc_state;
  1611. int i, ret, old_mode = connector->dpms;
  1612. bool active = false;
  1613. ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
  1614. state->acquire_ctx);
  1615. if (ret)
  1616. return ret;
  1617. if (mode != DRM_MODE_DPMS_ON)
  1618. mode = DRM_MODE_DPMS_OFF;
  1619. connector->dpms = mode;
  1620. crtc = connector->state->crtc;
  1621. if (!crtc)
  1622. goto out;
  1623. ret = drm_atomic_add_affected_connectors(state, crtc);
  1624. if (ret)
  1625. goto out;
  1626. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  1627. if (IS_ERR(crtc_state)) {
  1628. ret = PTR_ERR(crtc_state);
  1629. goto out;
  1630. }
  1631. for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
  1632. if (new_conn_state->crtc != crtc)
  1633. continue;
  1634. if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
  1635. active = true;
  1636. break;
  1637. }
  1638. }
  1639. crtc_state->active = active;
  1640. ret = drm_atomic_commit(state);
  1641. out:
  1642. if (ret != 0)
  1643. connector->dpms = old_mode;
  1644. return ret;
  1645. }
  1646. int drm_atomic_set_property(struct drm_atomic_state *state,
  1647. struct drm_mode_object *obj,
  1648. struct drm_property *prop,
  1649. uint64_t prop_value)
  1650. {
  1651. struct drm_mode_object *ref;
  1652. int ret;
  1653. if (!drm_property_change_valid_get(prop, prop_value, &ref))
  1654. return -EINVAL;
  1655. switch (obj->type) {
  1656. case DRM_MODE_OBJECT_CONNECTOR: {
  1657. struct drm_connector *connector = obj_to_connector(obj);
  1658. struct drm_connector_state *connector_state;
  1659. connector_state = drm_atomic_get_connector_state(state, connector);
  1660. if (IS_ERR(connector_state)) {
  1661. ret = PTR_ERR(connector_state);
  1662. break;
  1663. }
  1664. ret = drm_atomic_connector_set_property(connector,
  1665. connector_state, prop, prop_value);
  1666. break;
  1667. }
  1668. case DRM_MODE_OBJECT_CRTC: {
  1669. struct drm_crtc *crtc = obj_to_crtc(obj);
  1670. struct drm_crtc_state *crtc_state;
  1671. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  1672. if (IS_ERR(crtc_state)) {
  1673. ret = PTR_ERR(crtc_state);
  1674. break;
  1675. }
  1676. ret = drm_atomic_crtc_set_property(crtc,
  1677. crtc_state, prop, prop_value);
  1678. break;
  1679. }
  1680. case DRM_MODE_OBJECT_PLANE: {
  1681. struct drm_plane *plane = obj_to_plane(obj);
  1682. struct drm_plane_state *plane_state;
  1683. plane_state = drm_atomic_get_plane_state(state, plane);
  1684. if (IS_ERR(plane_state)) {
  1685. ret = PTR_ERR(plane_state);
  1686. break;
  1687. }
  1688. ret = drm_atomic_plane_set_property(plane,
  1689. plane_state, prop, prop_value);
  1690. break;
  1691. }
  1692. default:
  1693. ret = -EINVAL;
  1694. break;
  1695. }
  1696. drm_property_change_valid_put(prop, ref);
  1697. return ret;
  1698. }
  1699. /**
1700. * drm_atomic_clean_old_fb - Unset old_fb pointers and set plane->fb pointers.
  1701. *
  1702. * @dev: drm device to check.
  1703. * @plane_mask: plane mask for planes that were updated.
  1704. * @ret: return value, can be -EDEADLK for a retry.
  1705. *
  1706. * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
  1707. * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
  1708. * is a common operation for each atomic update, so this call is split off as a
  1709. * helper.
  1710. */
  1711. void drm_atomic_clean_old_fb(struct drm_device *dev,
  1712. unsigned plane_mask,
  1713. int ret)
  1714. {
  1715. struct drm_plane *plane;
1716. /* If the update succeeded, fix up the legacy plane crtc/fb pointers before
1717. * dropping locks (i.e. while it is still safe to deref plane->state). We
  1718. * need to do this here because the driver entry points cannot
  1719. * distinguish between legacy and atomic ioctls.
  1720. */
  1721. drm_for_each_plane_mask(plane, dev, plane_mask) {
  1722. if (ret == 0) {
  1723. struct drm_framebuffer *new_fb = plane->state->fb;
  1724. if (new_fb)
  1725. drm_framebuffer_get(new_fb);
  1726. plane->fb = new_fb;
  1727. plane->crtc = plane->state->crtc;
  1728. if (plane->old_fb)
  1729. drm_framebuffer_put(plane->old_fb);
  1730. }
  1731. plane->old_fb = NULL;
  1732. }
  1733. }
  1734. EXPORT_SYMBOL(drm_atomic_clean_old_fb);
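/*
 * Example (illustrative sketch): the old_fb bookkeeping around a legacy-style
 * framebuffer flip, mirroring what the atomic ioctl below does. Geometry is
 * assumed unchanged and the -EDEADLK retry is left to the caller.
 */
static int __maybe_unused example_legacy_flip(struct drm_device *dev,
					      struct drm_atomic_state *state,
					      struct drm_plane *plane,
					      struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	unsigned plane_mask = 0;
	int ret;

	/* Mirror the legacy pointer so the cleanup below can fix it up. */
	plane->old_fb = plane->fb;
	plane_mask |= 1 << drm_plane_index(plane);

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto out;
	}

	drm_atomic_set_fb_for_plane(plane_state, fb);
	ret = drm_atomic_commit(state);

out:
	/* Resets plane->old_fb and, on success, updates plane->fb/plane->crtc. */
	drm_atomic_clean_old_fb(dev, plane_mask, ret);
	return ret;
}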
  1735. /**
  1736. * DOC: explicit fencing properties
  1737. *
  1738. * Explicit fencing allows userspace to control the buffer synchronization
1739. * between devices. A fence or a group of fences is transferred to/from
1740. * userspace using Sync File fds, and there are two DRM properties for that:
  1741. * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
  1742. * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
  1743. *
1744. * By contrast, with implicit fencing the kernel keeps track of any
  1745. * ongoing rendering, and automatically ensures that the atomic update waits
  1746. * for any pending rendering to complete. For shared buffers represented with
  1747. * a &struct dma_buf this is tracked in &struct reservation_object.
  1748. * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
  1749. * whereas explicit fencing is what Android wants.
  1750. *
  1751. * "IN_FENCE_FD”:
  1752. * Use this property to pass a fence that DRM should wait on before
  1753. * proceeding with the Atomic Commit request and show the framebuffer for
  1754. * the plane on the screen. The fence can be either a normal fence or a
1755. * merged one; the sync_file framework will handle both cases and use a
  1756. * fence_array if a merged fence is received. Passing -1 here means no
  1757. * fences to wait on.
  1758. *
  1759. * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
  1760. * it will only check if the Sync File is a valid one.
  1761. *
  1762. * On the driver side the fence is stored on the @fence parameter of
  1763. * &struct drm_plane_state. Drivers which also support implicit fencing
  1764. * should set the implicit fence using drm_atomic_set_fence_for_plane(),
  1765. * to make sure there's consistent behaviour between drivers in precedence
  1766. * of implicit vs. explicit fencing.
  1767. *
  1768. * "OUT_FENCE_PTR”:
  1769. * Use this property to pass a file descriptor pointer to DRM. Once the
  1770. * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
  1771. * the file descriptor number of a Sync File. This Sync File contains the
  1772. * CRTC fence that will be signaled when all framebuffers present on the
1773. * Atomic Commit request for that given CRTC are scanned out on the
  1774. * screen.
  1775. *
1776. * The Atomic Commit request fails if an invalid pointer is passed. If the
1777. * Atomic Commit request fails for any other reason, the out fence fd
1778. * returned will be -1. On an Atomic Commit with the
  1779. * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
  1780. *
  1781. * Note that out-fences don't have a special interface to drivers and are
  1782. * internally represented by a &struct drm_pending_vblank_event in struct
  1783. * &drm_crtc_state, which is also used by the nonblocking atomic commit
  1784. * helpers and for the DRM event handling for existing userspace.
  1785. */
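/*
 * Example (illustrative, userspace-side sketch, hence compiled out): wiring
 * IN_FENCE_FD and OUT_FENCE_PTR through libdrm's atomic API. The object and
 * property IDs are assumed to have been looked up beforehand and are purely
 * hypothetical parameters here.
 */
#if 0
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int example_commit_with_fences(int fd, uint32_t plane_id,
				      uint32_t in_fence_fd_prop,
				      uint32_t crtc_id,
				      uint32_t out_fence_ptr_prop,
				      int in_fence_fd, int *out_fence_fd)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	/* Only wait on an explicit fence if we actually have one. */
	if (in_fence_fd >= 0)
		drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop,
					 in_fence_fd);

	/* The kernel writes a sync_file fd for this CRTC into *out_fence_fd. */
	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
				 (uint64_t)(uintptr_t)out_fence_fd);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	return ret;
}
#endif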
  1786. struct drm_out_fence_state {
  1787. s32 __user *out_fence_ptr;
  1788. struct sync_file *sync_file;
  1789. int fd;
  1790. };
  1791. static int setup_out_fence(struct drm_out_fence_state *fence_state,
  1792. struct dma_fence *fence)
  1793. {
  1794. fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
  1795. if (fence_state->fd < 0)
  1796. return fence_state->fd;
  1797. if (put_user(fence_state->fd, fence_state->out_fence_ptr))
  1798. return -EFAULT;
  1799. fence_state->sync_file = sync_file_create(fence);
  1800. if (!fence_state->sync_file)
  1801. return -ENOMEM;
  1802. return 0;
  1803. }
  1804. static int prepare_crtc_signaling(struct drm_device *dev,
  1805. struct drm_atomic_state *state,
  1806. struct drm_mode_atomic *arg,
  1807. struct drm_file *file_priv,
  1808. struct drm_out_fence_state **fence_state,
  1809. unsigned int *num_fences)
  1810. {
  1811. struct drm_crtc *crtc;
  1812. struct drm_crtc_state *crtc_state;
  1813. int i, c = 0, ret;
  1814. if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
  1815. return 0;
  1816. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1817. s32 __user *fence_ptr;
  1818. fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
  1819. if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
  1820. struct drm_pending_vblank_event *e;
  1821. e = create_vblank_event(dev, arg->user_data);
  1822. if (!e)
  1823. return -ENOMEM;
  1824. crtc_state->event = e;
  1825. }
  1826. if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
  1827. struct drm_pending_vblank_event *e = crtc_state->event;
  1828. if (!file_priv)
  1829. continue;
  1830. ret = drm_event_reserve_init(dev, file_priv, &e->base,
  1831. &e->event.base);
  1832. if (ret) {
  1833. kfree(e);
  1834. crtc_state->event = NULL;
  1835. return ret;
  1836. }
  1837. }
  1838. if (fence_ptr) {
  1839. struct dma_fence *fence;
  1840. struct drm_out_fence_state *f;
  1841. f = krealloc(*fence_state, sizeof(**fence_state) *
  1842. (*num_fences + 1), GFP_KERNEL);
  1843. if (!f)
  1844. return -ENOMEM;
  1845. memset(&f[*num_fences], 0, sizeof(*f));
  1846. f[*num_fences].out_fence_ptr = fence_ptr;
  1847. *fence_state = f;
  1848. fence = drm_crtc_create_fence(crtc);
  1849. if (!fence)
  1850. return -ENOMEM;
  1851. ret = setup_out_fence(&f[(*num_fences)++], fence);
  1852. if (ret) {
  1853. dma_fence_put(fence);
  1854. return ret;
  1855. }
  1856. crtc_state->event->base.fence = fence;
  1857. }
  1858. c++;
  1859. }
  1860. /*
1861. * If this flag is set, userspace expects an event that can never arrive,
1862. * because the request contains no CRTC that could signal it.
  1863. */
  1864. if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
  1865. return -EINVAL;
  1866. return 0;
  1867. }
  1868. static void complete_crtc_signaling(struct drm_device *dev,
  1869. struct drm_atomic_state *state,
  1870. struct drm_out_fence_state *fence_state,
  1871. unsigned int num_fences,
  1872. bool install_fds)
  1873. {
  1874. struct drm_crtc *crtc;
  1875. struct drm_crtc_state *crtc_state;
  1876. int i;
  1877. if (install_fds) {
  1878. for (i = 0; i < num_fences; i++)
  1879. fd_install(fence_state[i].fd,
  1880. fence_state[i].sync_file->file);
  1881. kfree(fence_state);
  1882. return;
  1883. }
  1884. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1885. struct drm_pending_vblank_event *event = crtc_state->event;
  1886. /*
  1887. * Free the allocated event. drm_atomic_helper_setup_commit
  1888. * can allocate an event too, so only free it if it's ours
  1889. * to prevent a double free in drm_atomic_state_clear.
  1890. */
  1891. if (event && (event->base.fence || event->base.file_priv)) {
  1892. drm_event_cancel_free(dev, &event->base);
  1893. crtc_state->event = NULL;
  1894. }
  1895. }
  1896. if (!fence_state)
  1897. return;
  1898. for (i = 0; i < num_fences; i++) {
  1899. if (fence_state[i].sync_file)
  1900. fput(fence_state[i].sync_file->file);
  1901. if (fence_state[i].fd >= 0)
  1902. put_unused_fd(fence_state[i].fd);
1903. /* If writing -1 back to userspace fails, just log it */
  1904. if (fence_state[i].out_fence_ptr &&
  1905. put_user(-1, fence_state[i].out_fence_ptr))
  1906. DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
  1907. }
  1908. kfree(fence_state);
  1909. }
  1910. int drm_mode_atomic_ioctl(struct drm_device *dev,
  1911. void *data, struct drm_file *file_priv)
  1912. {
  1913. struct drm_mode_atomic *arg = data;
  1914. uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
  1915. uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
  1916. uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
  1917. uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
  1918. unsigned int copied_objs, copied_props;
  1919. struct drm_atomic_state *state;
  1920. struct drm_modeset_acquire_ctx ctx;
  1921. struct drm_plane *plane;
  1922. struct drm_out_fence_state *fence_state;
  1923. unsigned plane_mask;
  1924. int ret = 0;
  1925. unsigned int i, j, num_fences;
  1926. /* disallow for drivers not supporting atomic: */
  1927. if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
  1928. return -EINVAL;
  1929. /* disallow for userspace that has not enabled atomic cap (even
  1930. * though this may be a bit overkill, since legacy userspace
  1931. * wouldn't know how to call this ioctl)
  1932. */
  1933. if (!file_priv->atomic)
  1934. return -EINVAL;
  1935. if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
  1936. return -EINVAL;
  1937. if (arg->reserved)
  1938. return -EINVAL;
  1939. if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
  1940. !dev->mode_config.async_page_flip)
  1941. return -EINVAL;
  1942. /* can't test and expect an event at the same time. */
  1943. if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
  1944. (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
  1945. return -EINVAL;
  1946. drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
  1947. state = drm_atomic_state_alloc(dev);
  1948. if (!state)
  1949. return -ENOMEM;
  1950. state->acquire_ctx = &ctx;
  1951. state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
  1952. retry:
  1953. plane_mask = 0;
  1954. copied_objs = 0;
  1955. copied_props = 0;
  1956. fence_state = NULL;
  1957. num_fences = 0;
  1958. for (i = 0; i < arg->count_objs; i++) {
  1959. uint32_t obj_id, count_props;
  1960. struct drm_mode_object *obj;
  1961. if (get_user(obj_id, objs_ptr + copied_objs)) {
  1962. ret = -EFAULT;
  1963. goto out;
  1964. }
  1965. obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
  1966. if (!obj) {
  1967. ret = -ENOENT;
  1968. goto out;
  1969. }
  1970. if (!obj->properties) {
  1971. drm_mode_object_put(obj);
  1972. ret = -ENOENT;
  1973. goto out;
  1974. }
  1975. if (get_user(count_props, count_props_ptr + copied_objs)) {
  1976. drm_mode_object_put(obj);
  1977. ret = -EFAULT;
  1978. goto out;
  1979. }
  1980. copied_objs++;
  1981. for (j = 0; j < count_props; j++) {
  1982. uint32_t prop_id;
  1983. uint64_t prop_value;
  1984. struct drm_property *prop;
  1985. if (get_user(prop_id, props_ptr + copied_props)) {
  1986. drm_mode_object_put(obj);
  1987. ret = -EFAULT;
  1988. goto out;
  1989. }
  1990. prop = drm_mode_obj_find_prop_id(obj, prop_id);
  1991. if (!prop) {
  1992. drm_mode_object_put(obj);
  1993. ret = -ENOENT;
  1994. goto out;
  1995. }
  1996. if (copy_from_user(&prop_value,
  1997. prop_values_ptr + copied_props,
  1998. sizeof(prop_value))) {
  1999. drm_mode_object_put(obj);
  2000. ret = -EFAULT;
  2001. goto out;
  2002. }
  2003. ret = drm_atomic_set_property(state, obj, prop,
  2004. prop_value);
  2005. if (ret) {
  2006. drm_mode_object_put(obj);
  2007. goto out;
  2008. }
  2009. copied_props++;
  2010. }
  2011. if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
  2012. !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
  2013. plane = obj_to_plane(obj);
  2014. plane_mask |= (1 << drm_plane_index(plane));
  2015. plane->old_fb = plane->fb;
  2016. }
  2017. drm_mode_object_put(obj);
  2018. }
  2019. ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
  2020. &num_fences);
  2021. if (ret)
  2022. goto out;
  2023. if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
  2024. ret = drm_atomic_check_only(state);
  2025. } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
  2026. ret = drm_atomic_nonblocking_commit(state);
  2027. } else {
  2028. if (unlikely(drm_debug & DRM_UT_STATE))
  2029. drm_atomic_print_state(state);
  2030. ret = drm_atomic_commit(state);
  2031. }
  2032. out:
  2033. drm_atomic_clean_old_fb(dev, plane_mask, ret);
  2034. complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
  2035. if (ret == -EDEADLK) {
  2036. drm_atomic_state_clear(state);
  2037. ret = drm_modeset_backoff(&ctx);
  2038. if (!ret)
  2039. goto retry;
  2040. }
  2041. drm_atomic_state_put(state);
  2042. drm_modeset_drop_locks(&ctx);
  2043. drm_modeset_acquire_fini(&ctx);
  2044. return ret;
  2045. }