/* drm_atomic.c — DRM atomic modeset state handling core. */
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */
  27. #include <drm/drmP.h>
  28. #include <drm/drm_atomic.h>
  29. #include <drm/drm_mode.h>
  30. #include <drm/drm_print.h>
  31. #include <drm/drm_writeback.h>
  32. #include <linux/sync_file.h>
  33. #include "drm_crtc_internal.h"
  34. #include "drm_internal.h"
  35. void __drm_crtc_commit_free(struct kref *kref)
  36. {
  37. struct drm_crtc_commit *commit =
  38. container_of(kref, struct drm_crtc_commit, ref);
  39. kfree(commit);
  40. }
  41. EXPORT_SYMBOL(__drm_crtc_commit_free);
  42. /**
  43. * drm_atomic_state_default_release -
  44. * release memory initialized by drm_atomic_state_init
  45. * @state: atomic state
  46. *
  47. * Free all the memory allocated by drm_atomic_state_init.
  48. * This should only be used by drivers which are still subclassing
  49. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  50. */
  51. void drm_atomic_state_default_release(struct drm_atomic_state *state)
  52. {
  53. kfree(state->connectors);
  54. kfree(state->crtcs);
  55. kfree(state->planes);
  56. kfree(state->private_objs);
  57. }
  58. EXPORT_SYMBOL(drm_atomic_state_default_release);
  59. /**
  60. * drm_atomic_state_init - init new atomic state
  61. * @dev: DRM device
  62. * @state: atomic state
  63. *
  64. * Default implementation for filling in a new atomic state.
  65. * This should only be used by drivers which are still subclassing
  66. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  67. */
  68. int
  69. drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
  70. {
  71. kref_init(&state->ref);
  72. /* TODO legacy paths should maybe do a better job about
  73. * setting this appropriately?
  74. */
  75. state->allow_modeset = true;
  76. state->crtcs = kcalloc(dev->mode_config.num_crtc,
  77. sizeof(*state->crtcs), GFP_KERNEL);
  78. if (!state->crtcs)
  79. goto fail;
  80. state->planes = kcalloc(dev->mode_config.num_total_plane,
  81. sizeof(*state->planes), GFP_KERNEL);
  82. if (!state->planes)
  83. goto fail;
  84. state->dev = dev;
  85. DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
  86. return 0;
  87. fail:
  88. drm_atomic_state_default_release(state);
  89. return -ENOMEM;
  90. }
  91. EXPORT_SYMBOL(drm_atomic_state_init);
  92. /**
  93. * drm_atomic_state_alloc - allocate atomic state
  94. * @dev: DRM device
  95. *
  96. * This allocates an empty atomic state to track updates.
  97. */
  98. struct drm_atomic_state *
  99. drm_atomic_state_alloc(struct drm_device *dev)
  100. {
  101. struct drm_mode_config *config = &dev->mode_config;
  102. if (!config->funcs->atomic_state_alloc) {
  103. struct drm_atomic_state *state;
  104. state = kzalloc(sizeof(*state), GFP_KERNEL);
  105. if (!state)
  106. return NULL;
  107. if (drm_atomic_state_init(dev, state) < 0) {
  108. kfree(state);
  109. return NULL;
  110. }
  111. return state;
  112. }
  113. return config->funcs->atomic_state_alloc(dev);
  114. }
  115. EXPORT_SYMBOL(drm_atomic_state_alloc);
  116. /**
  117. * drm_atomic_state_default_clear - clear base atomic state
  118. * @state: atomic state
  119. *
  120. * Default implementation for clearing atomic state.
  121. * This should only be used by drivers which are still subclassing
  122. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  123. */
  124. void drm_atomic_state_default_clear(struct drm_atomic_state *state)
  125. {
  126. struct drm_device *dev = state->dev;
  127. struct drm_mode_config *config = &dev->mode_config;
  128. int i;
  129. DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
  130. for (i = 0; i < state->num_connector; i++) {
  131. struct drm_connector *connector = state->connectors[i].ptr;
  132. if (!connector)
  133. continue;
  134. connector->funcs->atomic_destroy_state(connector,
  135. state->connectors[i].state);
  136. state->connectors[i].ptr = NULL;
  137. state->connectors[i].state = NULL;
  138. state->connectors[i].old_state = NULL;
  139. state->connectors[i].new_state = NULL;
  140. drm_connector_put(connector);
  141. }
  142. for (i = 0; i < config->num_crtc; i++) {
  143. struct drm_crtc *crtc = state->crtcs[i].ptr;
  144. if (!crtc)
  145. continue;
  146. crtc->funcs->atomic_destroy_state(crtc,
  147. state->crtcs[i].state);
  148. state->crtcs[i].ptr = NULL;
  149. state->crtcs[i].state = NULL;
  150. state->crtcs[i].old_state = NULL;
  151. state->crtcs[i].new_state = NULL;
  152. }
  153. for (i = 0; i < config->num_total_plane; i++) {
  154. struct drm_plane *plane = state->planes[i].ptr;
  155. if (!plane)
  156. continue;
  157. plane->funcs->atomic_destroy_state(plane,
  158. state->planes[i].state);
  159. state->planes[i].ptr = NULL;
  160. state->planes[i].state = NULL;
  161. state->planes[i].old_state = NULL;
  162. state->planes[i].new_state = NULL;
  163. }
  164. for (i = 0; i < state->num_private_objs; i++) {
  165. struct drm_private_obj *obj = state->private_objs[i].ptr;
  166. obj->funcs->atomic_destroy_state(obj,
  167. state->private_objs[i].state);
  168. state->private_objs[i].ptr = NULL;
  169. state->private_objs[i].state = NULL;
  170. state->private_objs[i].old_state = NULL;
  171. state->private_objs[i].new_state = NULL;
  172. }
  173. state->num_private_objs = 0;
  174. if (state->fake_commit) {
  175. drm_crtc_commit_put(state->fake_commit);
  176. state->fake_commit = NULL;
  177. }
  178. }
  179. EXPORT_SYMBOL(drm_atomic_state_default_clear);
  180. /**
  181. * drm_atomic_state_clear - clear state object
  182. * @state: atomic state
  183. *
  184. * When the w/w mutex algorithm detects a deadlock we need to back off and drop
  185. * all locks. So someone else could sneak in and change the current modeset
  186. * configuration. Which means that all the state assembled in @state is no
  187. * longer an atomic update to the current state, but to some arbitrary earlier
  188. * state. Which could break assumptions the driver's
  189. * &drm_mode_config_funcs.atomic_check likely relies on.
  190. *
  191. * Hence we must clear all cached state and completely start over, using this
  192. * function.
  193. */
  194. void drm_atomic_state_clear(struct drm_atomic_state *state)
  195. {
  196. struct drm_device *dev = state->dev;
  197. struct drm_mode_config *config = &dev->mode_config;
  198. if (config->funcs->atomic_state_clear)
  199. config->funcs->atomic_state_clear(state);
  200. else
  201. drm_atomic_state_default_clear(state);
  202. }
  203. EXPORT_SYMBOL(drm_atomic_state_clear);
  204. /**
  205. * __drm_atomic_state_free - free all memory for an atomic state
  206. * @ref: This atomic state to deallocate
  207. *
  208. * This frees all memory associated with an atomic state, including all the
  209. * per-object state for planes, crtcs and connectors.
  210. */
  211. void __drm_atomic_state_free(struct kref *ref)
  212. {
  213. struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
  214. struct drm_mode_config *config = &state->dev->mode_config;
  215. drm_atomic_state_clear(state);
  216. DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
  217. if (config->funcs->atomic_state_free) {
  218. config->funcs->atomic_state_free(state);
  219. } else {
  220. drm_atomic_state_default_release(state);
  221. kfree(state);
  222. }
  223. }
  224. EXPORT_SYMBOL(__drm_atomic_state_free);
  225. /**
  226. * drm_atomic_get_crtc_state - get crtc state
  227. * @state: global atomic state object
  228. * @crtc: crtc to get state object for
  229. *
  230. * This function returns the crtc state for the given crtc, allocating it if
  231. * needed. It will also grab the relevant crtc lock to make sure that the state
  232. * is consistent.
  233. *
  234. * Returns:
  235. *
  236. * Either the allocated state or the error code encoded into the pointer. When
  237. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  238. * entire atomic sequence must be restarted. All other errors are fatal.
  239. */
  240. struct drm_crtc_state *
  241. drm_atomic_get_crtc_state(struct drm_atomic_state *state,
  242. struct drm_crtc *crtc)
  243. {
  244. int ret, index = drm_crtc_index(crtc);
  245. struct drm_crtc_state *crtc_state;
  246. WARN_ON(!state->acquire_ctx);
  247. crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
  248. if (crtc_state)
  249. return crtc_state;
  250. ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
  251. if (ret)
  252. return ERR_PTR(ret);
  253. crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
  254. if (!crtc_state)
  255. return ERR_PTR(-ENOMEM);
  256. state->crtcs[index].state = crtc_state;
  257. state->crtcs[index].old_state = crtc->state;
  258. state->crtcs[index].new_state = crtc_state;
  259. state->crtcs[index].ptr = crtc;
  260. crtc_state->state = state;
  261. DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
  262. crtc->base.id, crtc->name, crtc_state, state);
  263. return crtc_state;
  264. }
  265. EXPORT_SYMBOL(drm_atomic_get_crtc_state);
  266. static void set_out_fence_for_crtc(struct drm_atomic_state *state,
  267. struct drm_crtc *crtc, s32 __user *fence_ptr)
  268. {
  269. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
  270. }
  271. static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
  272. struct drm_crtc *crtc)
  273. {
  274. s32 __user *fence_ptr;
  275. fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
  276. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
  277. return fence_ptr;
  278. }
  279. static int set_out_fence_for_connector(struct drm_atomic_state *state,
  280. struct drm_connector *connector,
  281. s32 __user *fence_ptr)
  282. {
  283. unsigned int index = drm_connector_index(connector);
  284. if (!fence_ptr)
  285. return 0;
  286. if (put_user(-1, fence_ptr))
  287. return -EFAULT;
  288. state->connectors[index].out_fence_ptr = fence_ptr;
  289. return 0;
  290. }
  291. static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
  292. struct drm_connector *connector)
  293. {
  294. unsigned int index = drm_connector_index(connector);
  295. s32 __user *fence_ptr;
  296. fence_ptr = state->connectors[index].out_fence_ptr;
  297. state->connectors[index].out_fence_ptr = NULL;
  298. return fence_ptr;
  299. }
  300. /**
  301. * drm_atomic_set_mode_for_crtc - set mode for CRTC
  302. * @state: the CRTC whose incoming state to update
  303. * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
  304. *
  305. * Set a mode (originating from the kernel) on the desired CRTC state and update
  306. * the enable property.
  307. *
  308. * RETURNS:
  309. * Zero on success, error code on failure. Cannot return -EDEADLK.
  310. */
  311. int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
  312. const struct drm_display_mode *mode)
  313. {
  314. struct drm_crtc *crtc = state->crtc;
  315. struct drm_mode_modeinfo umode;
  316. /* Early return for no change. */
  317. if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
  318. return 0;
  319. drm_property_blob_put(state->mode_blob);
  320. state->mode_blob = NULL;
  321. if (mode) {
  322. drm_mode_convert_to_umode(&umode, mode);
  323. state->mode_blob =
  324. drm_property_create_blob(state->crtc->dev,
  325. sizeof(umode),
  326. &umode);
  327. if (IS_ERR(state->mode_blob))
  328. return PTR_ERR(state->mode_blob);
  329. drm_mode_copy(&state->mode, mode);
  330. state->enable = true;
  331. DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
  332. mode->name, crtc->base.id, crtc->name, state);
  333. } else {
  334. memset(&state->mode, 0, sizeof(state->mode));
  335. state->enable = false;
  336. DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
  337. crtc->base.id, crtc->name, state);
  338. }
  339. return 0;
  340. }
  341. EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	struct drm_crtc *crtc = state->crtc;

	/* No change: the same blob is already installed. */
	if (blob == state->mode_blob)
		return 0;

	/* Drop the old blob and wipe the cached mode up front, so any error
	 * below leaves the state consistently in a "no mode" configuration. */
	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		int ret;

		/* The blob must contain exactly one struct drm_mode_modeinfo. */
		if (blob->length != sizeof(struct drm_mode_modeinfo)) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
					 crtc->base.id, crtc->name,
					 blob->length);
			return -EINVAL;
		}

		ret = drm_mode_convert_umode(crtc->dev,
					     &state->mode, blob->data);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
					 crtc->base.id, crtc->name,
					 ret, drm_get_mode_status_name(state->mode.status));
			drm_mode_debug_printmodeline(&state->mode);
			return -EINVAL;
		}

		/* Hold a reference for as long as the state keeps the blob. */
		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
				 state->mode.name, crtc->base.id, crtc->name,
				 state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
				 crtc->base.id, crtc->name, state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
  394. /**
  395. * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
  396. * @dev: DRM device
  397. * @blob: a pointer to the member blob to be replaced
  398. * @blob_id: ID of the new blob
  399. * @expected_size: total expected size of the blob data (in bytes)
  400. * @expected_elem_size: expected element size of the blob data (in bytes)
  401. * @replaced: did the blob get replaced?
  402. *
  403. * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero
  404. * @blob becomes NULL.
  405. *
  406. * If @expected_size is positive the new blob length is expected to be equal
  407. * to @expected_size bytes. If @expected_elem_size is positive the new blob
  408. * length is expected to be a multiple of @expected_elem_size bytes. Otherwise
  409. * an error is returned.
  410. *
  411. * @replaced will indicate to the caller whether the blob was replaced or not.
  412. * If the old and new blobs were in fact the same blob @replaced will be false
  413. * otherwise it will be true.
  414. *
  415. * RETURNS:
  416. * Zero on success, error code on failure.
  417. */
  418. static int
  419. drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
  420. struct drm_property_blob **blob,
  421. uint64_t blob_id,
  422. ssize_t expected_size,
  423. ssize_t expected_elem_size,
  424. bool *replaced)
  425. {
  426. struct drm_property_blob *new_blob = NULL;
  427. if (blob_id != 0) {
  428. new_blob = drm_property_lookup_blob(dev, blob_id);
  429. if (new_blob == NULL)
  430. return -EINVAL;
  431. if (expected_size > 0 &&
  432. new_blob->length != expected_size) {
  433. drm_property_blob_put(new_blob);
  434. return -EINVAL;
  435. }
  436. if (expected_elem_size > 0 &&
  437. new_blob->length % expected_elem_size != 0) {
  438. drm_property_blob_put(new_blob);
  439. return -EINVAL;
  440. }
  441. }
  442. *replaced |= drm_property_replace_blob(blob, new_blob);
  443. drm_property_blob_put(new_blob);
  444. return 0;
  445. }
  446. /**
  447. * drm_atomic_crtc_set_property - set property on CRTC
  448. * @crtc: the drm CRTC to set a property on
  449. * @state: the state object to update with the new property value
  450. * @property: the property to set
  451. * @val: the new property value
  452. *
  453. * This function handles generic/core properties and calls out to driver's
  454. * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
  455. * consistent behavior you must call this function rather than the driver hook
  456. * directly.
  457. *
  458. * RETURNS:
  459. * Zero on success, error code on failure
  460. */
  461. int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
  462. struct drm_crtc_state *state, struct drm_property *property,
  463. uint64_t val)
  464. {
  465. struct drm_device *dev = crtc->dev;
  466. struct drm_mode_config *config = &dev->mode_config;
  467. bool replaced = false;
  468. int ret;
  469. if (property == config->prop_active)
  470. state->active = val;
  471. else if (property == config->prop_mode_id) {
  472. struct drm_property_blob *mode =
  473. drm_property_lookup_blob(dev, val);
  474. ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
  475. drm_property_blob_put(mode);
  476. return ret;
  477. } else if (property == config->degamma_lut_property) {
  478. ret = drm_atomic_replace_property_blob_from_id(dev,
  479. &state->degamma_lut,
  480. val,
  481. -1, sizeof(struct drm_color_lut),
  482. &replaced);
  483. state->color_mgmt_changed |= replaced;
  484. return ret;
  485. } else if (property == config->ctm_property) {
  486. ret = drm_atomic_replace_property_blob_from_id(dev,
  487. &state->ctm,
  488. val,
  489. sizeof(struct drm_color_ctm), -1,
  490. &replaced);
  491. state->color_mgmt_changed |= replaced;
  492. return ret;
  493. } else if (property == config->gamma_lut_property) {
  494. ret = drm_atomic_replace_property_blob_from_id(dev,
  495. &state->gamma_lut,
  496. val,
  497. -1, sizeof(struct drm_color_lut),
  498. &replaced);
  499. state->color_mgmt_changed |= replaced;
  500. return ret;
  501. } else if (property == config->prop_out_fence_ptr) {
  502. s32 __user *fence_ptr = u64_to_user_ptr(val);
  503. if (!fence_ptr)
  504. return 0;
  505. if (put_user(-1, fence_ptr))
  506. return -EFAULT;
  507. set_out_fence_for_crtc(state->state, crtc, fence_ptr);
  508. } else if (crtc->funcs->atomic_set_property) {
  509. return crtc->funcs->atomic_set_property(crtc, state, property, val);
  510. } else {
  511. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
  512. crtc->base.id, crtc->name,
  513. property->base.id, property->name);
  514. return -EINVAL;
  515. }
  516. return 0;
  517. }
  518. EXPORT_SYMBOL(drm_atomic_crtc_set_property);
  519. /**
  520. * drm_atomic_crtc_get_property - get property value from CRTC state
  521. * @crtc: the drm CRTC to set a property on
  522. * @state: the state object to get the property value from
  523. * @property: the property to set
  524. * @val: return location for the property value
  525. *
  526. * This function handles generic/core properties and calls out to driver's
  527. * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
  528. * consistent behavior you must call this function rather than the driver hook
  529. * directly.
  530. *
  531. * RETURNS:
  532. * Zero on success, error code on failure
  533. */
  534. static int
  535. drm_atomic_crtc_get_property(struct drm_crtc *crtc,
  536. const struct drm_crtc_state *state,
  537. struct drm_property *property, uint64_t *val)
  538. {
  539. struct drm_device *dev = crtc->dev;
  540. struct drm_mode_config *config = &dev->mode_config;
  541. if (property == config->prop_active)
  542. *val = state->active;
  543. else if (property == config->prop_mode_id)
  544. *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
  545. else if (property == config->degamma_lut_property)
  546. *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
  547. else if (property == config->ctm_property)
  548. *val = (state->ctm) ? state->ctm->base.id : 0;
  549. else if (property == config->gamma_lut_property)
  550. *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
  551. else if (property == config->prop_out_fence_ptr)
  552. *val = 0;
  553. else if (crtc->funcs->atomic_get_property)
  554. return crtc->funcs->atomic_get_property(crtc, state, property, val);
  555. else
  556. return -EINVAL;
  557. return 0;
  558. }
/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	/* "active" (DPMS-on) requires the CRTC to be enabled at all. */
	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * Dump the core CRTC state (for atomic debugfs / debug output), then let
 * the driver append its subclassed state via the atomic_print_state hook.
 */
static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	/* Driver-private state, if the driver subclasses drm_crtc_state. */
	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
  635. /**
  636. * drm_atomic_connector_check - check connector state
  637. * @connector: connector to check
  638. * @state: connector state to check
  639. *
  640. * Provides core sanity checks for connector state.
  641. *
  642. * RETURNS:
  643. * Zero on success, error code on failure
  644. */
  645. static int drm_atomic_connector_check(struct drm_connector *connector,
  646. struct drm_connector_state *state)
  647. {
  648. struct drm_crtc_state *crtc_state;
  649. struct drm_writeback_job *writeback_job = state->writeback_job;
  650. if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
  651. return 0;
  652. if (writeback_job->fb && !state->crtc) {
  653. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
  654. connector->base.id, connector->name);
  655. return -EINVAL;
  656. }
  657. if (state->crtc)
  658. crtc_state = drm_atomic_get_existing_crtc_state(state->state,
  659. state->crtc);
  660. if (writeback_job->fb && !crtc_state->active) {
  661. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
  662. connector->base.id, connector->name,
  663. state->crtc->base.id);
  664. return -EINVAL;
  665. }
  666. if (writeback_job->out_fence && !writeback_job->fb) {
  667. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
  668. connector->base.id, connector->name);
  669. return -EINVAL;
  670. }
  671. return 0;
  672. }
  673. /**
  674. * drm_atomic_get_plane_state - get plane state
  675. * @state: global atomic state object
  676. * @plane: plane to get state object for
  677. *
  678. * This function returns the plane state for the given plane, allocating it if
  679. * needed. It will also grab the relevant plane lock to make sure that the state
  680. * is consistent.
  681. *
  682. * Returns:
  683. *
  684. * Either the allocated state or the error code encoded into the pointer. When
  685. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  686. * entire atomic sequence must be restarted. All other errors are fatal.
  687. */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			  struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	/* Callers must hold an acquire context for w/w locking. */
	WARN_ON(!state->acquire_ctx);

	/* the legacy pointers should never be set */
	WARN_ON(plane->fb);
	WARN_ON(plane->old_fb);
	WARN_ON(plane->crtc);

	/* Already duplicated into this update? Return the cached state. */
	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	/* May return -EDEADLK, in which case the whole sequence restarts. */
	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	/* Register old/new state in the global update. */
	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	/* A plane bound to a CRTC also pulls that CRTC's state (and lock)
	 * into the update, so cross-object checks can run later. */
	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
  725. /**
  726. * drm_atomic_plane_set_property - set property on plane
  727. * @plane: the drm plane to set a property on
  728. * @state: the state object to update with the new property value
  729. * @property: the property to set
  730. * @val: the new property value
  731. *
  732. * This function handles generic/core properties and calls out to driver's
  733. * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
  734. * consistent behavior you must call this function rather than the driver hook
  735. * directly.
  736. *
  737. * RETURNS:
  738. * Zero on success, error code on failure
  739. */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		/* Lookup takes a reference; set_fb grabs its own, so drop
		 * ours again afterwards. */
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		/* Refuse to replace an already-attached in-fence. */
		if (state->fence)
			return -EINVAL;

		/* -1 means "no fence" and is a successful no-op. */
		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		/* CRTC x/y are signed; transported as u64 on the wire. */
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->alpha_property) {
		state->alpha = val;
	} else if (property == plane->rotation_property) {
		/* Exactly one rotation angle bit must be set. */
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
					 plane->base.id, plane->name, val);
			return -EINVAL;
		}
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (property == plane->color_encoding_property) {
		state->color_encoding = val;
	} else if (property == plane->color_range_property) {
		state->color_range = val;
	} else if (plane->funcs->atomic_set_property) {
		/* Non-core property: let the driver handle it. */
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
				 plane->base.id, plane->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
  804. /**
  805. * drm_atomic_plane_get_property - get property value from plane state
* @plane: the drm plane to get the property value from
* @state: the state object to get the property value from
* @property: the property to query
  809. * @val: return location for the property value
  810. *
  811. * This function handles generic/core properties and calls out to driver's
  812. * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
  813. * consistent behavior you must call this function rather than the driver hook
  814. * directly.
  815. *
  816. * RETURNS:
  817. * Zero on success, error code on failure
  818. */
  819. static int
  820. drm_atomic_plane_get_property(struct drm_plane *plane,
  821. const struct drm_plane_state *state,
  822. struct drm_property *property, uint64_t *val)
  823. {
  824. struct drm_device *dev = plane->dev;
  825. struct drm_mode_config *config = &dev->mode_config;
  826. if (property == config->prop_fb_id) {
  827. *val = (state->fb) ? state->fb->base.id : 0;
  828. } else if (property == config->prop_in_fence_fd) {
  829. *val = -1;
  830. } else if (property == config->prop_crtc_id) {
  831. *val = (state->crtc) ? state->crtc->base.id : 0;
  832. } else if (property == config->prop_crtc_x) {
  833. *val = I642U64(state->crtc_x);
  834. } else if (property == config->prop_crtc_y) {
  835. *val = I642U64(state->crtc_y);
  836. } else if (property == config->prop_crtc_w) {
  837. *val = state->crtc_w;
  838. } else if (property == config->prop_crtc_h) {
  839. *val = state->crtc_h;
  840. } else if (property == config->prop_src_x) {
  841. *val = state->src_x;
  842. } else if (property == config->prop_src_y) {
  843. *val = state->src_y;
  844. } else if (property == config->prop_src_w) {
  845. *val = state->src_w;
  846. } else if (property == config->prop_src_h) {
  847. *val = state->src_h;
  848. } else if (property == plane->alpha_property) {
  849. *val = state->alpha;
  850. } else if (property == plane->rotation_property) {
  851. *val = state->rotation;
  852. } else if (property == plane->zpos_property) {
  853. *val = state->zpos;
  854. } else if (property == plane->color_encoding_property) {
  855. *val = state->color_encoding;
  856. } else if (property == plane->color_range_property) {
  857. *val = state->color_range;
  858. } else if (plane->funcs->atomic_get_property) {
  859. return plane->funcs->atomic_get_property(plane, state, property, val);
  860. } else {
  861. return -EINVAL;
  862. }
  863. return 0;
  864. }
  865. static bool
  866. plane_switching_crtc(struct drm_atomic_state *state,
  867. struct drm_plane *plane,
  868. struct drm_plane_state *plane_state)
  869. {
  870. if (!plane->state->crtc || !plane_state->crtc)
  871. return false;
  872. if (plane->state->crtc == plane_state->crtc)
  873. return false;
  874. /* This could be refined, but currently there's no helper or driver code
  875. * to implement direct switching of active planes nor userspace to take
  876. * advantage of more direct plane switching without the intermediate
  877. * full OFF state.
  878. */
  879. return true;
  880. }
  881. /**
  882. * drm_atomic_plane_check - check plane state
  883. * @plane: plane to check
  884. * @state: plane state to check
  885. *
  886. * Provides core sanity checks for plane state.
  887. *
  888. * RETURNS:
  889. * Zero on success, error code on failure
  890. */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (state->crtc && !state->fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (state->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 state->crtc->base.id, state->crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
					   state->fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(state->fb->format->format,
						     &format_name),
				 state->fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows:
	 * crtc_{x,y} are signed, crtc_{w,h} unsigned; reject combinations
	 * whose sum x+w or y+h would not fit in an int. */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	/* Source coordinates are 16.16 fixed point; scale fb size to match. */
	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb.
	 * Comparing size first also makes the following unsigned
	 * subtractions overflow-safe. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		/* (frac * 15625) >> 10 == frac * 1000000 / 65536: print the
		 * 16-bit fraction as a 6-digit decimal. */
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
				 state->fb->width, state->fb->height);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * Dump the core plane state (for atomic debugfs / debug output), then let
 * the driver append its subclassed state via the atomic_print_state hook.
 */
static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	/* src is printed in fixed-point (FP) notation. */
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	/* Driver-private state, if the driver subclasses drm_plane_state. */
	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}
  985. /**
  986. * DOC: handling driver private state
  987. *
  988. * Very often the DRM objects exposed to userspace in the atomic modeset api
  989. * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
  990. * underlying hardware. Especially for any kind of shared resources (e.g. shared
  991. * clocks, scaler units, bandwidth and fifo limits shared among a group of
  992. * planes or CRTCs, and so on) it makes sense to model these as independent
  993. * objects. Drivers then need to do similar state tracking and commit ordering for
* such private (since not exposed to userspace) objects as the atomic core and
  995. * helpers already provide for connectors, planes and CRTCs.
  996. *
  997. * To make this easier on drivers the atomic core provides some support to track
  998. * driver private state objects using struct &drm_private_obj, with the
  999. * associated state struct &drm_private_state.
  1000. *
  1001. * Similar to userspace-exposed objects, private state structures can be
  1002. * acquired by calling drm_atomic_get_private_obj_state(). Since this function
  1003. * does not take care of locking, drivers should wrap it for each type of
  1004. * private state object they have with the required call to drm_modeset_lock()
  1005. * for the corresponding &drm_modeset_lock.
  1006. *
  1007. * All private state structures contained in a &drm_atomic_state update can be
  1008. * iterated using for_each_oldnew_private_obj_in_state(),
  1009. * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
  1010. * Drivers are recommended to wrap these for each type of driver private state
  1011. * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
  1012. * least if they want to iterate over all objects of a given type.
  1013. *
  1014. * An earlier way to handle driver private state was by subclassing struct
  1015. * &drm_atomic_state. But since that encourages non-standard ways to implement
  1016. * the check/commit split atomic requires (by using e.g. "check and rollback or
  1017. * commit instead" of "duplicate state, check, then either commit or release
* duplicated state") it is deprecated in favour of using &drm_private_state.
  1019. */
  1020. /**
  1021. * drm_atomic_private_obj_init - initialize private object
  1022. * @obj: private object
  1023. * @state: initial private object state
  1024. * @funcs: pointer to the struct of function pointers that identify the object
  1025. * type
  1026. *
  1027. * Initialize the private object, which can be embedded into any
  1028. * driver private object that needs its own atomic state.
  1029. */
  1030. void
  1031. drm_atomic_private_obj_init(struct drm_private_obj *obj,
  1032. struct drm_private_state *state,
  1033. const struct drm_private_state_funcs *funcs)
  1034. {
  1035. memset(obj, 0, sizeof(*obj));
  1036. obj->state = state;
  1037. obj->funcs = funcs;
  1038. }
  1039. EXPORT_SYMBOL(drm_atomic_private_obj_init);
  1040. /**
  1041. * drm_atomic_private_obj_fini - finalize private object
  1042. * @obj: private object
  1043. *
  1044. * Finalize the private object.
  1045. */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	/* Release the object's current state via its type-specific hook. */
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);
  1052. /**
  1053. * drm_atomic_get_private_obj_state - get private object state
  1054. * @state: global atomic state
  1055. * @obj: private object to get the state for
  1056. *
  1057. * This function returns the private object state for the given private object,
  1058. * allocating the state if needed. It does not grab any locks as the caller is
  1059. * expected to care of any required locking.
  1060. *
  1061. * RETURNS:
  1062. *
  1063. * Either the allocated state or the error code encoded into a pointer.
  1064. */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	/* Already part of this update? Return the cached duplicate. */
	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	/* Grow the array by one entry; krealloc keeps the old array (and
	 * thus all previously returned states) valid on failure. */
	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	/* num_private_objs is only bumped after the entry is fully filled
	 * in, so iterators never see a half-initialized slot. */
	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
  1098. /**
  1099. * drm_atomic_get_connector_state - get connector state
  1100. * @state: global atomic state object
  1101. * @connector: connector to get state object for
  1102. *
  1103. * This function returns the connector state for the given connector,
  1104. * allocating it if needed. It will also grab the relevant connector lock to
  1105. * make sure that the state is consistent.
  1106. *
  1107. * Returns:
  1108. *
  1109. * Either the allocated state or the error code encoded into the pointer. When
  1110. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  1111. * entire atomic sequence must be restarted. All other errors are fatal.
  1112. */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			      struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	/* Callers must hold an acquire context for w/w locking. */
	WARN_ON(!state->acquire_ctx);

	/* May return -EDEADLK, in which case the whole sequence restarts. */
	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	/* Connectors can be hotplugged, so the per-update array may need to
	 * grow beyond the size it was allocated with. */
	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		/* Zero only the newly added tail entries. */
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	/* Already duplicated into this update? Return the cached state. */
	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	/* Hold a ref for the duration of the update; dropped on state clear. */
	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	/* A connector bound to a CRTC also pulls that CRTC's state (and
	 * lock) into the update, so cross-object checks can run later. */
	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
  1160. /**
  1161. * drm_atomic_connector_set_property - set property on connector.
  1162. * @connector: the drm connector to set a property on
  1163. * @state: the state object to update with the new property value
  1164. * @property: the property to set
  1165. * @val: the new property value
  1166. *
  1167. * This function handles generic/core properties and calls out to driver's
  1168. * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
  1169. * consistent behavior you must call this function rather than the driver hook
  1170. * directly.
  1171. *
  1172. * RETURNS:
  1173. * Zero on success, error code on failure
  1174. */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, driver
		 * silently rejects it and returns a 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == config->content_type_property) {
		state->content_type = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (property == connector->content_protection_property) {
		/* ENABLED is a hw/driver-reported status; userspace may only
		 * request DESIRED or UNDESIRED. */
		if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
			return -EINVAL;
		}
		state->content_protection = val;
	} else if (property == config->writeback_fb_id_property) {
		/* Lookup takes a reference; the setter grabs its own, so
		 * drop ours again afterwards. */
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
		return ret;
	} else if (property == config->writeback_out_fence_ptr_property) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		return set_out_fence_for_connector(state->state, connector,
						   fence_ptr);
	} else if (connector->funcs->atomic_set_property) {
		/* Non-core property: let the driver handle it. */
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
				 connector->base.id, connector->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * Dump the core connector state (for atomic debugfs / debug output), then
 * let the driver append its subclassed state via atomic_print_state.
 */
static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	/* Only writeback connectors carry a job with a framebuffer. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}
  1273. /**
  1274. * drm_atomic_connector_get_property - get property value from connector state
* @connector: the drm connector to get the property value from
* @state: the state object to get the property value from
* @property: the property to query
  1278. * @val: return location for the property value
  1279. *
  1280. * This function handles generic/core properties and calls out to driver's
  1281. * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
  1282. * consistent behavior you must call this function rather than the driver hook
  1283. * directly.
  1284. *
  1285. * RETURNS:
  1286. * Zero on success, error code on failure
  1287. */
  1288. static int
  1289. drm_atomic_connector_get_property(struct drm_connector *connector,
  1290. const struct drm_connector_state *state,
  1291. struct drm_property *property, uint64_t *val)
  1292. {
  1293. struct drm_device *dev = connector->dev;
  1294. struct drm_mode_config *config = &dev->mode_config;
  1295. if (property == config->prop_crtc_id) {
  1296. *val = (state->crtc) ? state->crtc->base.id : 0;
  1297. } else if (property == config->dpms_property) {
  1298. *val = connector->dpms;
  1299. } else if (property == config->tv_select_subconnector_property) {
  1300. *val = state->tv.subconnector;
  1301. } else if (property == config->tv_left_margin_property) {
  1302. *val = state->tv.margins.left;
  1303. } else if (property == config->tv_right_margin_property) {
  1304. *val = state->tv.margins.right;
  1305. } else if (property == config->tv_top_margin_property) {
  1306. *val = state->tv.margins.top;
  1307. } else if (property == config->tv_bottom_margin_property) {
  1308. *val = state->tv.margins.bottom;
  1309. } else if (property == config->tv_mode_property) {
  1310. *val = state->tv.mode;
  1311. } else if (property == config->tv_brightness_property) {
  1312. *val = state->tv.brightness;
  1313. } else if (property == config->tv_contrast_property) {
  1314. *val = state->tv.contrast;
  1315. } else if (property == config->tv_flicker_reduction_property) {
  1316. *val = state->tv.flicker_reduction;
  1317. } else if (property == config->tv_overscan_property) {
  1318. *val = state->tv.overscan;
  1319. } else if (property == config->tv_saturation_property) {
  1320. *val = state->tv.saturation;
  1321. } else if (property == config->tv_hue_property) {
  1322. *val = state->tv.hue;
  1323. } else if (property == config->link_status_property) {
  1324. *val = state->link_status;
  1325. } else if (property == config->aspect_ratio_property) {
  1326. *val = state->picture_aspect_ratio;
  1327. } else if (property == config->content_type_property) {
  1328. *val = state->content_type;
  1329. } else if (property == connector->scaling_mode_property) {
  1330. *val = state->scaling_mode;
  1331. } else if (property == connector->content_protection_property) {
  1332. *val = state->content_protection;
  1333. } else if (property == config->writeback_fb_id_property) {
  1334. /* Writeback framebuffer is one-shot, write and forget */
  1335. *val = 0;
  1336. } else if (property == config->writeback_out_fence_ptr_property) {
  1337. *val = 0;
  1338. } else if (connector->funcs->atomic_get_property) {
  1339. return connector->funcs->atomic_get_property(connector,
  1340. state, property, val);
  1341. } else {
  1342. return -EINVAL;
  1343. }
  1344. return 0;
  1345. }
  1346. int drm_atomic_get_property(struct drm_mode_object *obj,
  1347. struct drm_property *property, uint64_t *val)
  1348. {
  1349. struct drm_device *dev = property->dev;
  1350. int ret;
  1351. switch (obj->type) {
  1352. case DRM_MODE_OBJECT_CONNECTOR: {
  1353. struct drm_connector *connector = obj_to_connector(obj);
  1354. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  1355. ret = drm_atomic_connector_get_property(connector,
  1356. connector->state, property, val);
  1357. break;
  1358. }
  1359. case DRM_MODE_OBJECT_CRTC: {
  1360. struct drm_crtc *crtc = obj_to_crtc(obj);
  1361. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  1362. ret = drm_atomic_crtc_get_property(crtc,
  1363. crtc->state, property, val);
  1364. break;
  1365. }
  1366. case DRM_MODE_OBJECT_PLANE: {
  1367. struct drm_plane *plane = obj_to_plane(obj);
  1368. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  1369. ret = drm_atomic_plane_get_property(plane,
  1370. plane->state, property, val);
  1371. break;
  1372. }
  1373. default:
  1374. ret = -EINVAL;
  1375. break;
  1376. }
  1377. return ret;
  1378. }
  1379. /**
  1380. * drm_atomic_set_crtc_for_plane - set crtc for plane
  1381. * @plane_state: the plane whose incoming state to update
  1382. * @crtc: crtc to use for the plane
  1383. *
  1384. * Changing the assigned crtc for a plane requires us to grab the lock and state
  1385. * for the new crtc, as needed. This function takes care of all these details
  1386. * besides updating the pointer in the state object itself.
  1387. *
  1388. * Returns:
  1389. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1390. * then the w/w mutex code has detected a deadlock and the entire atomic
  1391. * sequence must be restarted. All other errors are fatal.
  1392. */
  1393. int
  1394. drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
  1395. struct drm_crtc *crtc)
  1396. {
  1397. struct drm_plane *plane = plane_state->plane;
  1398. struct drm_crtc_state *crtc_state;
  1399. /* Nothing to do for same crtc*/
  1400. if (plane_state->crtc == crtc)
  1401. return 0;
  1402. if (plane_state->crtc) {
  1403. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  1404. plane_state->crtc);
  1405. if (WARN_ON(IS_ERR(crtc_state)))
  1406. return PTR_ERR(crtc_state);
  1407. crtc_state->plane_mask &= ~drm_plane_mask(plane);
  1408. }
  1409. plane_state->crtc = crtc;
  1410. if (crtc) {
  1411. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  1412. crtc);
  1413. if (IS_ERR(crtc_state))
  1414. return PTR_ERR(crtc_state);
  1415. crtc_state->plane_mask |= drm_plane_mask(plane);
  1416. }
  1417. if (crtc)
  1418. DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
  1419. plane->base.id, plane->name, plane_state,
  1420. crtc->base.id, crtc->name);
  1421. else
  1422. DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
  1423. plane->base.id, plane->name, plane_state);
  1424. return 0;
  1425. }
  1426. EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
  1427. /**
  1428. * drm_atomic_set_fb_for_plane - set framebuffer for plane
  1429. * @plane_state: atomic state object for the plane
  1430. * @fb: fb to use for the plane
  1431. *
  1432. * Changing the assigned framebuffer for a plane requires us to grab a reference
  1433. * to the new fb and drop the reference to the old fb, if there is one. This
  1434. * function takes care of all these details besides updating the pointer in the
  1435. * state object itself.
  1436. */
  1437. void
  1438. drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
  1439. struct drm_framebuffer *fb)
  1440. {
  1441. struct drm_plane *plane = plane_state->plane;
  1442. if (fb)
  1443. DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
  1444. fb->base.id, plane->base.id, plane->name,
  1445. plane_state);
  1446. else
  1447. DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
  1448. plane->base.id, plane->name, plane_state);
  1449. drm_framebuffer_assign(&plane_state->fb, fb);
  1450. }
  1451. EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
  1452. /**
  1453. * drm_atomic_set_fence_for_plane - set fence for plane
  1454. * @plane_state: atomic state object for the plane
  1455. * @fence: dma_fence to use for the plane
  1456. *
  1457. * Helper to setup the plane_state fence in case it is not set yet.
  1458. * By using this drivers doesn't need to worry if the user choose
  1459. * implicit or explicit fencing.
  1460. *
  1461. * This function will not set the fence to the state if it was set
  1462. * via explicit fencing interfaces on the atomic ioctl. In that case it will
  1463. * drop the reference to the fence as we are not storing it anywhere.
  1464. * Otherwise, if &drm_plane_state.fence is not set this function we just set it
  1465. * with the received implicit fence. In both cases this function consumes a
  1466. * reference for @fence.
  1467. *
  1468. * This way explicit fencing can be used to overrule implicit fencing, which is
  1469. * important to make explicit fencing use-cases work: One example is using one
  1470. * buffer for 2 screens with different refresh rates. Implicit fencing will
  1471. * clamp rendering to the refresh rate of the slower screen, whereas explicit
  1472. * fence allows 2 independent render and display loops on a single buffer. If a
  1473. * driver allows obeys both implicit and explicit fences for plane updates, then
  1474. * it will break all the benefits of explicit fencing.
  1475. */
  1476. void
  1477. drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
  1478. struct dma_fence *fence)
  1479. {
  1480. if (plane_state->fence) {
  1481. dma_fence_put(fence);
  1482. return;
  1483. }
  1484. plane_state->fence = fence;
  1485. }
  1486. EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_crtc_state *crtc_state;

	/* No change, nothing to do. */
	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		/*
		 * Detach from the old CRTC. Only a "new state" lookup is done
		 * here, with no IS_ERR/NULL check.
		 * NOTE(review): this presumably relies on the old CRTC's
		 * state already being part of this atomic update; confirm
		 * against the connector-state acquisition path.
		 */
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		crtc_state->connector_mask &=
			~drm_connector_mask(conn_state->connector);

		/* Drop the reference the old CRTC link held on the connector. */
		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		/* Acquiring the new CRTC's state can fail (-EDEADLK/-ENOMEM). */
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			drm_connector_mask(conn_state->connector);

		/* The new CRTC link takes a reference on the connector. */
		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
				 connector->base.id, connector->name,
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
				 connector->base.id, connector->name,
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
  1536. /*
  1537. * drm_atomic_get_writeback_job - return or allocate a writeback job
  1538. * @conn_state: Connector state to get the job for
  1539. *
  1540. * Writeback jobs have a different lifetime to the atomic state they are
  1541. * associated with. This convenience function takes care of allocating a job
  1542. * if there isn't yet one associated with the connector state, otherwise
  1543. * it just returns the existing job.
  1544. *
  1545. * Returns: The writeback job for the given connector state
  1546. */
  1547. static struct drm_writeback_job *
  1548. drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
  1549. {
  1550. WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
  1551. if (!conn_state->writeback_job)
  1552. conn_state->writeback_job =
  1553. kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
  1554. return conn_state->writeback_job;
  1555. }
  1556. /**
  1557. * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
  1558. * @conn_state: atomic state object for the connector
  1559. * @fb: fb to use for the connector
  1560. *
  1561. * This is used to set the framebuffer for a writeback connector, which outputs
  1562. * to a buffer instead of an actual physical connector.
  1563. * Changing the assigned framebuffer requires us to grab a reference to the new
  1564. * fb and drop the reference to the old fb, if there is one. This function
  1565. * takes care of all these details besides updating the pointer in the
  1566. * state object itself.
  1567. *
  1568. * Note: The only way conn_state can already have an fb set is if the commit
  1569. * sets the property more than once.
  1570. *
  1571. * See also: drm_writeback_connector_init()
  1572. *
  1573. * Returns: 0 on success
  1574. */
  1575. int drm_atomic_set_writeback_fb_for_connector(
  1576. struct drm_connector_state *conn_state,
  1577. struct drm_framebuffer *fb)
  1578. {
  1579. struct drm_writeback_job *job =
  1580. drm_atomic_get_writeback_job(conn_state);
  1581. if (!job)
  1582. return -ENOMEM;
  1583. drm_framebuffer_assign(&job->fb, fb);
  1584. if (fb)
  1585. DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
  1586. fb->base.id, conn_state);
  1587. else
  1588. DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
  1589. conn_state);
  1590. return 0;
  1591. }
  1592. EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
  1593. /**
  1594. * drm_atomic_add_affected_connectors - add connectors for crtc
  1595. * @state: atomic state
  1596. * @crtc: DRM crtc
  1597. *
  1598. * This function walks the current configuration and adds all connectors
  1599. * currently using @crtc to the atomic configuration @state. Note that this
  1600. * function must acquire the connection mutex. This can potentially cause
  1601. * unneeded seralization if the update is just for the planes on one crtc. Hence
  1602. * drivers and helpers should only call this when really needed (e.g. when a
  1603. * full modeset needs to happen due to some change).
  1604. *
  1605. * Returns:
  1606. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1607. * then the w/w mutex code has detected a deadlock and the entire atomic
  1608. * sequence must be restarted. All other errors are fatal.
  1609. */
  1610. int
  1611. drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
  1612. struct drm_crtc *crtc)
  1613. {
  1614. struct drm_mode_config *config = &state->dev->mode_config;
  1615. struct drm_connector *connector;
  1616. struct drm_connector_state *conn_state;
  1617. struct drm_connector_list_iter conn_iter;
  1618. struct drm_crtc_state *crtc_state;
  1619. int ret;
  1620. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  1621. if (IS_ERR(crtc_state))
  1622. return PTR_ERR(crtc_state);
  1623. ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
  1624. if (ret)
  1625. return ret;
  1626. DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
  1627. crtc->base.id, crtc->name, state);
  1628. /*
  1629. * Changed connectors are already in @state, so only need to look
  1630. * at the connector_mask in crtc_state.
  1631. */
  1632. drm_connector_list_iter_begin(state->dev, &conn_iter);
  1633. drm_for_each_connector_iter(connector, &conn_iter) {
  1634. if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
  1635. continue;
  1636. conn_state = drm_atomic_get_connector_state(state, connector);
  1637. if (IS_ERR(conn_state)) {
  1638. drm_connector_list_iter_end(&conn_iter);
  1639. return PTR_ERR(conn_state);
  1640. }
  1641. }
  1642. drm_connector_list_iter_end(&conn_iter);
  1643. return 0;
  1644. }
  1645. EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  1646. /**
  1647. * drm_atomic_add_affected_planes - add planes for crtc
  1648. * @state: atomic state
  1649. * @crtc: DRM crtc
  1650. *
  1651. * This function walks the current configuration and adds all planes
  1652. * currently used by @crtc to the atomic configuration @state. This is useful
  1653. * when an atomic commit also needs to check all currently enabled plane on
  1654. * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
  1655. * to avoid special code to force-enable all planes.
  1656. *
  1657. * Since acquiring a plane state will always also acquire the w/w mutex of the
  1658. * current CRTC for that plane (if there is any) adding all the plane states for
  1659. * a CRTC will not reduce parallism of atomic updates.
  1660. *
  1661. * Returns:
  1662. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1663. * then the w/w mutex code has detected a deadlock and the entire atomic
  1664. * sequence must be restarted. All other errors are fatal.
  1665. */
  1666. int
  1667. drm_atomic_add_affected_planes(struct drm_atomic_state *state,
  1668. struct drm_crtc *crtc)
  1669. {
  1670. struct drm_plane *plane;
  1671. WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
  1672. DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
  1673. crtc->base.id, crtc->name, state);
  1674. drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
  1675. struct drm_plane_state *plane_state =
  1676. drm_atomic_get_plane_state(state, plane);
  1677. if (IS_ERR(plane_state))
  1678. return PTR_ERR(plane_state);
  1679. }
  1680. return 0;
  1681. }
  1682. EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	/* Core validation of every plane in the update. */
	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	/* Core validation of every CRTC in the update. */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	/* Core validation of every connector in the update. */
	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_connector_check(conn, conn_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
					 conn->base.id, conn->name);
			return ret;
		}
	}

	/* Driver-wide check runs only after the per-object core checks pass. */
	if (config->funcs->atomic_check) {
		ret = config->funcs->atomic_check(state->dev, state);
		if (ret) {
			DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
					 state, ret);
			return ret;
		}
	}

	/*
	 * If the caller did not opt into full modesets, reject the update
	 * as soon as any CRTC would need one. This runs after the driver
	 * check, since drivers can flag a modeset there.
	 */
	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
  1750. /**
  1751. * drm_atomic_commit - commit configuration atomically
  1752. * @state: atomic configuration to check
  1753. *
  1754. * Note that this function can return -EDEADLK if the driver needed to acquire
  1755. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1756. * backoff dance and restart. All other errors are fatal.
  1757. *
  1758. * This function will take its own reference on @state.
  1759. * Callers should always release their reference with drm_atomic_state_put().
  1760. *
  1761. * Returns:
  1762. * 0 on success, negative error code on failure.
  1763. */
  1764. int drm_atomic_commit(struct drm_atomic_state *state)
  1765. {
  1766. struct drm_mode_config *config = &state->dev->mode_config;
  1767. int ret;
  1768. ret = drm_atomic_check_only(state);
  1769. if (ret)
  1770. return ret;
  1771. DRM_DEBUG_ATOMIC("committing %p\n", state);
  1772. return config->funcs->atomic_commit(state->dev, state, false);
  1773. }
  1774. EXPORT_SYMBOL(drm_atomic_commit);
  1775. /**
  1776. * drm_atomic_nonblocking_commit - atomic nonblocking commit
  1777. * @state: atomic configuration to check
  1778. *
  1779. * Note that this function can return -EDEADLK if the driver needed to acquire
  1780. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1781. * backoff dance and restart. All other errors are fatal.
  1782. *
  1783. * This function will take its own reference on @state.
  1784. * Callers should always release their reference with drm_atomic_state_put().
  1785. *
  1786. * Returns:
  1787. * 0 on success, negative error code on failure.
  1788. */
  1789. int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
  1790. {
  1791. struct drm_mode_config *config = &state->dev->mode_config;
  1792. int ret;
  1793. ret = drm_atomic_check_only(state);
  1794. if (ret)
  1795. return ret;
  1796. DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
  1797. return config->funcs->atomic_commit(state->dev, state, true);
  1798. }
  1799. EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  1800. static void drm_atomic_print_state(const struct drm_atomic_state *state)
  1801. {
  1802. struct drm_printer p = drm_info_printer(state->dev->dev);
  1803. struct drm_plane *plane;
  1804. struct drm_plane_state *plane_state;
  1805. struct drm_crtc *crtc;
  1806. struct drm_crtc_state *crtc_state;
  1807. struct drm_connector *connector;
  1808. struct drm_connector_state *connector_state;
  1809. int i;
  1810. DRM_DEBUG_ATOMIC("checking %p\n", state);
  1811. for_each_new_plane_in_state(state, plane, plane_state, i)
  1812. drm_atomic_plane_print_state(&p, plane_state);
  1813. for_each_new_crtc_in_state(state, crtc, crtc_state, i)
  1814. drm_atomic_crtc_print_state(&p, crtc_state);
  1815. for_each_new_connector_in_state(state, connector, connector_state, i)
  1816. drm_atomic_connector_print_state(&p, connector_state);
  1817. }
  1818. static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
  1819. bool take_locks)
  1820. {
  1821. struct drm_mode_config *config = &dev->mode_config;
  1822. struct drm_plane *plane;
  1823. struct drm_crtc *crtc;
  1824. struct drm_connector *connector;
  1825. struct drm_connector_list_iter conn_iter;
  1826. if (!drm_drv_uses_atomic_modeset(dev))
  1827. return;
  1828. list_for_each_entry(plane, &config->plane_list, head) {
  1829. if (take_locks)
  1830. drm_modeset_lock(&plane->mutex, NULL);
  1831. drm_atomic_plane_print_state(p, plane->state);
  1832. if (take_locks)
  1833. drm_modeset_unlock(&plane->mutex);
  1834. }
  1835. list_for_each_entry(crtc, &config->crtc_list, head) {
  1836. if (take_locks)
  1837. drm_modeset_lock(&crtc->mutex, NULL);
  1838. drm_atomic_crtc_print_state(p, crtc->state);
  1839. if (take_locks)
  1840. drm_modeset_unlock(&crtc->mutex);
  1841. }
  1842. drm_connector_list_iter_begin(dev, &conn_iter);
  1843. if (take_locks)
  1844. drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
  1845. drm_for_each_connector_iter(connector, &conn_iter)
  1846. drm_atomic_connector_print_state(p, connector->state);
  1847. if (take_locks)
  1848. drm_modeset_unlock(&dev->mode_config.connection_mutex);
  1849. drm_connector_list_iter_end(&conn_iter);
  1850. }
/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error irq's. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must drm_modeset_lock_all(), or if this is called
 * from error irq handler, it should not be enabled by default.
 * (Ie. if you are debugging errors you might not care that this
 * is racy. But calling this without all modeset locks held is
 * not inherently safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	/* take_locks=false: locking is the caller's responsibility here. */
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);
  1871. #ifdef CONFIG_DEBUG_FS
  1872. static int drm_state_info(struct seq_file *m, void *data)
  1873. {
  1874. struct drm_info_node *node = (struct drm_info_node *) m->private;
  1875. struct drm_device *dev = node->minor->dev;
  1876. struct drm_printer p = drm_seq_file_printer(m);
  1877. __drm_state_dump(dev, &p, true);
  1878. return 0;
  1879. }
/* any use in debugfs files to dump individual planes/crtc/etc? */
/* debugfs entries registered by drm_atomic_debugfs_init(); "state" dumps the
 * full device state via drm_state_info(). */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};
/* Register the atomic debugfs files for @minor under its debugfs root.
 * Returns 0 on success or a negative error code from
 * drm_debugfs_create_files().
 */
int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
			ARRAY_SIZE(drm_atomic_debugfs_list),
			minor->debugfs_root, minor);
}
  1890. #endif
  1891. /*
  1892. * The big monster ioctl
  1893. */
  1894. static struct drm_pending_vblank_event *create_vblank_event(
  1895. struct drm_crtc *crtc, uint64_t user_data)
  1896. {
  1897. struct drm_pending_vblank_event *e = NULL;
  1898. e = kzalloc(sizeof *e, GFP_KERNEL);
  1899. if (!e)
  1900. return NULL;
  1901. e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
  1902. e->event.base.length = sizeof(e->event);
  1903. e->event.vbl.crtc_id = crtc->base.id;
  1904. e->event.vbl.user_data = user_data;
  1905. return e;
  1906. }
/*
 * drm_atomic_connector_commit_dpms - implement legacy DPMS with atomic commit
 *
 * Updates @connector->dpms and commits the resulting CRTC active state: the
 * CRTC feeding @connector stays active as long as at least one connector on
 * it still has dpms == DRM_MODE_DPMS_ON. On failure the connector's dpms
 * value is restored to its previous value.
 */
int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
				     struct drm_connector *connector,
				     int mode)
{
	struct drm_connector *tmp_connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret, old_mode = connector->dpms;
	bool active = false;

	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	/* Atomic DPMS only knows on/off; fold every other mode into OFF. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;
	connector->dpms = mode;

	crtc = connector->state->crtc;
	/* No CRTC bound: nothing to commit; ret == 0 here, dpms stays set. */
	if (!crtc)
		goto out;

	/* Pull every connector on this CRTC into @state for the scan below. */
	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	/* CRTC stays active if any of its connectors is still DPMS on. */
	for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
		if (new_conn_state->crtc != crtc)
			continue;

		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
			active = true;
			break;
		}
	}

	crtc_state->active = active;
	ret = drm_atomic_commit(state);
out:
	/* Any failure path restores the previous dpms value. */
	if (ret != 0)
		connector->dpms = old_mode;
	return ret;
}
  1950. int drm_atomic_set_property(struct drm_atomic_state *state,
  1951. struct drm_mode_object *obj,
  1952. struct drm_property *prop,
  1953. uint64_t prop_value)
  1954. {
  1955. struct drm_mode_object *ref;
  1956. int ret;
  1957. if (!drm_property_change_valid_get(prop, prop_value, &ref))
  1958. return -EINVAL;
  1959. switch (obj->type) {
  1960. case DRM_MODE_OBJECT_CONNECTOR: {
  1961. struct drm_connector *connector = obj_to_connector(obj);
  1962. struct drm_connector_state *connector_state;
  1963. connector_state = drm_atomic_get_connector_state(state, connector);
  1964. if (IS_ERR(connector_state)) {
  1965. ret = PTR_ERR(connector_state);
  1966. break;
  1967. }
  1968. ret = drm_atomic_connector_set_property(connector,
  1969. connector_state, prop, prop_value);
  1970. break;
  1971. }
  1972. case DRM_MODE_OBJECT_CRTC: {
  1973. struct drm_crtc *crtc = obj_to_crtc(obj);
  1974. struct drm_crtc_state *crtc_state;
  1975. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  1976. if (IS_ERR(crtc_state)) {
  1977. ret = PTR_ERR(crtc_state);
  1978. break;
  1979. }
  1980. ret = drm_atomic_crtc_set_property(crtc,
  1981. crtc_state, prop, prop_value);
  1982. break;
  1983. }
  1984. case DRM_MODE_OBJECT_PLANE: {
  1985. struct drm_plane *plane = obj_to_plane(obj);
  1986. struct drm_plane_state *plane_state;
  1987. plane_state = drm_atomic_get_plane_state(state, plane);
  1988. if (IS_ERR(plane_state)) {
  1989. ret = PTR_ERR(plane_state);
  1990. break;
  1991. }
  1992. ret = drm_atomic_plane_set_property(plane,
  1993. plane_state, prop, prop_value);
  1994. break;
  1995. }
  1996. default:
  1997. ret = -EINVAL;
  1998. break;
  1999. }
  2000. drm_property_change_valid_put(prop, ref);
  2001. return ret;
  2002. }
  2003. /**
  2004. * DOC: explicit fencing properties
  2005. *
  2006. * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A Fence or a group of fences are transferred to/from
  2008. * userspace using Sync File fds and there are two DRM properties for that.
  2009. * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
  2010. * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
  2011. *
  2012. * As a contrast, with implicit fencing the kernel keeps track of any
  2013. * ongoing rendering, and automatically ensures that the atomic update waits
  2014. * for any pending rendering to complete. For shared buffers represented with
  2015. * a &struct dma_buf this is tracked in &struct reservation_object.
  2016. * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
  2017. * whereas explicit fencing is what Android wants.
  2018. *
 * "IN_FENCE_FD":
  2020. * Use this property to pass a fence that DRM should wait on before
  2021. * proceeding with the Atomic Commit request and show the framebuffer for
  2022. * the plane on the screen. The fence can be either a normal fence or a
  2023. * merged one, the sync_file framework will handle both cases and use a
  2024. * fence_array if a merged fence is received. Passing -1 here means no
  2025. * fences to wait on.
  2026. *
  2027. * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
  2028. * it will only check if the Sync File is a valid one.
  2029. *
  2030. * On the driver side the fence is stored on the @fence parameter of
  2031. * &struct drm_plane_state. Drivers which also support implicit fencing
  2032. * should set the implicit fence using drm_atomic_set_fence_for_plane(),
  2033. * to make sure there's consistent behaviour between drivers in precedence
  2034. * of implicit vs. explicit fencing.
  2035. *
 * "OUT_FENCE_PTR":
  2037. * Use this property to pass a file descriptor pointer to DRM. Once the
  2038. * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
  2039. * the file descriptor number of a Sync File. This Sync File contains the
  2040. * CRTC fence that will be signaled when all framebuffers present on the
 * Atomic Commit request for that given CRTC are scanned out on the
  2042. * screen.
  2043. *
 * The Atomic Commit request fails if an invalid pointer is passed. If the
  2045. * Atomic Commit request fails for any other reason the out fence fd
 * returned will be -1. On an Atomic Commit with the
  2047. * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
  2048. *
  2049. * Note that out-fences don't have a special interface to drivers and are
  2050. * internally represented by a &struct drm_pending_vblank_event in struct
  2051. * &drm_crtc_state, which is also used by the nonblocking atomic commit
  2052. * helpers and for the DRM event handling for existing userspace.
  2053. */
/*
 * struct drm_out_fence_state - bookkeeping for one OUT_FENCE_PTR entry.
 *
 * One entry is appended per out-fence requested in an atomic commit (see
 * prepare_signaling()).  On commit success the fd is installed and handed
 * to userspace; on failure complete_signaling() rolls everything back.
 */
struct drm_out_fence_state {
	/* userspace pointer the sync_file fd is written to via put_user() */
	s32 __user *out_fence_ptr;
	/* sync_file wrapping the CRTC/writeback out-fence */
	struct sync_file *sync_file;
	/* reserved fd; fd_install()ed only once the commit succeeded */
	int fd;
};
  2059. static int setup_out_fence(struct drm_out_fence_state *fence_state,
  2060. struct dma_fence *fence)
  2061. {
  2062. fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
  2063. if (fence_state->fd < 0)
  2064. return fence_state->fd;
  2065. if (put_user(fence_state->fd, fence_state->out_fence_ptr))
  2066. return -EFAULT;
  2067. fence_state->sync_file = sync_file_create(fence);
  2068. if (!fence_state->sync_file)
  2069. return -ENOMEM;
  2070. return 0;
  2071. }
/*
 * Prepare all completion signaling for an atomic commit: per-CRTC vblank
 * events (DRM_MODE_PAGE_FLIP_EVENT), per-CRTC out-fences (OUT_FENCE_PTR)
 * and writeback-connector out-fences.
 *
 * On success, *fence_state / *num_fences describe the reserved fds and
 * sync_files.  On failure this returns early with *fence_state possibly
 * partially grown; the caller unwinds via complete_signaling(), so no
 * local cleanup is needed beyond the object just allocated.
 *
 * Returns 0 on success or a negative error code.
 */
static int prepare_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, c = 0, ret;	/* c counts CRTCs touched by this commit */

	/* TEST_ONLY commits must not emit events or create fences. */
	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		/*
		 * A vblank event is needed both for the userspace-visible
		 * page-flip event and as the carrier of an out-fence.
		 */
		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(crtc, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			/*
			 * No file_priv means there is no file to queue the
			 * event on; skip the reservation but keep the event.
			 * (Presumably an in-kernel caller — the ioctl path
			 * always has a file_priv.)
			 */
			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				/* Not reserved yet, so free it directly. */
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			/* Grow the out-fence bookkeeping array by one. */
			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			/* Publish immediately so error unwind sees it. */
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			/*
			 * Stash the fence on the event; complete_signaling()
			 * also uses base.fence to tell our events apart from
			 * helper-allocated ones.
			 */
			crtc_state->event->base.fence = fence;
		}

		c++;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		struct drm_writeback_connector *wb_conn;
		struct drm_writeback_job *job;
		struct drm_out_fence_state *f;
		struct dma_fence *fence;
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_connector(state, conn);
		if (!fence_ptr)
			continue;

		job = drm_atomic_get_writeback_job(conn_state);
		if (!job)
			return -ENOMEM;

		/* Same grow-by-one pattern as the CRTC loop above. */
		f = krealloc(*fence_state, sizeof(**fence_state) *
			     (*num_fences + 1), GFP_KERNEL);
		if (!f)
			return -ENOMEM;

		memset(&f[*num_fences], 0, sizeof(*f));

		f[*num_fences].out_fence_ptr = fence_ptr;
		*fence_state = f;

		wb_conn = drm_connector_to_writeback(conn);
		fence = drm_writeback_get_out_fence(wb_conn);
		if (!fence)
			return -ENOMEM;

		ret = setup_out_fence(&f[(*num_fences)++], fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}

		job->out_fence = fence;
	}

	/*
	 * Having this flag means user mode pends on event which will never
	 * reach due to lack of at least one CRTC for signaling
	 */
	if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	return 0;
}
  2168. static void complete_signaling(struct drm_device *dev,
  2169. struct drm_atomic_state *state,
  2170. struct drm_out_fence_state *fence_state,
  2171. unsigned int num_fences,
  2172. bool install_fds)
  2173. {
  2174. struct drm_crtc *crtc;
  2175. struct drm_crtc_state *crtc_state;
  2176. int i;
  2177. if (install_fds) {
  2178. for (i = 0; i < num_fences; i++)
  2179. fd_install(fence_state[i].fd,
  2180. fence_state[i].sync_file->file);
  2181. kfree(fence_state);
  2182. return;
  2183. }
  2184. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  2185. struct drm_pending_vblank_event *event = crtc_state->event;
  2186. /*
  2187. * Free the allocated event. drm_atomic_helper_setup_commit
  2188. * can allocate an event too, so only free it if it's ours
  2189. * to prevent a double free in drm_atomic_state_clear.
  2190. */
  2191. if (event && (event->base.fence || event->base.file_priv)) {
  2192. drm_event_cancel_free(dev, &event->base);
  2193. crtc_state->event = NULL;
  2194. }
  2195. }
  2196. if (!fence_state)
  2197. return;
  2198. for (i = 0; i < num_fences; i++) {
  2199. if (fence_state[i].sync_file)
  2200. fput(fence_state[i].sync_file->file);
  2201. if (fence_state[i].fd >= 0)
  2202. put_unused_fd(fence_state[i].fd);
  2203. /* If this fails log error to the user */
  2204. if (fence_state[i].out_fence_ptr &&
  2205. put_user(-1, fence_state[i].out_fence_ptr))
  2206. DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
  2207. }
  2208. kfree(fence_state);
  2209. }
/*
 * DRM_IOCTL_MODE_ATOMIC handler: decode the userspace property-update
 * arrays, build a drm_atomic_state, then check or commit it.
 *
 * Locking: all modeset locks are taken through @ctx; on -EDEADLK the
 * state is cleared, the locks are backed off and the whole decode/commit
 * sequence is retried from scratch.
 *
 * Returns 0 on success or a negative error code; on failure any
 * reserved out-fence fds are rolled back by complete_signaling().
 */
int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	/* Userspace passes four parallel arrays as u64 handles. */
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_out_fence_state *fence_state;
	int ret = 0;
	unsigned int i, j, num_fences;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EINVAL;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
			!dev->mode_config.async_page_flip)
		return -EINVAL;

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	/* Reset per-attempt progress; a -EDEADLK backoff restarts here. */
	copied_objs = 0;
	copied_props = 0;
	fence_state = NULL;
	num_fences = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		/* Takes a reference; dropped on every exit path below. */
		obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_put(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_put(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		/* props/prop_values are indexed by the running copied_props. */
		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_put(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			/* May return -EDEADLK; handled by the retry logic. */
			ret = drm_atomic_set_property(state, obj, prop,
						      prop_value);
			if (ret) {
				drm_mode_object_put(obj);
				goto out;
			}

			copied_props++;
		}

		drm_mode_object_put(obj);
	}

	ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
				&num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		if (unlikely(drm_debug & DRM_UT_STATE))
			drm_atomic_print_state(state);

		ret = drm_atomic_commit(state);
	}

out:
	/* Install fds on success, or roll everything back on failure. */
	complete_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}