drm_atomic_helper.c

  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software"),
  7. * to deal in the Software without restriction, including without limitation
  8. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9. * and/or sell copies of the Software, and to permit persons to whom the
  10. * Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice shall be included in
  13. * all copies or substantial portions of the Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21. * OTHER DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Rob Clark <robdclark@gmail.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. */
  27. #include <drm/drmP.h>
  28. #include <drm/drm_atomic.h>
  29. #include <drm/drm_atomic_uapi.h>
  30. #include <drm/drm_plane_helper.h>
  31. #include <drm/drm_crtc_helper.h>
  32. #include <drm/drm_atomic_helper.h>
  33. #include <drm/drm_writeback.h>
  34. #include <linux/dma-fence.h>
  35. #include "drm_crtc_helper_internal.h"
  36. #include "drm_crtc_internal.h"
  37. /**
  38. * DOC: overview
  39. *
  40. * This helper library provides implementations of check and commit functions on
  41. * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
  42. * also provides convenience implementations for the atomic state handling
  43. * callbacks for drivers which don't need to subclass the drm core structures to
  44. * add their own additional internal state.
  45. *
  46. * This library also provides default implementations for the check callback in
  47. * drm_atomic_helper_check() and for the commit callback with
  48. * drm_atomic_helper_commit(). But the individual stages and callbacks are
  49. * exposed to allow drivers to mix and match and e.g. use the plane helpers only
  50. * together with a driver private modeset implementation.
  51. *
  52. * This library also provides implementations for all the legacy driver
  53. * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
  54. * drm_atomic_helper_disable_plane() and the
  55. * various functions to implement set_property callbacks. New drivers must not
  56. * implement these functions themselves but must use the provided helpers.
  57. *
  58. * The atomic helper uses the same function table structures as all other
  59. * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
  60. * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
  61. * also shares the &struct drm_plane_helper_funcs function table with the plane
  62. * helpers.
  63. */
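/*
 * Illustrative sketch (not part of the original file): a driver that relies
 * entirely on the atomic helpers would typically plug them into its
 * &struct drm_mode_config_funcs roughly like this; the fb_create hook is
 * driver specific and only hinted at here.
 */
static const struct drm_mode_config_funcs example_atomic_mode_config_funcs = {
	/* .fb_create = <driver-specific framebuffer creation hook>, */
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};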
  64. static void
  65. drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
  66. struct drm_plane_state *old_plane_state,
  67. struct drm_plane_state *plane_state,
  68. struct drm_plane *plane)
  69. {
  70. struct drm_crtc_state *crtc_state;
  71. if (old_plane_state->crtc) {
  72. crtc_state = drm_atomic_get_new_crtc_state(state,
  73. old_plane_state->crtc);
  74. if (WARN_ON(!crtc_state))
  75. return;
  76. crtc_state->planes_changed = true;
  77. }
  78. if (plane_state->crtc) {
  79. crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
  80. if (WARN_ON(!crtc_state))
  81. return;
  82. crtc_state->planes_changed = true;
  83. }
  84. }
  85. static int handle_conflicting_encoders(struct drm_atomic_state *state,
  86. bool disable_conflicting_encoders)
  87. {
  88. struct drm_connector_state *new_conn_state;
  89. struct drm_connector *connector;
  90. struct drm_connector_list_iter conn_iter;
  91. struct drm_encoder *encoder;
  92. unsigned encoder_mask = 0;
  93. int i, ret = 0;
  94. /*
  95. * First loop, find all newly assigned encoders from the connectors
  96. * part of the state. If the same encoder is assigned to multiple
  97. * connectors bail out.
  98. */
  99. for_each_new_connector_in_state(state, connector, new_conn_state, i) {
  100. const struct drm_connector_helper_funcs *funcs = connector->helper_private;
  101. struct drm_encoder *new_encoder;
  102. if (!new_conn_state->crtc)
  103. continue;
  104. if (funcs->atomic_best_encoder)
  105. new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
  106. else if (funcs->best_encoder)
  107. new_encoder = funcs->best_encoder(connector);
  108. else
  109. new_encoder = drm_atomic_helper_best_encoder(connector);
  110. if (new_encoder) {
  111. if (encoder_mask & drm_encoder_mask(new_encoder)) {
  112. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
  113. new_encoder->base.id, new_encoder->name,
  114. connector->base.id, connector->name);
  115. return -EINVAL;
  116. }
  117. encoder_mask |= drm_encoder_mask(new_encoder);
  118. }
  119. }
  120. if (!encoder_mask)
  121. return 0;
  122. /*
  123. * Second loop, iterate over all connectors not part of the state.
  124. *
  125. * If a conflicting encoder is found and disable_conflicting_encoders
  126. * is not set, an error is returned. Userspace can provide a solution
  127. * through the atomic ioctl.
  128. *
  129. * If the flag is set conflicting connectors are removed from the crtc
  130. * and the crtc is disabled if no encoder is left. This preserves
  131. * compatibility with the legacy set_config behavior.
  132. */
  133. drm_connector_list_iter_begin(state->dev, &conn_iter);
  134. drm_for_each_connector_iter(connector, &conn_iter) {
  135. struct drm_crtc_state *crtc_state;
  136. if (drm_atomic_get_new_connector_state(state, connector))
  137. continue;
  138. encoder = connector->state->best_encoder;
  139. if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
  140. continue;
  141. if (!disable_conflicting_encoders) {
  142. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
  143. encoder->base.id, encoder->name,
  144. connector->state->crtc->base.id,
  145. connector->state->crtc->name,
  146. connector->base.id, connector->name);
  147. ret = -EINVAL;
  148. goto out;
  149. }
  150. new_conn_state = drm_atomic_get_connector_state(state, connector);
  151. if (IS_ERR(new_conn_state)) {
  152. ret = PTR_ERR(new_conn_state);
  153. goto out;
  154. }
  155. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
  156. encoder->base.id, encoder->name,
  157. new_conn_state->crtc->base.id, new_conn_state->crtc->name,
  158. connector->base.id, connector->name);
  159. crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
  160. ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
  161. if (ret)
  162. goto out;
  163. if (!crtc_state->connector_mask) {
  164. ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
  165. NULL);
  166. if (ret < 0)
  167. goto out;
  168. crtc_state->active = false;
  169. }
  170. }
  171. out:
  172. drm_connector_list_iter_end(&conn_iter);
  173. return ret;
  174. }
  175. static void
  176. set_best_encoder(struct drm_atomic_state *state,
  177. struct drm_connector_state *conn_state,
  178. struct drm_encoder *encoder)
  179. {
  180. struct drm_crtc_state *crtc_state;
  181. struct drm_crtc *crtc;
  182. if (conn_state->best_encoder) {
  183. /* Unset the encoder_mask in the old crtc state. */
  184. crtc = conn_state->connector->state->crtc;
  185. /* A NULL crtc is an error here because we should have
  186. * duplicated a NULL best_encoder when crtc was NULL.
  187. * As an exception restoring duplicated atomic state
  188. * during resume is allowed, so don't warn when
  189. * best_encoder is equal to encoder we intend to set.
  190. */
  191. WARN_ON(!crtc && encoder != conn_state->best_encoder);
  192. if (crtc) {
  193. crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  194. crtc_state->encoder_mask &=
  195. ~drm_encoder_mask(conn_state->best_encoder);
  196. }
  197. }
  198. if (encoder) {
  199. crtc = conn_state->crtc;
  200. WARN_ON(!crtc);
  201. if (crtc) {
  202. crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  203. crtc_state->encoder_mask |=
  204. drm_encoder_mask(encoder);
  205. }
  206. }
  207. conn_state->best_encoder = encoder;
  208. }
  209. static void
  210. steal_encoder(struct drm_atomic_state *state,
  211. struct drm_encoder *encoder)
  212. {
  213. struct drm_crtc_state *crtc_state;
  214. struct drm_connector *connector;
  215. struct drm_connector_state *old_connector_state, *new_connector_state;
  216. int i;
  217. for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
  218. struct drm_crtc *encoder_crtc;
  219. if (new_connector_state->best_encoder != encoder)
  220. continue;
  221. encoder_crtc = old_connector_state->crtc;
  222. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
  223. encoder->base.id, encoder->name,
  224. encoder_crtc->base.id, encoder_crtc->name);
  225. set_best_encoder(state, new_connector_state, NULL);
  226. crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
  227. crtc_state->connectors_changed = true;
  228. return;
  229. }
  230. }
  231. static int
  232. update_connector_routing(struct drm_atomic_state *state,
  233. struct drm_connector *connector,
  234. struct drm_connector_state *old_connector_state,
  235. struct drm_connector_state *new_connector_state)
  236. {
  237. const struct drm_connector_helper_funcs *funcs;
  238. struct drm_encoder *new_encoder;
  239. struct drm_crtc_state *crtc_state;
  240. DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
  241. connector->base.id,
  242. connector->name);
  243. if (old_connector_state->crtc != new_connector_state->crtc) {
  244. if (old_connector_state->crtc) {
  245. crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
  246. crtc_state->connectors_changed = true;
  247. }
  248. if (new_connector_state->crtc) {
  249. crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
  250. crtc_state->connectors_changed = true;
  251. }
  252. }
  253. if (!new_connector_state->crtc) {
  254. DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
  255. connector->base.id,
  256. connector->name);
  257. set_best_encoder(state, new_connector_state, NULL);
  258. return 0;
  259. }
  260. crtc_state = drm_atomic_get_new_crtc_state(state,
  261. new_connector_state->crtc);
  262. /*
  263. * For compatibility with legacy users, we want to make sure that
  264. * we allow DPMS On->Off modesets on unregistered connectors. Modesets
  265. * which would result in anything else must be considered invalid, to
  266. * avoid turning on new displays on dead connectors.
  267. *
  268. * Since the connector can be unregistered at any point during an
  269. * atomic check or commit, this is racy. But that's OK: all we care
  270. * about is ensuring that userspace can't do anything but shut off the
  271. * display on a connector that was destroyed after it's been notified,
  272. * not before.
  273. */
  274. if (drm_connector_is_unregistered(connector) && crtc_state->active) {
  275. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
  276. connector->base.id, connector->name);
  277. return -EINVAL;
  278. }
  279. funcs = connector->helper_private;
  280. if (funcs->atomic_best_encoder)
  281. new_encoder = funcs->atomic_best_encoder(connector,
  282. new_connector_state);
  283. else if (funcs->best_encoder)
  284. new_encoder = funcs->best_encoder(connector);
  285. else
  286. new_encoder = drm_atomic_helper_best_encoder(connector);
  287. if (!new_encoder) {
  288. DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
  289. connector->base.id,
  290. connector->name);
  291. return -EINVAL;
  292. }
  293. if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
  294. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
  295. new_encoder->base.id,
  296. new_encoder->name,
  297. new_connector_state->crtc->base.id,
  298. new_connector_state->crtc->name);
  299. return -EINVAL;
  300. }
  301. if (new_encoder == new_connector_state->best_encoder) {
  302. set_best_encoder(state, new_connector_state, new_encoder);
  303. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
  304. connector->base.id,
  305. connector->name,
  306. new_encoder->base.id,
  307. new_encoder->name,
  308. new_connector_state->crtc->base.id,
  309. new_connector_state->crtc->name);
  310. return 0;
  311. }
  312. steal_encoder(state, new_encoder);
  313. set_best_encoder(state, new_connector_state, new_encoder);
  314. crtc_state->connectors_changed = true;
  315. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
  316. connector->base.id,
  317. connector->name,
  318. new_encoder->base.id,
  319. new_encoder->name,
  320. new_connector_state->crtc->base.id,
  321. new_connector_state->crtc->name);
  322. return 0;
  323. }
  324. static int
  325. mode_fixup(struct drm_atomic_state *state)
  326. {
  327. struct drm_crtc *crtc;
  328. struct drm_crtc_state *new_crtc_state;
  329. struct drm_connector *connector;
  330. struct drm_connector_state *new_conn_state;
  331. int i;
  332. int ret;
  333. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  334. if (!new_crtc_state->mode_changed &&
  335. !new_crtc_state->connectors_changed)
  336. continue;
  337. drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
  338. }
  339. for_each_new_connector_in_state(state, connector, new_conn_state, i) {
  340. const struct drm_encoder_helper_funcs *funcs;
  341. struct drm_encoder *encoder;
  342. WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
  343. if (!new_conn_state->crtc || !new_conn_state->best_encoder)
  344. continue;
  345. new_crtc_state =
  346. drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
  347. /*
  348. * Each encoder has at most one connector (since we always steal
  349. * it away), so we won't call ->mode_fixup twice.
  350. */
  351. encoder = new_conn_state->best_encoder;
  352. funcs = encoder->helper_private;
  353. ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
  354. &new_crtc_state->adjusted_mode);
  355. if (!ret) {
  356. DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
  357. return -EINVAL;
  358. }
  359. if (funcs && funcs->atomic_check) {
  360. ret = funcs->atomic_check(encoder, new_crtc_state,
  361. new_conn_state);
  362. if (ret) {
  363. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
  364. encoder->base.id, encoder->name);
  365. return ret;
  366. }
  367. } else if (funcs && funcs->mode_fixup) {
  368. ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
  369. &new_crtc_state->adjusted_mode);
  370. if (!ret) {
  371. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
  372. encoder->base.id, encoder->name);
  373. return -EINVAL;
  374. }
  375. }
  376. }
  377. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  378. const struct drm_crtc_helper_funcs *funcs;
  379. if (!new_crtc_state->enable)
  380. continue;
  381. if (!new_crtc_state->mode_changed &&
  382. !new_crtc_state->connectors_changed)
  383. continue;
  384. funcs = crtc->helper_private;
  385. if (!funcs->mode_fixup)
  386. continue;
  387. ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
  388. &new_crtc_state->adjusted_mode);
  389. if (!ret) {
  390. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
  391. crtc->base.id, crtc->name);
  392. return -EINVAL;
  393. }
  394. }
  395. return 0;
  396. }
  397. static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
  398. struct drm_encoder *encoder,
  399. struct drm_crtc *crtc,
  400. struct drm_display_mode *mode)
  401. {
  402. enum drm_mode_status ret;
  403. ret = drm_encoder_mode_valid(encoder, mode);
  404. if (ret != MODE_OK) {
  405. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
  406. encoder->base.id, encoder->name);
  407. return ret;
  408. }
  409. ret = drm_bridge_mode_valid(encoder->bridge, mode);
  410. if (ret != MODE_OK) {
  411. DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
  412. return ret;
  413. }
  414. ret = drm_crtc_mode_valid(crtc, mode);
  415. if (ret != MODE_OK) {
  416. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
  417. crtc->base.id, crtc->name);
  418. return ret;
  419. }
  420. return ret;
  421. }
  422. static int
  423. mode_valid(struct drm_atomic_state *state)
  424. {
  425. struct drm_connector_state *conn_state;
  426. struct drm_connector *connector;
  427. int i;
  428. for_each_new_connector_in_state(state, connector, conn_state, i) {
  429. struct drm_encoder *encoder = conn_state->best_encoder;
  430. struct drm_crtc *crtc = conn_state->crtc;
  431. struct drm_crtc_state *crtc_state;
  432. enum drm_mode_status mode_status;
  433. struct drm_display_mode *mode;
  434. if (!crtc || !encoder)
  435. continue;
  436. crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  437. if (!crtc_state)
  438. continue;
  439. if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
  440. continue;
  441. mode = &crtc_state->mode;
  442. mode_status = mode_valid_path(connector, encoder, crtc, mode);
  443. if (mode_status != MODE_OK)
  444. return -EINVAL;
  445. }
  446. return 0;
  447. }
  448. /**
  449. * drm_atomic_helper_check_modeset - validate state object for modeset changes
  450. * @dev: DRM device
  451. * @state: the driver state object
  452. *
  453. * Check the state object to see if the requested state is physically possible.
  454. * This does all the crtc and connector related computations for an atomic
  455. * update and adds any additional connectors needed for full modesets. It calls
  456. * the various per-object callbacks in the follow order:
  457. *
  458. * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
  459. * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
  460. * 3. If it's determined a modeset is needed then all connectors on the affected
  461. * crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
  462. * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
  463. * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
  464. * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
  465. * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
  466. * This function is only called when the encoder will be part of a configured crtc;
  467. * it must not be used for implementing connector property validation.
  468. * If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
  469. * instead.
  470. * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
  471. *
  472. * &drm_crtc_state.mode_changed is set when the input mode is changed.
  473. * &drm_crtc_state.connectors_changed is set when a connector is added or
  474. * removed from the crtc. &drm_crtc_state.active_changed is set when
  475. * &drm_crtc_state.active changes, which is used for DPMS.
  476. * See also: drm_atomic_crtc_needs_modeset()
  477. *
  478. * IMPORTANT:
  479. *
  480. * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
  481. * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
  482. * without a full modeset) _must_ call this function after that
  483. * change. It is permitted to call this function multiple times for the same
  484. * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
  485. * upon the adjusted dotclock for fifo space allocation and watermark
  486. * computation.
  487. *
  488. * RETURNS:
  489. * Zero for success or -errno
  490. */
  491. int
  492. drm_atomic_helper_check_modeset(struct drm_device *dev,
  493. struct drm_atomic_state *state)
  494. {
  495. struct drm_crtc *crtc;
  496. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  497. struct drm_connector *connector;
  498. struct drm_connector_state *old_connector_state, *new_connector_state;
  499. int i, ret;
  500. unsigned connectors_mask = 0;
  501. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  502. bool has_connectors =
  503. !!new_crtc_state->connector_mask;
  504. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  505. if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
  506. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
  507. crtc->base.id, crtc->name);
  508. new_crtc_state->mode_changed = true;
  509. }
  510. if (old_crtc_state->enable != new_crtc_state->enable) {
  511. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
  512. crtc->base.id, crtc->name);
  513. /*
  514. * For clarity this assignment is done here, but
  515. * enable == 0 is only true when there are no
  516. * connectors and a NULL mode.
  517. *
  518. * The other way around is true as well. enable != 0
  519. * iff connectors are attached and a mode is set.
  520. */
  521. new_crtc_state->mode_changed = true;
  522. new_crtc_state->connectors_changed = true;
  523. }
  524. if (old_crtc_state->active != new_crtc_state->active) {
  525. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
  526. crtc->base.id, crtc->name);
  527. new_crtc_state->active_changed = true;
  528. }
  529. if (new_crtc_state->enable != has_connectors) {
  530. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
  531. crtc->base.id, crtc->name);
  532. return -EINVAL;
  533. }
  534. }
  535. ret = handle_conflicting_encoders(state, false);
  536. if (ret)
  537. return ret;
  538. for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
  539. const struct drm_connector_helper_funcs *funcs = connector->helper_private;
  540. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  541. /*
  542. * This only sets crtc->connectors_changed for routing changes,
  543. * drivers must set crtc->connectors_changed themselves when
  544. * connector properties need to be updated.
  545. */
  546. ret = update_connector_routing(state, connector,
  547. old_connector_state,
  548. new_connector_state);
  549. if (ret)
  550. return ret;
  551. if (old_connector_state->crtc) {
  552. new_crtc_state = drm_atomic_get_new_crtc_state(state,
  553. old_connector_state->crtc);
  554. if (old_connector_state->link_status !=
  555. new_connector_state->link_status)
  556. new_crtc_state->connectors_changed = true;
  557. }
  558. if (funcs->atomic_check)
  559. ret = funcs->atomic_check(connector, new_connector_state);
  560. if (ret)
  561. return ret;
  562. connectors_mask |= BIT(i);
  563. }
  564. /*
  565. * After all the routing has been prepared we need to add in any
  566. * connector which is itself unchanged, but whose crtc changes its
  567. * configuration. This must be done before calling mode_fixup in case a
  568. * crtc only changed its mode but has the same set of connectors.
  569. */
  570. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  571. if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
  572. continue;
  573. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
  574. crtc->base.id, crtc->name,
  575. new_crtc_state->enable ? 'y' : 'n',
  576. new_crtc_state->active ? 'y' : 'n');
  577. ret = drm_atomic_add_affected_connectors(state, crtc);
  578. if (ret != 0)
  579. return ret;
  580. ret = drm_atomic_add_affected_planes(state, crtc);
  581. if (ret != 0)
  582. return ret;
  583. }
  584. /*
  585. * Iterate over all connectors again, to make sure atomic_check()
  586. * has been called on them when a modeset is forced.
  587. */
  588. for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
  589. const struct drm_connector_helper_funcs *funcs = connector->helper_private;
  590. if (connectors_mask & BIT(i))
  591. continue;
  592. if (funcs->atomic_check)
  593. ret = funcs->atomic_check(connector, new_connector_state);
  594. if (ret)
  595. return ret;
  596. }
  597. ret = mode_valid(state);
  598. if (ret)
  599. return ret;
  600. return mode_fixup(state);
  601. }
  602. EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
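/*
 * Illustrative sketch (not part of the original file): per the IMPORTANT note
 * above, a driver whose private checks can upgrade a plane update to a full
 * modeset re-runs drm_atomic_helper_check_modeset() afterwards. The
 * example_atomic_check() name is hypothetical.
 */
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	/* Driver-private checks that may set &drm_crtc_state.mode_changed. */

	/* Re-run the modeset checks to pick up any upgraded CRTC state. */
	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	return drm_atomic_helper_check_planes(dev, state);
}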
  603. /**
  604. * drm_atomic_helper_check_plane_state() - Check plane state for validity
  605. * @plane_state: plane state to check
  606. * @crtc_state: crtc state to check
  607. * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
  608. * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
  609. * @can_position: is it legal to position the plane such that it
  610. * doesn't cover the entire crtc? This will generally
  611. * only be false for primary planes.
  612. * @can_update_disabled: can the plane be updated while the crtc
  613. * is disabled?
  614. *
  615. * Checks that a desired plane update is valid, and updates various
  616. * bits of derived state (clipped coordinates etc.). Drivers that provide
  617. * their own plane handling rather than helper-provided implementations may
  618. * still wish to call this function to avoid duplication of error checking
  619. * code.
  620. *
  621. * RETURNS:
  622. * Zero if update appears valid, error code on failure
  623. */
  624. int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
  625. const struct drm_crtc_state *crtc_state,
  626. int min_scale,
  627. int max_scale,
  628. bool can_position,
  629. bool can_update_disabled)
  630. {
  631. struct drm_framebuffer *fb = plane_state->fb;
  632. struct drm_rect *src = &plane_state->src;
  633. struct drm_rect *dst = &plane_state->dst;
  634. unsigned int rotation = plane_state->rotation;
  635. struct drm_rect clip = {};
  636. int hscale, vscale;
  637. WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
  638. *src = drm_plane_state_src(plane_state);
  639. *dst = drm_plane_state_dest(plane_state);
  640. if (!fb) {
  641. plane_state->visible = false;
  642. return 0;
  643. }
  644. /* crtc should only be NULL when disabling (i.e., !fb) */
  645. if (WARN_ON(!plane_state->crtc)) {
  646. plane_state->visible = false;
  647. return 0;
  648. }
  649. if (!crtc_state->enable && !can_update_disabled) {
  650. DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
  651. return -EINVAL;
  652. }
  653. drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
  654. /* Check scaling */
  655. hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
  656. vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
  657. if (hscale < 0 || vscale < 0) {
  658. DRM_DEBUG_KMS("Invalid scaling of plane\n");
  659. drm_rect_debug_print("src: ", &plane_state->src, true);
  660. drm_rect_debug_print("dst: ", &plane_state->dst, false);
  661. return -ERANGE;
  662. }
  663. if (crtc_state->enable)
  664. drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
  665. plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);
  666. drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
  667. if (!plane_state->visible)
  668. /*
  669. * Plane isn't visible; some drivers can handle this
  670. * so we just return success here. Drivers that can't
  671. * (including those that use the primary plane helper's
  672. * update function) will return an error from their
  673. * update_plane handler.
  674. */
  675. return 0;
  676. if (!can_position && !drm_rect_equals(dst, &clip)) {
  677. DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
  678. drm_rect_debug_print("dst: ", dst, false);
  679. drm_rect_debug_print("clip: ", &clip, false);
  680. return -EINVAL;
  681. }
  682. return 0;
  683. }
  684. EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
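/*
 * Illustrative sketch (not part of the original file): a driver's
 * &drm_plane_helper_funcs.atomic_check hook for a plane that cannot scale
 * might clip against the CRTC like this. The example_* name is hypothetical.
 */
static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   true, true);
}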
  685. /**
  686. * drm_atomic_helper_check_planes - validate state object for planes changes
  687. * @dev: DRM device
  688. * @state: the driver state object
  689. *
  690. * Check the state object to see if the requested state is physically possible.
  691. * This does all the plane update related checks by calling into the
  692. * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
  693. * hooks provided by the driver.
  694. *
  695. * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
  696. * updated planes.
  697. *
  698. * RETURNS:
  699. * Zero for success or -errno
  700. */
  701. int
  702. drm_atomic_helper_check_planes(struct drm_device *dev,
  703. struct drm_atomic_state *state)
  704. {
  705. struct drm_crtc *crtc;
  706. struct drm_crtc_state *new_crtc_state;
  707. struct drm_plane *plane;
  708. struct drm_plane_state *new_plane_state, *old_plane_state;
  709. int i, ret = 0;
  710. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
  711. const struct drm_plane_helper_funcs *funcs;
  712. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  713. funcs = plane->helper_private;
  714. drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
  715. if (!funcs || !funcs->atomic_check)
  716. continue;
  717. ret = funcs->atomic_check(plane, new_plane_state);
  718. if (ret) {
  719. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
  720. plane->base.id, plane->name);
  721. return ret;
  722. }
  723. }
  724. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  725. const struct drm_crtc_helper_funcs *funcs;
  726. funcs = crtc->helper_private;
  727. if (!funcs || !funcs->atomic_check)
  728. continue;
  729. ret = funcs->atomic_check(crtc, new_crtc_state);
  730. if (ret) {
  731. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
  732. crtc->base.id, crtc->name);
  733. return ret;
  734. }
  735. }
  736. return ret;
  737. }
  738. EXPORT_SYMBOL(drm_atomic_helper_check_planes);
  739. /**
  740. * drm_atomic_helper_check - validate state object
  741. * @dev: DRM device
  742. * @state: the driver state object
  743. *
  744. * Check the state object to see if the requested state is physically possible.
  745. * Only crtcs and planes have check callbacks, so for any additional (global)
  746. * checking that a driver needs it can simply wrap that around this function.
  747. * Drivers without such needs can directly use this as their
  748. * &drm_mode_config_funcs.atomic_check callback.
  749. *
  750. * This just wraps the two parts of the state checking for planes and modeset
  751. * state in the default order: First it calls drm_atomic_helper_check_modeset()
  752. * and then drm_atomic_helper_check_planes(). The assumption is that the
  753. * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
  754. * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
  755. * watermarks.
  756. *
  757. * Note that zpos normalization will add all enabled planes to the state, which
  758. * might not be desired for some drivers.
  759. * For example, enabling/disabling a cursor plane with a fixed zpos value
  760. * would force all other enabled planes into the state change.
  761. *
  762. * RETURNS:
  763. * Zero for success or -errno
  764. */
  765. int drm_atomic_helper_check(struct drm_device *dev,
  766. struct drm_atomic_state *state)
  767. {
  768. int ret;
  769. ret = drm_atomic_helper_check_modeset(dev, state);
  770. if (ret)
  771. return ret;
  772. if (dev->mode_config.normalize_zpos) {
  773. ret = drm_atomic_normalize_zpos(dev, state);
  774. if (ret)
  775. return ret;
  776. }
  777. ret = drm_atomic_helper_check_planes(dev, state);
  778. if (ret)
  779. return ret;
  780. if (state->legacy_cursor_update)
  781. state->async_update = !drm_atomic_helper_async_check(dev, state);
  782. return ret;
  783. }
  784. EXPORT_SYMBOL(drm_atomic_helper_check);
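/*
 * Illustrative sketch (not part of the original file): as noted above, a
 * driver with additional device-wide constraints can simply wrap this helper
 * from its &drm_mode_config_funcs.atomic_check implementation. The
 * example_* name is hypothetical.
 */
static int example_full_atomic_check(struct drm_device *dev,
				     struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/*
	 * Device-wide validation (e.g. shared bandwidth or clock limits)
	 * would go here; this sketch has nothing further to check.
	 */
	return 0;
}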
  785. static void
  786. disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
  787. {
  788. struct drm_connector *connector;
  789. struct drm_connector_state *old_conn_state, *new_conn_state;
  790. struct drm_crtc *crtc;
  791. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  792. int i;
  793. for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
  794. const struct drm_encoder_helper_funcs *funcs;
  795. struct drm_encoder *encoder;
  796. /* Shut down everything that's in the changeset and currently
  797. * still on. So need to check the old, saved state. */
  798. if (!old_conn_state->crtc)
  799. continue;
  800. old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
  801. if (!old_crtc_state->active ||
  802. !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
  803. continue;
  804. encoder = old_conn_state->best_encoder;
  805. /* We shouldn't get this far if we didn't previously have
  806. * an encoder.. but WARN_ON() rather than explode.
  807. */
  808. if (WARN_ON(!encoder))
  809. continue;
  810. funcs = encoder->helper_private;
  811. DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
  812. encoder->base.id, encoder->name);
  813. /*
  814. * Each encoder has at most one connector (since we always steal
  815. * it away), so we won't call disable hooks twice.
  816. */
  817. drm_bridge_disable(encoder->bridge);
  818. /* Right function depends upon target state. */
  819. if (funcs) {
  820. if (new_conn_state->crtc && funcs->prepare)
  821. funcs->prepare(encoder);
  822. else if (funcs->disable)
  823. funcs->disable(encoder);
  824. else if (funcs->dpms)
  825. funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
  826. }
  827. drm_bridge_post_disable(encoder->bridge);
  828. }
  829. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  830. const struct drm_crtc_helper_funcs *funcs;
  831. int ret;
  832. /* Shut down everything that needs a full modeset. */
  833. if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
  834. continue;
  835. if (!old_crtc_state->active)
  836. continue;
  837. funcs = crtc->helper_private;
  838. DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
  839. crtc->base.id, crtc->name);
  840. /* Right function depends upon target state. */
  841. if (new_crtc_state->enable && funcs->prepare)
  842. funcs->prepare(crtc);
  843. else if (funcs->atomic_disable)
  844. funcs->atomic_disable(crtc, old_crtc_state);
  845. else if (funcs->disable)
  846. funcs->disable(crtc);
  847. else
  848. funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
  849. if (!(dev->irq_enabled && dev->num_crtcs))
  850. continue;
  851. ret = drm_crtc_vblank_get(crtc);
  852. WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
  853. if (ret == 0)
  854. drm_crtc_vblank_put(crtc);
  855. }
  856. }
  857. /**
  858. * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
  859. * @dev: DRM device
  860. * @old_state: atomic state object with old state structures
  861. *
  862. * This function updates all the various legacy modeset state pointers in
  863. * connectors, encoders and crtcs. It also updates the timestamping constants
  864. * used for precise vblank timestamps by calling
  865. * drm_calc_timestamping_constants().
  866. *
  867. * Drivers can use this for building their own atomic commit if they don't have
  868. * a pure helper-based modeset implementation.
  869. *
  870. * Since these updates are not synchronized with any locking, only code paths
  871. * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
  872. * legacy state filled out by this helper. De facto this means this helper and
  873. * the legacy state pointers are only really useful for transitioning an
  874. * existing driver to the atomic world.
  875. */
  876. void
  877. drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
  878. struct drm_atomic_state *old_state)
  879. {
  880. struct drm_connector *connector;
  881. struct drm_connector_state *old_conn_state, *new_conn_state;
  882. struct drm_crtc *crtc;
  883. struct drm_crtc_state *new_crtc_state;
  884. int i;
  885. /* clear out existing links and update dpms */
  886. for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
  887. if (connector->encoder) {
  888. WARN_ON(!connector->encoder->crtc);
  889. connector->encoder->crtc = NULL;
  890. connector->encoder = NULL;
  891. }
  892. crtc = new_conn_state->crtc;
  893. if ((!crtc && old_conn_state->crtc) ||
  894. (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
  895. int mode = DRM_MODE_DPMS_OFF;
  896. if (crtc && crtc->state->active)
  897. mode = DRM_MODE_DPMS_ON;
  898. connector->dpms = mode;
  899. }
  900. }
  901. /* set new links */
  902. for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
  903. if (!new_conn_state->crtc)
  904. continue;
  905. if (WARN_ON(!new_conn_state->best_encoder))
  906. continue;
  907. connector->encoder = new_conn_state->best_encoder;
  908. connector->encoder->crtc = new_conn_state->crtc;
  909. }
  910. /* set legacy state in the crtc structure */
  911. for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
  912. struct drm_plane *primary = crtc->primary;
  913. struct drm_plane_state *new_plane_state;
  914. crtc->mode = new_crtc_state->mode;
  915. crtc->enabled = new_crtc_state->enable;
  916. new_plane_state =
  917. drm_atomic_get_new_plane_state(old_state, primary);
  918. if (new_plane_state && new_plane_state->crtc == crtc) {
  919. crtc->x = new_plane_state->src_x >> 16;
  920. crtc->y = new_plane_state->src_y >> 16;
  921. }
  922. if (new_crtc_state->enable)
  923. drm_calc_timestamping_constants(crtc,
  924. &new_crtc_state->adjusted_mode);
  925. }
  926. }
  927. EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
  928. static void
  929. crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
  930. {
  931. struct drm_crtc *crtc;
  932. struct drm_crtc_state *new_crtc_state;
  933. struct drm_connector *connector;
  934. struct drm_connector_state *new_conn_state;
  935. int i;
  936. for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
  937. const struct drm_crtc_helper_funcs *funcs;
  938. if (!new_crtc_state->mode_changed)
  939. continue;
  940. funcs = crtc->helper_private;
  941. if (new_crtc_state->enable && funcs->mode_set_nofb) {
  942. DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
  943. crtc->base.id, crtc->name);
  944. funcs->mode_set_nofb(crtc);
  945. }
  946. }
  947. for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
  948. const struct drm_encoder_helper_funcs *funcs;
  949. struct drm_encoder *encoder;
  950. struct drm_display_mode *mode, *adjusted_mode;
  951. if (!new_conn_state->best_encoder)
  952. continue;
  953. encoder = new_conn_state->best_encoder;
  954. funcs = encoder->helper_private;
  955. new_crtc_state = new_conn_state->crtc->state;
  956. mode = &new_crtc_state->mode;
  957. adjusted_mode = &new_crtc_state->adjusted_mode;
  958. if (!new_crtc_state->mode_changed)
  959. continue;
  960. DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
  961. encoder->base.id, encoder->name);
  962. /*
  963. * Each encoder has at most one connector (since we always steal
  964. * it away), so we won't call mode_set hooks twice.
  965. */
  966. if (funcs && funcs->atomic_mode_set) {
  967. funcs->atomic_mode_set(encoder, new_crtc_state,
  968. new_conn_state);
  969. } else if (funcs && funcs->mode_set) {
  970. funcs->mode_set(encoder, mode, adjusted_mode);
  971. }
  972. drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
  973. }
  974. }
  975. /**
  976. * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
  977. * @dev: DRM device
  978. * @old_state: atomic state object with old state structures
  979. *
  980. * This function shuts down all the outputs that need to be shut down and
  981. * prepares them (if required) with the new mode.
  982. *
  983. * For compatibility with legacy crtc helpers this should be called before
  984. * drm_atomic_helper_commit_planes(), which is what the default commit function
  985. * does. But drivers with different needs can group the modeset commits together
  986. * and do the plane commits at the end. This is useful for drivers doing runtime
  987. * PM since plane updates then only happen when the CRTC is actually enabled.
  988. */
  989. void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
  990. struct drm_atomic_state *old_state)
  991. {
  992. disable_outputs(dev, old_state);
  993. drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
  994. crtc_set_mode(dev, old_state);
  995. }
  996. EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
  997. static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
  998. struct drm_atomic_state *old_state)
  999. {
  1000. struct drm_connector *connector;
  1001. struct drm_connector_state *new_conn_state;
  1002. int i;
  1003. for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
  1004. const struct drm_connector_helper_funcs *funcs;
  1005. funcs = connector->helper_private;
  1006. if (!funcs->atomic_commit)
  1007. continue;
  1008. if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
  1009. WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
  1010. funcs->atomic_commit(connector, new_conn_state);
  1011. }
  1012. }
  1013. }
  1014. /**
  1015. * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
  1016. * @dev: DRM device
  1017. * @old_state: atomic state object with old state structures
  1018. *
  1019. * This function enables all the outputs with the new configuration which had to
  1020. * be turned off for the update.
  1021. *
  1022. * For compatibility with legacy crtc helpers this should be called after
  1023. * drm_atomic_helper_commit_planes(), which is what the default commit function
  1024. * does. But drivers with different needs can group the modeset commits together
  1025. * and do the plane commits at the end. This is useful for drivers doing runtime
1026. PM since plane updates then only happen when the CRTC is actually enabled.
  1027. */
  1028. void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
  1029. struct drm_atomic_state *old_state)
  1030. {
  1031. struct drm_crtc *crtc;
  1032. struct drm_crtc_state *old_crtc_state;
  1033. struct drm_crtc_state *new_crtc_state;
  1034. struct drm_connector *connector;
  1035. struct drm_connector_state *new_conn_state;
  1036. int i;
  1037. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1038. const struct drm_crtc_helper_funcs *funcs;
  1039. /* Need to filter out CRTCs where only planes change. */
  1040. if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
  1041. continue;
  1042. if (!new_crtc_state->active)
  1043. continue;
  1044. funcs = crtc->helper_private;
  1045. if (new_crtc_state->enable) {
  1046. DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
  1047. crtc->base.id, crtc->name);
  1048. if (funcs->atomic_enable)
  1049. funcs->atomic_enable(crtc, old_crtc_state);
  1050. else
  1051. funcs->commit(crtc);
  1052. }
  1053. }
  1054. for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
  1055. const struct drm_encoder_helper_funcs *funcs;
  1056. struct drm_encoder *encoder;
  1057. if (!new_conn_state->best_encoder)
  1058. continue;
  1059. if (!new_conn_state->crtc->state->active ||
  1060. !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
  1061. continue;
  1062. encoder = new_conn_state->best_encoder;
  1063. funcs = encoder->helper_private;
  1064. DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
  1065. encoder->base.id, encoder->name);
  1066. /*
  1067. * Each encoder has at most one connector (since we always steal
  1068. * it away), so we won't call enable hooks twice.
  1069. */
  1070. drm_bridge_pre_enable(encoder->bridge);
  1071. if (funcs) {
  1072. if (funcs->enable)
  1073. funcs->enable(encoder);
  1074. else if (funcs->commit)
  1075. funcs->commit(encoder);
  1076. }
  1077. drm_bridge_enable(encoder->bridge);
  1078. }
  1079. drm_atomic_helper_commit_writebacks(dev, old_state);
  1080. }
  1081. EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
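/*
 * Illustrative sketch (not part of this file): the disable/modeset/enable
 * phases above call into &drm_crtc_helper_funcs hooks that a driver registers
 * with drm_crtc_helper_add(). The foo_* names are hypothetical.
 *
 *	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *		.mode_set_nofb	= foo_crtc_mode_set_nofb,
 *		.atomic_enable	= foo_crtc_atomic_enable,
 *		.atomic_disable	= foo_crtc_atomic_disable,
 *		.atomic_flush	= foo_crtc_atomic_flush,
 *	};
 *
 *	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
 */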
  1082. /**
  1083. * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
  1084. * @dev: DRM device
  1085. * @state: atomic state object with old state structures
  1086. * @pre_swap: If true, do an interruptible wait, and @state is the new state.
  1087. * Otherwise @state is the old state.
  1088. *
1089. For implicit sync, drivers should fish the exclusive fence out from the
  1090. * incoming fb's and stash it in the drm_plane_state. This is called after
  1091. * drm_atomic_helper_swap_state() so it uses the current plane state (and
  1092. * just uses the atomic state to find the changed planes)
  1093. *
  1094. * Note that @pre_swap is needed since the point where we block for fences moves
  1095. * around depending upon whether an atomic commit is blocking or
  1096. * non-blocking. For non-blocking commit all waiting needs to happen after
  1097. * drm_atomic_helper_swap_state() is called, but for blocking commits we want
  1098. * to wait **before** we do anything that can't be easily rolled back. That is
  1099. * before we call drm_atomic_helper_swap_state().
  1100. *
  1101. * Returns zero if success or < 0 if dma_fence_wait() fails.
  1102. */
  1103. int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
  1104. struct drm_atomic_state *state,
  1105. bool pre_swap)
  1106. {
  1107. struct drm_plane *plane;
  1108. struct drm_plane_state *new_plane_state;
  1109. int i, ret;
  1110. for_each_new_plane_in_state(state, plane, new_plane_state, i) {
  1111. if (!new_plane_state->fence)
  1112. continue;
  1113. WARN_ON(!new_plane_state->fb);
  1114. /*
  1115. * If waiting for fences pre-swap (ie: nonblock), userspace can
  1116. * still interrupt the operation. Instead of blocking until the
  1117. * timer expires, make the wait interruptible.
  1118. */
  1119. ret = dma_fence_wait(new_plane_state->fence, pre_swap);
  1120. if (ret)
  1121. return ret;
  1122. dma_fence_put(new_plane_state->fence);
  1123. new_plane_state->fence = NULL;
  1124. }
  1125. return 0;
  1126. }
  1127. EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
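/*
 * Minimal sketch, assuming a GEM-backed driver (the foo_* name is made up):
 * the fence this helper waits on is typically stashed from
 * &drm_plane_helper_funcs.prepare_fb, for example by reusing
 * drm_gem_fb_prepare_fb(), which pulls the exclusive fence out of the
 * framebuffer's GEM object and stores it in &drm_plane_state.fence.
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *state)
 *	{
 *		return drm_gem_fb_prepare_fb(plane, state);
 *	}
 */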
  1128. /**
  1129. * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
  1130. * @dev: DRM device
  1131. * @old_state: atomic state object with old state structures
  1132. *
1133. * Helper to, after atomic commit, wait for vblanks on all affected
  1134. * crtcs (ie. before cleaning up old framebuffers using
  1135. * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
  1136. * framebuffers have actually changed to optimize for the legacy cursor and
  1137. * plane update use-case.
  1138. *
  1139. * Drivers using the nonblocking commit tracking support initialized by calling
  1140. * drm_atomic_helper_setup_commit() should look at
  1141. * drm_atomic_helper_wait_for_flip_done() as an alternative.
  1142. */
  1143. void
  1144. drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
  1145. struct drm_atomic_state *old_state)
  1146. {
  1147. struct drm_crtc *crtc;
  1148. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1149. int i, ret;
  1150. unsigned crtc_mask = 0;
  1151. /*
  1152. * Legacy cursor ioctls are completely unsynced, and userspace
  1153. * relies on that (by doing tons of cursor updates).
  1154. */
  1155. if (old_state->legacy_cursor_update)
  1156. return;
  1157. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1158. if (!new_crtc_state->active)
  1159. continue;
  1160. ret = drm_crtc_vblank_get(crtc);
  1161. if (ret != 0)
  1162. continue;
  1163. crtc_mask |= drm_crtc_mask(crtc);
  1164. old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
  1165. }
  1166. for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
  1167. if (!(crtc_mask & drm_crtc_mask(crtc)))
  1168. continue;
  1169. ret = wait_event_timeout(dev->vblank[i].queue,
  1170. old_state->crtcs[i].last_vblank_count !=
  1171. drm_crtc_vblank_count(crtc),
  1172. msecs_to_jiffies(50));
  1173. WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
  1174. crtc->base.id, crtc->name);
  1175. drm_crtc_vblank_put(crtc);
  1176. }
  1177. }
  1178. EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  1179. /**
  1180. * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
  1181. * @dev: DRM device
  1182. * @old_state: atomic state object with old state structures
  1183. *
1184. Helper to, after atomic commit, wait for page flips on all affected
  1185. * crtcs (ie. before cleaning up old framebuffers using
  1186. * drm_atomic_helper_cleanup_planes()). Compared to
1187. drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
  1188. * CRTCs, assuming that cursors-only updates are signalling their completion
  1189. * immediately (or using a different path).
  1190. *
  1191. * This requires that drivers use the nonblocking commit tracking support
  1192. * initialized using drm_atomic_helper_setup_commit().
  1193. */
  1194. void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
  1195. struct drm_atomic_state *old_state)
  1196. {
  1197. struct drm_crtc *crtc;
  1198. int i;
  1199. for (i = 0; i < dev->mode_config.num_crtc; i++) {
  1200. struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
  1201. int ret;
  1202. crtc = old_state->crtcs[i].ptr;
  1203. if (!crtc || !commit)
  1204. continue;
  1205. ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
  1206. if (ret == 0)
  1207. DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
  1208. crtc->base.id, crtc->name);
  1209. }
  1210. }
  1211. EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
  1212. /**
  1213. * drm_atomic_helper_commit_tail - commit atomic update to hardware
  1214. * @old_state: atomic state object with old state structures
  1215. *
  1216. * This is the default implementation for the
  1217. * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
  1218. * that do not support runtime_pm or do not need the CRTC to be
  1219. * enabled to perform a commit. Otherwise, see
  1220. * drm_atomic_helper_commit_tail_rpm().
  1221. *
  1222. * Note that the default ordering of how the various stages are called is to
1223. match the legacy modeset helper library as closely as possible.
  1224. */
  1225. void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
  1226. {
  1227. struct drm_device *dev = old_state->dev;
  1228. drm_atomic_helper_commit_modeset_disables(dev, old_state);
  1229. drm_atomic_helper_commit_planes(dev, old_state, 0);
  1230. drm_atomic_helper_commit_modeset_enables(dev, old_state);
  1231. drm_atomic_helper_fake_vblank(old_state);
  1232. drm_atomic_helper_commit_hw_done(old_state);
  1233. drm_atomic_helper_wait_for_vblanks(dev, old_state);
  1234. drm_atomic_helper_cleanup_planes(dev, old_state);
  1235. }
  1236. EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
  1237. /**
  1238. * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
  1239. * @old_state: new modeset state to be committed
  1240. *
  1241. * This is an alternative implementation for the
  1242. * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
  1243. * that support runtime_pm or need the CRTC to be enabled to perform a
  1244. * commit. Otherwise, one should use the default implementation
  1245. * drm_atomic_helper_commit_tail().
  1246. */
  1247. void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
  1248. {
  1249. struct drm_device *dev = old_state->dev;
  1250. drm_atomic_helper_commit_modeset_disables(dev, old_state);
  1251. drm_atomic_helper_commit_modeset_enables(dev, old_state);
  1252. drm_atomic_helper_commit_planes(dev, old_state,
  1253. DRM_PLANE_COMMIT_ACTIVE_ONLY);
  1254. drm_atomic_helper_fake_vblank(old_state);
  1255. drm_atomic_helper_commit_hw_done(old_state);
  1256. drm_atomic_helper_wait_for_vblanks(dev, old_state);
  1257. drm_atomic_helper_cleanup_planes(dev, old_state);
  1258. }
  1259. EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
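/*
 * Usage sketch: drivers pick between the two commit_tail implementations above
 * (or roll their own) through &drm_mode_config_helper_funcs. The foo_* name is
 * hypothetical.
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 *	// at init time:
 *	drm->mode_config.helper_private = &foo_mode_config_helpers;
 */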
  1260. static void commit_tail(struct drm_atomic_state *old_state)
  1261. {
  1262. struct drm_device *dev = old_state->dev;
  1263. const struct drm_mode_config_helper_funcs *funcs;
  1264. funcs = dev->mode_config.helper_private;
  1265. drm_atomic_helper_wait_for_fences(dev, old_state, false);
  1266. drm_atomic_helper_wait_for_dependencies(old_state);
  1267. if (funcs && funcs->atomic_commit_tail)
  1268. funcs->atomic_commit_tail(old_state);
  1269. else
  1270. drm_atomic_helper_commit_tail(old_state);
  1271. drm_atomic_helper_commit_cleanup_done(old_state);
  1272. drm_atomic_state_put(old_state);
  1273. }
  1274. static void commit_work(struct work_struct *work)
  1275. {
  1276. struct drm_atomic_state *state = container_of(work,
  1277. struct drm_atomic_state,
  1278. commit_work);
  1279. commit_tail(state);
  1280. }
  1281. /**
1282. * drm_atomic_helper_async_check - check if state can be committed asynchronously
  1283. * @dev: DRM device
  1284. * @state: the driver state object
  1285. *
  1286. * This helper will check if it is possible to commit the state asynchronously.
  1287. * Async commits are not supposed to swap the states like normal sync commits
  1288. * but just do in-place changes on the current state.
  1289. *
  1290. * It will return 0 if the commit can happen in an asynchronous fashion or error
1291. if not. Note that an error just means it can't be committed asynchronously; if it
1292. fails, the commit should be treated like a normal synchronous commit.
  1293. */
  1294. int drm_atomic_helper_async_check(struct drm_device *dev,
  1295. struct drm_atomic_state *state)
  1296. {
  1297. struct drm_crtc *crtc;
  1298. struct drm_crtc_state *crtc_state;
  1299. struct drm_plane *plane = NULL;
  1300. struct drm_plane_state *old_plane_state = NULL;
  1301. struct drm_plane_state *new_plane_state = NULL;
  1302. const struct drm_plane_helper_funcs *funcs;
  1303. int i, n_planes = 0;
  1304. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1305. if (drm_atomic_crtc_needs_modeset(crtc_state))
  1306. return -EINVAL;
  1307. }
  1308. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
  1309. n_planes++;
  1310. /* FIXME: we support only single plane updates for now */
  1311. if (n_planes != 1)
  1312. return -EINVAL;
  1313. if (!new_plane_state->crtc ||
  1314. old_plane_state->crtc != new_plane_state->crtc)
  1315. return -EINVAL;
  1316. funcs = plane->helper_private;
  1317. if (!funcs->atomic_async_update)
  1318. return -EINVAL;
  1319. if (new_plane_state->fence)
  1320. return -EINVAL;
  1321. /*
  1322. * Don't do an async update if there is an outstanding commit modifying
  1323. * the plane. This prevents our async update's changes from getting
  1324. * overridden by a previous synchronous update's state.
  1325. */
  1326. if (old_plane_state->commit &&
  1327. !try_wait_for_completion(&old_plane_state->commit->hw_done))
  1328. return -EBUSY;
  1329. return funcs->atomic_async_check(plane, new_plane_state);
  1330. }
  1331. EXPORT_SYMBOL(drm_atomic_helper_async_check);
  1332. /**
  1333. * drm_atomic_helper_async_commit - commit state asynchronously
  1334. * @dev: DRM device
  1335. * @state: the driver state object
  1336. *
  1337. * This function commits a state asynchronously, i.e., not vblank
  1338. * synchronized. It should be used on a state only when
1339. drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
  1340. * the states like normal sync commits, but just do in-place changes on the
  1341. * current state.
  1342. */
  1343. void drm_atomic_helper_async_commit(struct drm_device *dev,
  1344. struct drm_atomic_state *state)
  1345. {
  1346. struct drm_plane *plane;
  1347. struct drm_plane_state *plane_state;
  1348. const struct drm_plane_helper_funcs *funcs;
  1349. int i;
  1350. for_each_new_plane_in_state(state, plane, plane_state, i) {
  1351. funcs = plane->helper_private;
  1352. funcs->atomic_async_update(plane, plane_state);
  1353. /*
  1354. * ->atomic_async_update() is supposed to update the
  1355. * plane->state in-place, make sure at least common
  1356. * properties have been properly updated.
  1357. */
  1358. WARN_ON_ONCE(plane->state->fb != plane_state->fb);
  1359. WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
  1360. WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
  1361. WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
  1362. WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
  1363. }
  1364. }
  1365. EXPORT_SYMBOL(drm_atomic_helper_async_commit);
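/*
 * Rough sketch of the plane-helper side of async commits (hypothetical foo_*
 * names, position-only cursor moves assumed): the check hook rejects anything
 * that can't be applied in place, and the update hook patches plane->state
 * directly, which is what the WARN_ON_ONCEs above verify.
 *
 *	static int foo_plane_atomic_async_check(struct drm_plane *plane,
 *						struct drm_plane_state *state)
 *	{
 *		// only allow moving the buffer that is already being scanned out
 *		if (state->fb != plane->state->fb)
 *			return -EINVAL;
 *		return 0;
 *	}
 *
 *	static void foo_plane_atomic_async_update(struct drm_plane *plane,
 *						  struct drm_plane_state *new_state)
 *	{
 *		plane->state->crtc_x = new_state->crtc_x;
 *		plane->state->crtc_y = new_state->crtc_y;
 *		plane->state->src_x = new_state->src_x;
 *		plane->state->src_y = new_state->src_y;
 *
 *		foo_plane_write_position(plane);	// hypothetical hw poke
 *	}
 */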
  1366. /**
  1367. * drm_atomic_helper_commit - commit validated state object
  1368. * @dev: DRM device
  1369. * @state: the driver state object
  1370. * @nonblock: whether nonblocking behavior is requested.
  1371. *
1372. This function commits a state object pre-validated with
1373. drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. This
  1374. * function implements nonblocking commits, using
  1375. * drm_atomic_helper_setup_commit() and related functions.
  1376. *
  1377. * Committing the actual hardware state is done through the
1378. &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
  1379. * implementation drm_atomic_helper_commit_tail().
  1380. *
  1381. * RETURNS:
  1382. * Zero for success or -errno.
  1383. */
  1384. int drm_atomic_helper_commit(struct drm_device *dev,
  1385. struct drm_atomic_state *state,
  1386. bool nonblock)
  1387. {
  1388. int ret;
  1389. if (state->async_update) {
  1390. ret = drm_atomic_helper_prepare_planes(dev, state);
  1391. if (ret)
  1392. return ret;
  1393. drm_atomic_helper_async_commit(dev, state);
  1394. drm_atomic_helper_cleanup_planes(dev, state);
  1395. return 0;
  1396. }
  1397. ret = drm_atomic_helper_setup_commit(state, nonblock);
  1398. if (ret)
  1399. return ret;
  1400. INIT_WORK(&state->commit_work, commit_work);
  1401. ret = drm_atomic_helper_prepare_planes(dev, state);
  1402. if (ret)
  1403. return ret;
  1404. if (!nonblock) {
  1405. ret = drm_atomic_helper_wait_for_fences(dev, state, true);
  1406. if (ret)
  1407. goto err;
  1408. }
  1409. /*
  1410. * This is the point of no return - everything below never fails except
  1411. * when the hw goes bonghits. Which means we can commit the new state on
  1412. * the software side now.
  1413. */
  1414. ret = drm_atomic_helper_swap_state(state, true);
  1415. if (ret)
  1416. goto err;
  1417. /*
  1418. * Everything below can be run asynchronously without the need to grab
  1419. * any modeset locks at all under one condition: It must be guaranteed
  1420. * that the asynchronous work has either been cancelled (if the driver
  1421. * supports it, which at least requires that the framebuffers get
  1422. * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
  1423. * before the new state gets committed on the software side with
  1424. * drm_atomic_helper_swap_state().
  1425. *
  1426. * This scheme allows new atomic state updates to be prepared and
  1427. * checked in parallel to the asynchronous completion of the previous
  1428. * update. Which is important since compositors need to figure out the
  1429. * composition of the next frame right after having submitted the
  1430. * current layout.
  1431. *
  1432. * NOTE: Commit work has multiple phases, first hardware commit, then
  1433. * cleanup. We want them to overlap, hence need system_unbound_wq to
1434. make sure work items don't artificially stall on one another.
  1435. */
  1436. drm_atomic_state_get(state);
  1437. if (nonblock)
  1438. queue_work(system_unbound_wq, &state->commit_work);
  1439. else
  1440. commit_tail(state);
  1441. return 0;
  1442. err:
  1443. drm_atomic_helper_cleanup_planes(dev, state);
  1444. return ret;
  1445. }
  1446. EXPORT_SYMBOL(drm_atomic_helper_commit);
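/*
 * Sketch of the usual wiring (hypothetical foo_* name, GEM-based driver
 * assumed for .fb_create): drivers that are happy with the default commit
 * machinery simply plug these helpers into their &drm_mode_config_funcs.
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create	= drm_gem_fb_create,
 *		.atomic_check	= drm_atomic_helper_check,
 *		.atomic_commit	= drm_atomic_helper_commit,
 *	};
 */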
  1447. /**
  1448. * DOC: implementing nonblocking commit
  1449. *
  1450. * Nonblocking atomic commits have to be implemented in the following sequence:
  1451. *
  1452. * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
1453. in the commit sequence that can fail, so we want to run it first and
  1454. * synchronously.
  1455. *
  1456. * 2. Synchronize with any outstanding nonblocking commit worker threads which
1457. might be affected by the new state update. This can be done by either cancelling
  1458. * or flushing the work items, depending upon whether the driver can deal with
  1459. * cancelled updates. Note that it is important to ensure that the framebuffer
  1460. * cleanup is still done when cancelling.
  1461. *
  1462. * Asynchronous workers need to have sufficient parallelism to be able to run
  1463. * different atomic commits on different CRTCs in parallel. The simplest way to
1464. achieve this is by running them on the &system_unbound_wq work queue. Note
  1465. * that drivers are not required to split up atomic commits and run an
  1466. * individual commit in parallel - userspace is supposed to do that if it cares.
  1467. * But it might be beneficial to do that for modesets, since those necessarily
  1468. * must be done as one global operation, and enabling or disabling a CRTC can
  1469. * take a long time. But even that is not required.
  1470. *
  1471. * 3. The software state is updated synchronously with
  1472. * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
  1473. * locks means concurrent callers never see inconsistent state. And doing this
  1474. * while it's guaranteed that no relevant nonblocking worker runs means that
1475. nonblocking workers do not need to grab any locks. Actually they must not grab
  1476. * locks, for otherwise the work flushing will deadlock.
  1477. *
  1478. * 4. Schedule a work item to do all subsequent steps, using the split-out
  1479. * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
  1480. * then cleaning up the framebuffers after the old framebuffer is no longer
  1481. * being displayed.
  1482. *
  1483. * The above scheme is implemented in the atomic helper libraries in
  1484. * drm_atomic_helper_commit() using a bunch of helper functions. See
  1485. * drm_atomic_helper_setup_commit() for a starting point.
  1486. */
  1487. static int stall_checks(struct drm_crtc *crtc, bool nonblock)
  1488. {
  1489. struct drm_crtc_commit *commit, *stall_commit = NULL;
  1490. bool completed = true;
  1491. int i;
  1492. long ret = 0;
  1493. spin_lock(&crtc->commit_lock);
  1494. i = 0;
  1495. list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
  1496. if (i == 0) {
  1497. completed = try_wait_for_completion(&commit->flip_done);
  1498. /* Userspace is not allowed to get ahead of the previous
  1499. * commit with nonblocking ones. */
  1500. if (!completed && nonblock) {
  1501. spin_unlock(&crtc->commit_lock);
  1502. return -EBUSY;
  1503. }
  1504. } else if (i == 1) {
  1505. stall_commit = drm_crtc_commit_get(commit);
  1506. break;
  1507. }
  1508. i++;
  1509. }
  1510. spin_unlock(&crtc->commit_lock);
  1511. if (!stall_commit)
  1512. return 0;
  1513. /* We don't want to let commits get ahead of cleanup work too much,
  1514. * stalling on 2nd previous commit means triple-buffer won't ever stall.
  1515. */
  1516. ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
  1517. 10*HZ);
  1518. if (ret == 0)
  1519. DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
  1520. crtc->base.id, crtc->name);
  1521. drm_crtc_commit_put(stall_commit);
  1522. return ret < 0 ? ret : 0;
  1523. }
  1524. static void release_crtc_commit(struct completion *completion)
  1525. {
  1526. struct drm_crtc_commit *commit = container_of(completion,
  1527. typeof(*commit),
  1528. flip_done);
  1529. drm_crtc_commit_put(commit);
  1530. }
  1531. static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
  1532. {
  1533. init_completion(&commit->flip_done);
  1534. init_completion(&commit->hw_done);
  1535. init_completion(&commit->cleanup_done);
  1536. INIT_LIST_HEAD(&commit->commit_entry);
  1537. kref_init(&commit->ref);
  1538. commit->crtc = crtc;
  1539. }
  1540. static struct drm_crtc_commit *
  1541. crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
  1542. {
  1543. if (crtc) {
  1544. struct drm_crtc_state *new_crtc_state;
  1545. new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  1546. return new_crtc_state->commit;
  1547. }
  1548. if (!state->fake_commit) {
  1549. state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
  1550. if (!state->fake_commit)
  1551. return NULL;
  1552. init_commit(state->fake_commit, NULL);
  1553. }
  1554. return state->fake_commit;
  1555. }
  1556. /**
  1557. * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
  1558. * @state: new modeset state to be committed
  1559. * @nonblock: whether nonblocking behavior is requested.
  1560. *
  1561. * This function prepares @state to be used by the atomic helper's support for
  1562. * nonblocking commits. Drivers using the nonblocking commit infrastructure
  1563. * should always call this function from their
  1564. * &drm_mode_config_funcs.atomic_commit hook.
  1565. *
  1566. * To be able to use this support drivers need to use a few more helper
  1567. * functions. drm_atomic_helper_wait_for_dependencies() must be called before
  1568. * actually committing the hardware state, and for nonblocking commits this call
  1569. * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1570. and its stall parameter, for when a driver's commit hooks look at the
  1571. * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
  1572. *
  1573. * Completion of the hardware commit step must be signalled using
  1574. * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
  1575. * to read or change any permanent software or hardware modeset state. The only
  1576. * exception is state protected by other means than &drm_modeset_lock locks.
  1577. * Only the free standing @state with pointers to the old state structures can
  1578. * be inspected, e.g. to clean up old buffers using
  1579. * drm_atomic_helper_cleanup_planes().
  1580. *
  1581. * At the very end, before cleaning up @state drivers must call
  1582. * drm_atomic_helper_commit_cleanup_done().
  1583. *
1584. This is all implemented in drm_atomic_helper_commit(), giving drivers a
  1585. * complete and easy-to-use default implementation of the atomic_commit() hook.
  1586. *
  1587. * The tracking of asynchronously executed and still pending commits is done
  1588. * using the core structure &drm_crtc_commit.
  1589. *
  1590. * By default there's no need to clean up resources allocated by this function
  1591. * explicitly: drm_atomic_state_default_clear() will take care of that
  1592. * automatically.
  1593. *
  1594. * Returns:
  1595. *
  1596. * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
  1597. * -ENOMEM on allocation failures and -EINTR when a signal is pending.
  1598. */
  1599. int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
  1600. bool nonblock)
  1601. {
  1602. struct drm_crtc *crtc;
  1603. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1604. struct drm_connector *conn;
  1605. struct drm_connector_state *old_conn_state, *new_conn_state;
  1606. struct drm_plane *plane;
  1607. struct drm_plane_state *old_plane_state, *new_plane_state;
  1608. struct drm_crtc_commit *commit;
  1609. int i, ret;
  1610. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  1611. commit = kzalloc(sizeof(*commit), GFP_KERNEL);
  1612. if (!commit)
  1613. return -ENOMEM;
  1614. init_commit(commit, crtc);
  1615. new_crtc_state->commit = commit;
  1616. ret = stall_checks(crtc, nonblock);
  1617. if (ret)
  1618. return ret;
  1619. /* Drivers only send out events when at least either current or
  1620. * new CRTC state is active. Complete right away if everything
  1621. * stays off. */
  1622. if (!old_crtc_state->active && !new_crtc_state->active) {
  1623. complete_all(&commit->flip_done);
  1624. continue;
  1625. }
  1626. /* Legacy cursor updates are fully unsynced. */
  1627. if (state->legacy_cursor_update) {
  1628. complete_all(&commit->flip_done);
  1629. continue;
  1630. }
  1631. if (!new_crtc_state->event) {
  1632. commit->event = kzalloc(sizeof(*commit->event),
  1633. GFP_KERNEL);
  1634. if (!commit->event)
  1635. return -ENOMEM;
  1636. new_crtc_state->event = commit->event;
  1637. }
  1638. new_crtc_state->event->base.completion = &commit->flip_done;
  1639. new_crtc_state->event->base.completion_release = release_crtc_commit;
  1640. drm_crtc_commit_get(commit);
  1641. commit->abort_completion = true;
  1642. state->crtcs[i].commit = commit;
  1643. drm_crtc_commit_get(commit);
  1644. }
  1645. for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
  1646. /* Userspace is not allowed to get ahead of the previous
  1647. * commit with nonblocking ones. */
  1648. if (nonblock && old_conn_state->commit &&
  1649. !try_wait_for_completion(&old_conn_state->commit->flip_done))
  1650. return -EBUSY;
  1651. /* Always track connectors explicitly for e.g. link retraining. */
  1652. commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
  1653. if (!commit)
  1654. return -ENOMEM;
  1655. new_conn_state->commit = drm_crtc_commit_get(commit);
  1656. }
  1657. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
  1658. /* Userspace is not allowed to get ahead of the previous
  1659. * commit with nonblocking ones. */
  1660. if (nonblock && old_plane_state->commit &&
  1661. !try_wait_for_completion(&old_plane_state->commit->flip_done))
  1662. return -EBUSY;
  1663. /* Always track planes explicitly for async pageflip support. */
  1664. commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
  1665. if (!commit)
  1666. return -ENOMEM;
  1667. new_plane_state->commit = drm_crtc_commit_get(commit);
  1668. }
  1669. return 0;
  1670. }
  1671. EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
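/*
 * Rough sketch, assuming a driver that rolls its own commit machinery instead
 * of using drm_atomic_helper_commit(): the tracking set up here is consumed by
 * the helpers below, in roughly this order (the foo_* name is hypothetical).
 *
 *	static void foo_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_wait_for_dependencies(old_state);
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_vblanks(dev, old_state);
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *		drm_atomic_helper_commit_cleanup_done(old_state);
 *	}
 */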
  1672. /**
1673. * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
  1674. * @old_state: atomic state object with old state structures
  1675. *
1676. This function waits for all preceding commits that touch the same CRTC as
  1677. * @old_state to both be committed to the hardware (as signalled by
  1678. * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
  1679. * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
  1680. *
  1681. * This is part of the atomic helper support for nonblocking commits, see
  1682. * drm_atomic_helper_setup_commit() for an overview.
  1683. */
  1684. void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
  1685. {
  1686. struct drm_crtc *crtc;
  1687. struct drm_crtc_state *old_crtc_state;
  1688. struct drm_plane *plane;
  1689. struct drm_plane_state *old_plane_state;
  1690. struct drm_connector *conn;
  1691. struct drm_connector_state *old_conn_state;
  1692. struct drm_crtc_commit *commit;
  1693. int i;
  1694. long ret;
  1695. for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
  1696. commit = old_crtc_state->commit;
  1697. if (!commit)
  1698. continue;
  1699. ret = wait_for_completion_timeout(&commit->hw_done,
  1700. 10*HZ);
  1701. if (ret == 0)
  1702. DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
  1703. crtc->base.id, crtc->name);
  1704. /* Currently no support for overwriting flips, hence
  1705. * stall for previous one to execute completely. */
  1706. ret = wait_for_completion_timeout(&commit->flip_done,
  1707. 10*HZ);
  1708. if (ret == 0)
  1709. DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
  1710. crtc->base.id, crtc->name);
  1711. }
  1712. for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
  1713. commit = old_conn_state->commit;
  1714. if (!commit)
  1715. continue;
  1716. ret = wait_for_completion_timeout(&commit->hw_done,
  1717. 10*HZ);
  1718. if (ret == 0)
  1719. DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
  1720. conn->base.id, conn->name);
  1721. /* Currently no support for overwriting flips, hence
  1722. * stall for previous one to execute completely. */
  1723. ret = wait_for_completion_timeout(&commit->flip_done,
  1724. 10*HZ);
  1725. if (ret == 0)
  1726. DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
  1727. conn->base.id, conn->name);
  1728. }
  1729. for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
  1730. commit = old_plane_state->commit;
  1731. if (!commit)
  1732. continue;
  1733. ret = wait_for_completion_timeout(&commit->hw_done,
  1734. 10*HZ);
  1735. if (ret == 0)
  1736. DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
  1737. plane->base.id, plane->name);
  1738. /* Currently no support for overwriting flips, hence
  1739. * stall for previous one to execute completely. */
  1740. ret = wait_for_completion_timeout(&commit->flip_done,
  1741. 10*HZ);
  1742. if (ret == 0)
  1743. DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
  1744. plane->base.id, plane->name);
  1745. }
  1746. }
  1747. EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
  1748. /**
  1749. * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
  1750. * @old_state: atomic state object with old state structures
  1751. *
1752. This function walks all CRTCs and fakes VBLANK events on those with
  1753. * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
  1754. * The primary use of this function is writeback connectors working in oneshot
  1755. * mode and faking VBLANK events. In this case they only fake the VBLANK event
  1756. * when a job is queued, and any change to the pipeline that does not touch the
1757. connector would otherwise lead to timeouts when calling
  1758. * drm_atomic_helper_wait_for_vblanks() or
  1759. * drm_atomic_helper_wait_for_flip_done().
  1760. *
  1761. * This is part of the atomic helper support for nonblocking commits, see
  1762. * drm_atomic_helper_setup_commit() for an overview.
  1763. */
  1764. void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
  1765. {
  1766. struct drm_crtc_state *new_crtc_state;
  1767. struct drm_crtc *crtc;
  1768. int i;
  1769. for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
  1770. unsigned long flags;
  1771. if (!new_crtc_state->no_vblank)
  1772. continue;
  1773. spin_lock_irqsave(&old_state->dev->event_lock, flags);
  1774. if (new_crtc_state->event) {
  1775. drm_crtc_send_vblank_event(crtc,
  1776. new_crtc_state->event);
  1777. new_crtc_state->event = NULL;
  1778. }
  1779. spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
  1780. }
  1781. }
  1782. EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
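/*
 * Sketch, assuming a writeback-only pipeline that cannot generate real VBLANK
 * interrupts (foo_* hypothetical): the driver opts into the faked event by
 * setting &drm_crtc_state.no_vblank, e.g. from its CRTC atomic_check, and this
 * helper then delivers the pending event during the commit.
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_crtc_state *crtc_state)
 *	{
 *		crtc_state->no_vblank = true;
 *		return 0;
 *	}
 */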
  1783. /**
1784. * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
  1785. * @old_state: atomic state object with old state structures
  1786. *
  1787. * This function is used to signal completion of the hardware commit step. After
  1788. * this step the driver is not allowed to read or change any permanent software
  1789. * or hardware modeset state. The only exception is state protected by other
  1790. * means than &drm_modeset_lock locks.
  1791. *
  1792. * Drivers should try to postpone any expensive or delayed cleanup work after
  1793. * this function is called.
  1794. *
  1795. * This is part of the atomic helper support for nonblocking commits, see
  1796. * drm_atomic_helper_setup_commit() for an overview.
  1797. */
  1798. void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
  1799. {
  1800. struct drm_crtc *crtc;
  1801. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1802. struct drm_crtc_commit *commit;
  1803. int i;
  1804. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1805. commit = new_crtc_state->commit;
  1806. if (!commit)
  1807. continue;
  1808. /*
  1809. * copy new_crtc_state->commit to old_crtc_state->commit,
  1810. * it's unsafe to touch new_crtc_state after hw_done,
  1811. * but we still need to do so in cleanup_done().
  1812. */
  1813. if (old_crtc_state->commit)
  1814. drm_crtc_commit_put(old_crtc_state->commit);
  1815. old_crtc_state->commit = drm_crtc_commit_get(commit);
  1816. /* backend must have consumed any event by now */
  1817. WARN_ON(new_crtc_state->event);
  1818. complete_all(&commit->hw_done);
  1819. }
  1820. if (old_state->fake_commit) {
  1821. complete_all(&old_state->fake_commit->hw_done);
  1822. complete_all(&old_state->fake_commit->flip_done);
  1823. }
  1824. }
  1825. EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
  1826. /**
  1827. * drm_atomic_helper_commit_cleanup_done - signal completion of commit
  1828. * @old_state: atomic state object with old state structures
  1829. *
  1830. * This signals completion of the atomic update @old_state, including any
  1831. * cleanup work. If used, it must be called right before calling
  1832. * drm_atomic_state_put().
  1833. *
  1834. * This is part of the atomic helper support for nonblocking commits, see
  1835. * drm_atomic_helper_setup_commit() for an overview.
  1836. */
  1837. void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
  1838. {
  1839. struct drm_crtc *crtc;
  1840. struct drm_crtc_state *old_crtc_state;
  1841. struct drm_crtc_commit *commit;
  1842. int i;
  1843. for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
  1844. commit = old_crtc_state->commit;
  1845. if (WARN_ON(!commit))
  1846. continue;
  1847. complete_all(&commit->cleanup_done);
  1848. WARN_ON(!try_wait_for_completion(&commit->hw_done));
  1849. spin_lock(&crtc->commit_lock);
  1850. list_del(&commit->commit_entry);
  1851. spin_unlock(&crtc->commit_lock);
  1852. }
  1853. if (old_state->fake_commit)
  1854. complete_all(&old_state->fake_commit->cleanup_done);
  1855. }
  1856. EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
  1857. /**
  1858. * drm_atomic_helper_prepare_planes - prepare plane resources before commit
  1859. * @dev: DRM device
  1860. * @state: atomic state object with new state structures
  1861. *
  1862. * This function prepares plane state, specifically framebuffers, for the new
  1863. * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
  1864. * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
  1865. * any already successfully prepared framebuffer.
  1866. *
  1867. * Returns:
  1868. * 0 on success, negative error code on failure.
  1869. */
  1870. int drm_atomic_helper_prepare_planes(struct drm_device *dev,
  1871. struct drm_atomic_state *state)
  1872. {
  1873. struct drm_plane *plane;
  1874. struct drm_plane_state *new_plane_state;
  1875. int ret, i, j;
  1876. for_each_new_plane_in_state(state, plane, new_plane_state, i) {
  1877. const struct drm_plane_helper_funcs *funcs;
  1878. funcs = plane->helper_private;
  1879. if (funcs->prepare_fb) {
  1880. ret = funcs->prepare_fb(plane, new_plane_state);
  1881. if (ret)
  1882. goto fail;
  1883. }
  1884. }
  1885. return 0;
  1886. fail:
  1887. for_each_new_plane_in_state(state, plane, new_plane_state, j) {
  1888. const struct drm_plane_helper_funcs *funcs;
  1889. if (j >= i)
  1890. continue;
  1891. funcs = plane->helper_private;
  1892. if (funcs->cleanup_fb)
  1893. funcs->cleanup_fb(plane, new_plane_state);
  1894. }
  1895. return ret;
  1896. }
  1897. EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
  1898. static bool plane_crtc_active(const struct drm_plane_state *state)
  1899. {
  1900. return state->crtc && state->crtc->state->active;
  1901. }
  1902. /**
  1903. * drm_atomic_helper_commit_planes - commit plane state
  1904. * @dev: DRM device
  1905. * @old_state: atomic state object with old state structures
  1906. * @flags: flags for committing plane state
  1907. *
  1908. * This function commits the new plane state using the plane and atomic helper
  1909. * functions for planes and crtcs. It assumes that the atomic state has already
  1910. * been pushed into the relevant object state pointers, since this step can no
  1911. * longer fail.
  1912. *
  1913. * It still requires the global state object @old_state to know which planes and
  1914. * crtcs need to be updated though.
  1915. *
  1916. * Note that this function does all plane updates across all CRTCs in one step.
  1917. * If the hardware can't support this approach look at
  1918. * drm_atomic_helper_commit_planes_on_crtc() instead.
  1919. *
  1920. * Plane parameters can be updated by applications while the associated CRTC is
  1921. * disabled. The DRM/KMS core will store the parameters in the plane state,
  1922. * which will be available to the driver when the CRTC is turned on. As a result
  1923. * most drivers don't need to be immediately notified of plane updates for a
  1924. * disabled CRTC.
  1925. *
  1926. * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
  1927. * @flags in order not to receive plane update notifications related to a
  1928. * disabled CRTC. This avoids the need to manually ignore plane updates in
  1929. * driver code when the driver and/or hardware can't or just don't need to deal
  1930. * with updates on disabled CRTCs, for example when supporting runtime PM.
  1931. *
  1932. * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
1933. display controllers require a CRTC's planes to be disabled when the CRTC is
  1934. * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable
  1935. * call for a plane if the CRTC of the old plane state needs a modesetting
  1936. * operation. Of course, the drivers need to disable the planes in their CRTC
  1937. * disable callbacks since no one else would do that.
  1938. *
  1939. * The drm_atomic_helper_commit() default implementation doesn't set the
  1940. * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
  1941. * This should not be copied blindly by drivers.
  1942. */
  1943. void drm_atomic_helper_commit_planes(struct drm_device *dev,
  1944. struct drm_atomic_state *old_state,
  1945. uint32_t flags)
  1946. {
  1947. struct drm_crtc *crtc;
  1948. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1949. struct drm_plane *plane;
  1950. struct drm_plane_state *old_plane_state, *new_plane_state;
  1951. int i;
  1952. bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
  1953. bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
  1954. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1955. const struct drm_crtc_helper_funcs *funcs;
  1956. funcs = crtc->helper_private;
  1957. if (!funcs || !funcs->atomic_begin)
  1958. continue;
  1959. if (active_only && !new_crtc_state->active)
  1960. continue;
  1961. funcs->atomic_begin(crtc, old_crtc_state);
  1962. }
  1963. for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
  1964. const struct drm_plane_helper_funcs *funcs;
  1965. bool disabling;
  1966. funcs = plane->helper_private;
  1967. if (!funcs)
  1968. continue;
  1969. disabling = drm_atomic_plane_disabling(old_plane_state,
  1970. new_plane_state);
  1971. if (active_only) {
  1972. /*
  1973. * Skip planes related to inactive CRTCs. If the plane
  1974. * is enabled use the state of the current CRTC. If the
  1975. * plane is being disabled use the state of the old
  1976. * CRTC to avoid skipping planes being disabled on an
  1977. * active CRTC.
  1978. */
  1979. if (!disabling && !plane_crtc_active(new_plane_state))
  1980. continue;
  1981. if (disabling && !plane_crtc_active(old_plane_state))
  1982. continue;
  1983. }
  1984. /*
  1985. * Special-case disabling the plane if drivers support it.
  1986. */
  1987. if (disabling && funcs->atomic_disable) {
  1988. struct drm_crtc_state *crtc_state;
  1989. crtc_state = old_plane_state->crtc->state;
  1990. if (drm_atomic_crtc_needs_modeset(crtc_state) &&
  1991. no_disable)
  1992. continue;
  1993. funcs->atomic_disable(plane, old_plane_state);
  1994. } else if (new_plane_state->crtc || disabling) {
  1995. funcs->atomic_update(plane, old_plane_state);
  1996. }
  1997. }
  1998. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1999. const struct drm_crtc_helper_funcs *funcs;
  2000. funcs = crtc->helper_private;
  2001. if (!funcs || !funcs->atomic_flush)
  2002. continue;
  2003. if (active_only && !new_crtc_state->active)
  2004. continue;
  2005. funcs->atomic_flush(crtc, old_crtc_state);
  2006. }
  2007. }
  2008. EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
  2009. /**
  2010. * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
  2011. * @old_crtc_state: atomic state object with the old crtc state
  2012. *
  2013. * This function commits the new plane state using the plane and atomic helper
  2014. * functions for planes on the specific crtc. It assumes that the atomic state
  2015. * has already been pushed into the relevant object state pointers, since this
  2016. * step can no longer fail.
  2017. *
  2018. * This function is useful when plane updates should be done crtc-by-crtc
  2019. * instead of one global step like drm_atomic_helper_commit_planes() does.
  2020. *
2021. This function can only be safely used when planes are not allowed to move
2022. between different CRTCs, because this function doesn't handle inter-CRTC
2023. dependencies. Callers need to ensure that either no such dependencies exist,
2024. or resolve them through ordering of commit calls or through some other means.
  2025. */
  2026. void
  2027. drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
  2028. {
  2029. const struct drm_crtc_helper_funcs *crtc_funcs;
  2030. struct drm_crtc *crtc = old_crtc_state->crtc;
  2031. struct drm_atomic_state *old_state = old_crtc_state->state;
  2032. struct drm_crtc_state *new_crtc_state =
  2033. drm_atomic_get_new_crtc_state(old_state, crtc);
  2034. struct drm_plane *plane;
  2035. unsigned plane_mask;
  2036. plane_mask = old_crtc_state->plane_mask;
  2037. plane_mask |= new_crtc_state->plane_mask;
  2038. crtc_funcs = crtc->helper_private;
  2039. if (crtc_funcs && crtc_funcs->atomic_begin)
  2040. crtc_funcs->atomic_begin(crtc, old_crtc_state);
  2041. drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
  2042. struct drm_plane_state *old_plane_state =
  2043. drm_atomic_get_old_plane_state(old_state, plane);
  2044. struct drm_plane_state *new_plane_state =
  2045. drm_atomic_get_new_plane_state(old_state, plane);
  2046. const struct drm_plane_helper_funcs *plane_funcs;
  2047. plane_funcs = plane->helper_private;
  2048. if (!old_plane_state || !plane_funcs)
  2049. continue;
  2050. WARN_ON(new_plane_state->crtc &&
  2051. new_plane_state->crtc != crtc);
  2052. if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
  2053. plane_funcs->atomic_disable)
  2054. plane_funcs->atomic_disable(plane, old_plane_state);
  2055. else if (new_plane_state->crtc ||
  2056. drm_atomic_plane_disabling(old_plane_state, new_plane_state))
  2057. plane_funcs->atomic_update(plane, old_plane_state);
  2058. }
  2059. if (crtc_funcs && crtc_funcs->atomic_flush)
  2060. crtc_funcs->atomic_flush(crtc, old_crtc_state);
  2061. }
  2062. EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
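/*
 * Sketch of crtc-by-crtc usage from a driver's commit path (hypothetical foo_*
 * name), as an alternative to one global drm_atomic_helper_commit_planes() call:
 *
 *	static void foo_commit_planes(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_crtc *crtc;
 *		struct drm_crtc_state *old_crtc_state;
 *		int i;
 *
 *		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 *			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 *	}
 */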
  2063. /**
  2064. * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
  2065. * @old_crtc_state: atomic state object with the old CRTC state
  2066. * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
  2067. *
  2068. * Disables all planes associated with the given CRTC. This can be
  2069. * used for instance in the CRTC helper atomic_disable callback to disable
  2070. * all planes.
  2071. *
  2072. * If the atomic-parameter is set the function calls the CRTC's
  2073. * atomic_begin hook before and atomic_flush hook after disabling the
  2074. * planes.
  2075. *
  2076. * It is a bug to call this function without having implemented the
  2077. * &drm_plane_helper_funcs.atomic_disable plane hook.
  2078. */
  2079. void
  2080. drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
  2081. bool atomic)
  2082. {
  2083. struct drm_crtc *crtc = old_crtc_state->crtc;
  2084. const struct drm_crtc_helper_funcs *crtc_funcs =
  2085. crtc->helper_private;
  2086. struct drm_plane *plane;
  2087. if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
  2088. crtc_funcs->atomic_begin(crtc, NULL);
  2089. drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
  2090. const struct drm_plane_helper_funcs *plane_funcs =
  2091. plane->helper_private;
  2092. if (!plane_funcs)
  2093. continue;
  2094. WARN_ON(!plane_funcs->atomic_disable);
  2095. if (plane_funcs->atomic_disable)
  2096. plane_funcs->atomic_disable(plane, NULL);
  2097. }
  2098. if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
  2099. crtc_funcs->atomic_flush(crtc, NULL);
  2100. }
  2101. EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
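/*
 * Sketch of the intended call site (hypothetical foo_* names): a CRTC
 * atomic_disable implementation can use this helper to shut down all of its
 * planes before powering off the CRTC itself.
 *
 *	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *					    struct drm_crtc_state *old_crtc_state)
 *	{
 *		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *		foo_crtc_power_off(crtc);	// hypothetical hw hook
 *	}
 */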
  2102. /**
  2103. * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
  2104. * @dev: DRM device
  2105. * @old_state: atomic state object with old state structures
  2106. *
  2107. * This function cleans up plane state, specifically framebuffers, from the old
2108. configuration. Hence the old configuration must be preserved in @old_state to
  2109. * be able to call this function.
  2110. *
  2111. * This function must also be called on the new state when the atomic update
  2112. * fails at any point after calling drm_atomic_helper_prepare_planes().
  2113. */
  2114. void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
  2115. struct drm_atomic_state *old_state)
  2116. {
  2117. struct drm_plane *plane;
  2118. struct drm_plane_state *old_plane_state, *new_plane_state;
  2119. int i;
  2120. for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
  2121. const struct drm_plane_helper_funcs *funcs;
  2122. struct drm_plane_state *plane_state;
  2123. /*
  2124. * This might be called before swapping when commit is aborted,
  2125. * in which case we have to cleanup the new state.
  2126. */
  2127. if (old_plane_state == plane->state)
  2128. plane_state = new_plane_state;
  2129. else
  2130. plane_state = old_plane_state;
  2131. funcs = plane->helper_private;
  2132. if (funcs->cleanup_fb)
  2133. funcs->cleanup_fb(plane, plane_state);
  2134. }
  2135. }
  2136. EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
  2137. /**
  2138. * drm_atomic_helper_swap_state - store atomic state into current sw state
  2139. * @state: atomic state
2140. @stall: stall for preceding commits
  2141. *
  2142. * This function stores the atomic state into the current state pointers in all
2143. driver objects. It should be called after all steps that can fail have been
2144. done and have succeeded, but before the actual hardware state is committed.
  2145. *
  2146. * For cleanup and error recovery the current state for all changed objects will
  2147. * be swapped into @state.
  2148. *
  2149. * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
  2150. *
  2151. * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
  2152. *
  2153. * 2. Do any other steps that might fail.
  2154. *
  2155. * 3. Put the staged state into the current state pointers with this function.
  2156. *
  2157. * 4. Actually commit the hardware state.
  2158. *
  2159. * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
  2160. * contains the old state. Also do any other cleanup required with that state.
  2161. *
  2162. * @stall must be set when nonblocking commits for this driver directly access
  2163. * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
  2164. * the current atomic helpers this is almost always the case, since the helpers
  2165. * don't pass the right state structures to the callbacks.
  2166. *
  2167. * Returns:
  2168. *
  2169. * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
  2170. * waiting for the previous commits has been interrupted.
  2171. */
  2172. int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
  2173. bool stall)
  2174. {
  2175. int i, ret;
  2176. struct drm_connector *connector;
  2177. struct drm_connector_state *old_conn_state, *new_conn_state;
  2178. struct drm_crtc *crtc;
  2179. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  2180. struct drm_plane *plane;
  2181. struct drm_plane_state *old_plane_state, *new_plane_state;
  2182. struct drm_crtc_commit *commit;
  2183. struct drm_private_obj *obj;
  2184. struct drm_private_state *old_obj_state, *new_obj_state;
  2185. if (stall) {
  2186. /*
  2187. * We have to stall for hw_done here before
  2188. * drm_atomic_helper_wait_for_dependencies() because flip
  2189. * depth > 1 is not yet supported by all drivers. As long as
  2190. * obj->state is directly dereferenced anywhere in the drivers
  2191. * atomic_commit_tail function, then it's unsafe to swap state
  2192. * before drm_atomic_helper_commit_hw_done() is called.
  2193. */
  2194. for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
  2195. commit = old_crtc_state->commit;
  2196. if (!commit)
  2197. continue;
  2198. ret = wait_for_completion_interruptible(&commit->hw_done);
  2199. if (ret)
  2200. return ret;
  2201. }
  2202. for_each_old_connector_in_state(state, connector, old_conn_state, i) {
  2203. commit = old_conn_state->commit;
  2204. if (!commit)
  2205. continue;
  2206. ret = wait_for_completion_interruptible(&commit->hw_done);
  2207. if (ret)
  2208. return ret;
  2209. }
  2210. for_each_old_plane_in_state(state, plane, old_plane_state, i) {
  2211. commit = old_plane_state->commit;
  2212. if (!commit)
  2213. continue;
  2214. ret = wait_for_completion_interruptible(&commit->hw_done);
  2215. if (ret)
  2216. return ret;
  2217. }
  2218. }
  2219. for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
  2220. WARN_ON(connector->state != old_conn_state);
  2221. old_conn_state->state = state;
  2222. new_conn_state->state = NULL;
  2223. state->connectors[i].state = old_conn_state;
  2224. connector->state = new_conn_state;
  2225. }
  2226. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  2227. WARN_ON(crtc->state != old_crtc_state);
  2228. old_crtc_state->state = state;
  2229. new_crtc_state->state = NULL;
  2230. state->crtcs[i].state = old_crtc_state;
  2231. crtc->state = new_crtc_state;
  2232. if (new_crtc_state->commit) {
  2233. spin_lock(&crtc->commit_lock);
  2234. list_add(&new_crtc_state->commit->commit_entry,
  2235. &crtc->commit_list);
  2236. spin_unlock(&crtc->commit_lock);
  2237. new_crtc_state->commit->event = NULL;
  2238. }
  2239. }
  2240. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
  2241. WARN_ON(plane->state != old_plane_state);
  2242. old_plane_state->state = state;
  2243. new_plane_state->state = NULL;
  2244. state->planes[i].state = old_plane_state;
  2245. plane->state = new_plane_state;
  2246. }
  2247. for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
  2248. WARN_ON(obj->state != old_obj_state);
  2249. old_obj_state->state = state;
  2250. new_obj_state->state = NULL;
  2251. state->private_objs[i].state = old_obj_state;
  2252. obj->state = new_obj_state;
  2253. }
  2254. return 0;
  2255. }
  2256. EXPORT_SYMBOL(drm_atomic_helper_swap_state);
  2257. /**
  2258. * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
  2259. * @plane: plane object to update
  2260. * @crtc: owning CRTC of owning plane
  2261. * @fb: framebuffer to flip onto plane
  2262. * @crtc_x: x offset of primary plane on crtc
  2263. * @crtc_y: y offset of primary plane on crtc
  2264. * @crtc_w: width of primary plane rectangle on crtc
  2265. * @crtc_h: height of primary plane rectangle on crtc
  2266. * @src_x: x offset of @fb for panning
  2267. * @src_y: y offset of @fb for panning
  2268. * @src_w: width of source rectangle in @fb
  2269. * @src_h: height of source rectangle in @fb
  2270. * @ctx: lock acquire context
  2271. *
  2272. * Provides a default plane update handler using the atomic driver interface.
  2273. *
  2274. * RETURNS:
  2275. * Zero on success, error code on failure
  2276. */
  2277. int drm_atomic_helper_update_plane(struct drm_plane *plane,
  2278. struct drm_crtc *crtc,
  2279. struct drm_framebuffer *fb,
  2280. int crtc_x, int crtc_y,
  2281. unsigned int crtc_w, unsigned int crtc_h,
  2282. uint32_t src_x, uint32_t src_y,
  2283. uint32_t src_w, uint32_t src_h,
  2284. struct drm_modeset_acquire_ctx *ctx)
  2285. {
  2286. struct drm_atomic_state *state;
  2287. struct drm_plane_state *plane_state;
  2288. int ret = 0;
  2289. state = drm_atomic_state_alloc(plane->dev);
  2290. if (!state)
  2291. return -ENOMEM;
  2292. state->acquire_ctx = ctx;
  2293. plane_state = drm_atomic_get_plane_state(state, plane);
  2294. if (IS_ERR(plane_state)) {
  2295. ret = PTR_ERR(plane_state);
  2296. goto fail;
  2297. }
  2298. ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
  2299. if (ret != 0)
  2300. goto fail;
  2301. drm_atomic_set_fb_for_plane(plane_state, fb);
  2302. plane_state->crtc_x = crtc_x;
  2303. plane_state->crtc_y = crtc_y;
  2304. plane_state->crtc_w = crtc_w;
  2305. plane_state->crtc_h = crtc_h;
  2306. plane_state->src_x = src_x;
  2307. plane_state->src_y = src_y;
  2308. plane_state->src_w = src_w;
  2309. plane_state->src_h = src_h;
  2310. if (plane == crtc->cursor)
  2311. state->legacy_cursor_update = true;
  2312. ret = drm_atomic_commit(state);
  2313. fail:
  2314. drm_atomic_state_put(state);
  2315. return ret;
  2316. }
  2317. EXPORT_SYMBOL(drm_atomic_helper_update_plane);
  2318. /**
2319. * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
  2320. * @plane: plane to disable
  2321. * @ctx: lock acquire context
  2322. *
  2323. * Provides a default plane disable handler using the atomic driver interface.
  2324. *
  2325. * RETURNS:
  2326. * Zero on success, error code on failure
  2327. */
  2328. int drm_atomic_helper_disable_plane(struct drm_plane *plane,
  2329. struct drm_modeset_acquire_ctx *ctx)
  2330. {
  2331. struct drm_atomic_state *state;
  2332. struct drm_plane_state *plane_state;
  2333. int ret = 0;
  2334. state = drm_atomic_state_alloc(plane->dev);
  2335. if (!state)
  2336. return -ENOMEM;
  2337. state->acquire_ctx = ctx;
  2338. plane_state = drm_atomic_get_plane_state(state, plane);
  2339. if (IS_ERR(plane_state)) {
  2340. ret = PTR_ERR(plane_state);
  2341. goto fail;
  2342. }
  2343. if (plane_state->crtc && plane_state->crtc->cursor == plane)
  2344. plane_state->state->legacy_cursor_update = true;
  2345. ret = __drm_atomic_helper_disable_plane(plane, plane_state);
  2346. if (ret != 0)
  2347. goto fail;
  2348. ret = drm_atomic_commit(state);
  2349. fail:
  2350. drm_atomic_state_put(state);
  2351. return ret;
  2352. }
  2353. EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
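/*
* Illustrative sketch, not part of the original file: atomic drivers normally
* wire these two helpers straight into their &drm_plane_funcs, together with
* the default plane state hooks defined further down in this file. The
* example_plane_funcs name is a hypothetical driver symbol.
*
*      static const struct drm_plane_funcs example_plane_funcs = {
*              .update_plane           = drm_atomic_helper_update_plane,
*              .disable_plane          = drm_atomic_helper_disable_plane,
*              .destroy                = drm_plane_cleanup,
*              .reset                  = drm_atomic_helper_plane_reset,
*              .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
*              .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
*      };
*/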
  2354. /* just used from fb-helper and atomic-helper: */
  2355. int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
  2356. struct drm_plane_state *plane_state)
  2357. {
  2358. int ret;
  2359. ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
  2360. if (ret != 0)
  2361. return ret;
  2362. drm_atomic_set_fb_for_plane(plane_state, NULL);
  2363. plane_state->crtc_x = 0;
  2364. plane_state->crtc_y = 0;
  2365. plane_state->crtc_w = 0;
  2366. plane_state->crtc_h = 0;
  2367. plane_state->src_x = 0;
  2368. plane_state->src_y = 0;
  2369. plane_state->src_w = 0;
  2370. plane_state->src_h = 0;
  2371. return 0;
  2372. }
  2373. static int update_output_state(struct drm_atomic_state *state,
  2374. struct drm_mode_set *set)
  2375. {
  2376. struct drm_device *dev = set->crtc->dev;
  2377. struct drm_crtc *crtc;
  2378. struct drm_crtc_state *new_crtc_state;
  2379. struct drm_connector *connector;
  2380. struct drm_connector_state *new_conn_state;
  2381. int ret, i;
  2382. ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
  2383. state->acquire_ctx);
  2384. if (ret)
  2385. return ret;
  2386. /* First disable all connectors on the target crtc. */
  2387. ret = drm_atomic_add_affected_connectors(state, set->crtc);
  2388. if (ret)
  2389. return ret;
  2390. for_each_new_connector_in_state(state, connector, new_conn_state, i) {
  2391. if (new_conn_state->crtc == set->crtc) {
  2392. ret = drm_atomic_set_crtc_for_connector(new_conn_state,
  2393. NULL);
  2394. if (ret)
  2395. return ret;
  2396. /* Make sure legacy setCrtc always re-trains */
  2397. new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
  2398. }
  2399. }
  2400. /* Then set all connectors from set->connectors on the target crtc */
  2401. for (i = 0; i < set->num_connectors; i++) {
  2402. new_conn_state = drm_atomic_get_connector_state(state,
  2403. set->connectors[i]);
  2404. if (IS_ERR(new_conn_state))
  2405. return PTR_ERR(new_conn_state);
  2406. ret = drm_atomic_set_crtc_for_connector(new_conn_state,
  2407. set->crtc);
  2408. if (ret)
  2409. return ret;
  2410. }
  2411. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  2412. /* Don't update ->enable for the CRTC in the set_config request,
  2413. * since a mismatch would indicate a bug in the upper layers.
  2414. * The actual modeset code later on will catch any
  2415. * inconsistencies here. */
  2416. if (crtc == set->crtc)
  2417. continue;
  2418. if (!new_crtc_state->connector_mask) {
  2419. ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
  2420. NULL);
  2421. if (ret < 0)
  2422. return ret;
  2423. new_crtc_state->active = false;
  2424. }
  2425. }
  2426. return 0;
  2427. }
  2428. /**
  2429. * drm_atomic_helper_set_config - set a new config from userspace
  2430. * @set: mode set configuration
  2431. * @ctx: lock acquisition context
  2432. *
  2433. * Provides a default crtc set_config handler using the atomic driver interface.
  2434. *
  2435. * NOTE: For backwards compatibility with old userspace this automatically
  2436. * resets the "link-status" property to GOOD, to force any link
2437. * re-training. The SETCRTC ioctl does not define whether an update needs
2438. * a full modeset or just a plane update, hence we're allowed to do
  2439. * that. See also drm_connector_set_link_status_property().
  2440. *
  2441. * Returns:
  2442. * Returns 0 on success, negative errno numbers on failure.
  2443. */
  2444. int drm_atomic_helper_set_config(struct drm_mode_set *set,
  2445. struct drm_modeset_acquire_ctx *ctx)
  2446. {
  2447. struct drm_atomic_state *state;
  2448. struct drm_crtc *crtc = set->crtc;
  2449. int ret = 0;
  2450. state = drm_atomic_state_alloc(crtc->dev);
  2451. if (!state)
  2452. return -ENOMEM;
  2453. state->acquire_ctx = ctx;
  2454. ret = __drm_atomic_helper_set_config(set, state);
  2455. if (ret != 0)
  2456. goto fail;
  2457. ret = handle_conflicting_encoders(state, true);
  2458. if (ret)
2459. goto fail;
  2460. ret = drm_atomic_commit(state);
  2461. fail:
  2462. drm_atomic_state_put(state);
  2463. return ret;
  2464. }
  2465. EXPORT_SYMBOL(drm_atomic_helper_set_config);
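/*
* Illustrative sketch, not part of the original file: this helper, together
* with drm_atomic_helper_page_flip() defined later in this file, is what
* drivers typically use for their legacy &drm_crtc_funcs entry points. The
* example_crtc_funcs name is a hypothetical driver symbol.
*
*      static const struct drm_crtc_funcs example_crtc_funcs = {
*              .set_config             = drm_atomic_helper_set_config,
*              .page_flip              = drm_atomic_helper_page_flip,
*              .destroy                = drm_crtc_cleanup,
*              .reset                  = drm_atomic_helper_crtc_reset,
*              .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
*              .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
*      };
*/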
  2466. /* just used from fb-helper and atomic-helper: */
  2467. int __drm_atomic_helper_set_config(struct drm_mode_set *set,
  2468. struct drm_atomic_state *state)
  2469. {
  2470. struct drm_crtc_state *crtc_state;
  2471. struct drm_plane_state *primary_state;
  2472. struct drm_crtc *crtc = set->crtc;
  2473. int hdisplay, vdisplay;
  2474. int ret;
  2475. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  2476. if (IS_ERR(crtc_state))
  2477. return PTR_ERR(crtc_state);
  2478. primary_state = drm_atomic_get_plane_state(state, crtc->primary);
  2479. if (IS_ERR(primary_state))
  2480. return PTR_ERR(primary_state);
  2481. if (!set->mode) {
  2482. WARN_ON(set->fb);
  2483. WARN_ON(set->num_connectors);
  2484. ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
  2485. if (ret != 0)
  2486. return ret;
  2487. crtc_state->active = false;
  2488. ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
  2489. if (ret != 0)
  2490. return ret;
  2491. drm_atomic_set_fb_for_plane(primary_state, NULL);
  2492. goto commit;
  2493. }
  2494. WARN_ON(!set->fb);
  2495. WARN_ON(!set->num_connectors);
  2496. ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
  2497. if (ret != 0)
  2498. return ret;
  2499. crtc_state->active = true;
  2500. ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
  2501. if (ret != 0)
  2502. return ret;
  2503. drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
  2504. drm_atomic_set_fb_for_plane(primary_state, set->fb);
  2505. primary_state->crtc_x = 0;
  2506. primary_state->crtc_y = 0;
  2507. primary_state->crtc_w = hdisplay;
  2508. primary_state->crtc_h = vdisplay;
  2509. primary_state->src_x = set->x << 16;
  2510. primary_state->src_y = set->y << 16;
  2511. if (drm_rotation_90_or_270(primary_state->rotation)) {
  2512. primary_state->src_w = vdisplay << 16;
  2513. primary_state->src_h = hdisplay << 16;
  2514. } else {
  2515. primary_state->src_w = hdisplay << 16;
  2516. primary_state->src_h = vdisplay << 16;
  2517. }
  2518. commit:
  2519. ret = update_output_state(state, set);
  2520. if (ret)
  2521. return ret;
  2522. return 0;
  2523. }
  2524. static int __drm_atomic_helper_disable_all(struct drm_device *dev,
  2525. struct drm_modeset_acquire_ctx *ctx,
  2526. bool clean_old_fbs)
  2527. {
  2528. struct drm_atomic_state *state;
  2529. struct drm_connector_state *conn_state;
  2530. struct drm_connector *conn;
  2531. struct drm_plane_state *plane_state;
  2532. struct drm_plane *plane;
  2533. struct drm_crtc_state *crtc_state;
  2534. struct drm_crtc *crtc;
  2535. int ret, i;
  2536. state = drm_atomic_state_alloc(dev);
  2537. if (!state)
  2538. return -ENOMEM;
  2539. state->acquire_ctx = ctx;
  2540. drm_for_each_crtc(crtc, dev) {
  2541. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  2542. if (IS_ERR(crtc_state)) {
  2543. ret = PTR_ERR(crtc_state);
  2544. goto free;
  2545. }
  2546. crtc_state->active = false;
  2547. ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
  2548. if (ret < 0)
  2549. goto free;
  2550. ret = drm_atomic_add_affected_planes(state, crtc);
  2551. if (ret < 0)
  2552. goto free;
  2553. ret = drm_atomic_add_affected_connectors(state, crtc);
  2554. if (ret < 0)
  2555. goto free;
  2556. }
  2557. for_each_new_connector_in_state(state, conn, conn_state, i) {
  2558. ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
  2559. if (ret < 0)
  2560. goto free;
  2561. }
  2562. for_each_new_plane_in_state(state, plane, plane_state, i) {
  2563. ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
  2564. if (ret < 0)
  2565. goto free;
  2566. drm_atomic_set_fb_for_plane(plane_state, NULL);
  2567. }
  2568. ret = drm_atomic_commit(state);
  2569. free:
  2570. drm_atomic_state_put(state);
  2571. return ret;
  2572. }
  2573. /**
  2574. * drm_atomic_helper_disable_all - disable all currently active outputs
  2575. * @dev: DRM device
  2576. * @ctx: lock acquisition context
  2577. *
  2578. * Loops through all connectors, finding those that aren't turned off and then
  2579. * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
  2580. * that they are connected to.
  2581. *
  2582. * This is used for example in suspend/resume to disable all currently active
  2583. * functions when suspending. If you just want to shut down everything at e.g.
  2584. * driver unload, look at drm_atomic_helper_shutdown().
  2585. *
  2586. * Note that if callers haven't already acquired all modeset locks this might
  2587. * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
  2588. *
  2589. * Returns:
  2590. * 0 on success or a negative error code on failure.
  2591. *
  2592. * See also:
  2593. * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
  2594. * drm_atomic_helper_shutdown().
  2595. */
  2596. int drm_atomic_helper_disable_all(struct drm_device *dev,
  2597. struct drm_modeset_acquire_ctx *ctx)
  2598. {
  2599. return __drm_atomic_helper_disable_all(dev, ctx, false);
  2600. }
  2601. EXPORT_SYMBOL(drm_atomic_helper_disable_all);
  2602. /**
2603. * drm_atomic_helper_shutdown - shut down all CRTCs
  2604. * @dev: DRM device
  2605. *
2606. * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
  2607. * suspend should instead be handled with drm_atomic_helper_suspend(), since
  2608. * that also takes a snapshot of the modeset state to be restored on resume.
  2609. *
  2610. * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
  2611. * and it is the atomic version of drm_crtc_force_disable_all().
  2612. */
  2613. void drm_atomic_helper_shutdown(struct drm_device *dev)
  2614. {
  2615. struct drm_modeset_acquire_ctx ctx;
  2616. int ret;
  2617. drm_modeset_acquire_init(&ctx, 0);
  2618. while (1) {
  2619. ret = drm_modeset_lock_all_ctx(dev, &ctx);
  2620. if (!ret)
  2621. ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
  2622. if (ret != -EDEADLK)
  2623. break;
  2624. drm_modeset_backoff(&ctx);
  2625. }
  2626. if (ret)
  2627. DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
  2628. drm_modeset_drop_locks(&ctx);
  2629. drm_modeset_acquire_fini(&ctx);
  2630. }
  2631. EXPORT_SYMBOL(drm_atomic_helper_shutdown);
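/*
* Illustrative sketch, not part of the original file: a driver's unload path
* usually just calls this helper once userspace can no longer reach the
* device. The example_unbind() function is hypothetical driver code.
*
*      static void example_unbind(struct drm_device *dev)
*      {
*              drm_dev_unregister(dev);
*              drm_atomic_helper_shutdown(dev);
*              drm_mode_config_cleanup(dev);
*      }
*/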
  2632. /**
  2633. * drm_atomic_helper_suspend - subsystem-level suspend helper
  2634. * @dev: DRM device
  2635. *
  2636. * Duplicates the current atomic state, disables all active outputs and then
  2637. * returns a pointer to the original atomic state to the caller. Drivers can
  2638. * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
  2639. * restore the output configuration that was active at the time the system
  2640. * entered suspend.
  2641. *
  2642. * Note that it is potentially unsafe to use this. The atomic state object
  2643. * returned by this function is assumed to be persistent. Drivers must ensure
  2644. * that this holds true. Before calling this function, drivers must make sure
  2645. * to suspend fbdev emulation so that nothing can be using the device.
  2646. *
  2647. * Returns:
  2648. * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
  2649. * encoded error code on failure. Drivers should store the returned atomic
  2650. * state object and pass it to the drm_atomic_helper_resume() helper upon
  2651. * resume.
  2652. *
  2653. * See also:
  2654. * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
  2655. * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
  2656. */
  2657. struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
  2658. {
  2659. struct drm_modeset_acquire_ctx ctx;
  2660. struct drm_atomic_state *state;
  2661. int err;
  2662. drm_modeset_acquire_init(&ctx, 0);
  2663. retry:
  2664. err = drm_modeset_lock_all_ctx(dev, &ctx);
  2665. if (err < 0) {
  2666. state = ERR_PTR(err);
  2667. goto unlock;
  2668. }
  2669. state = drm_atomic_helper_duplicate_state(dev, &ctx);
  2670. if (IS_ERR(state))
  2671. goto unlock;
  2672. err = drm_atomic_helper_disable_all(dev, &ctx);
  2673. if (err < 0) {
  2674. drm_atomic_state_put(state);
  2675. state = ERR_PTR(err);
  2676. goto unlock;
  2677. }
  2678. unlock:
  2679. if (PTR_ERR(state) == -EDEADLK) {
  2680. drm_modeset_backoff(&ctx);
  2681. goto retry;
  2682. }
  2683. drm_modeset_drop_locks(&ctx);
  2684. drm_modeset_acquire_fini(&ctx);
  2685. return state;
  2686. }
  2687. EXPORT_SYMBOL(drm_atomic_helper_suspend);
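/*
* Illustrative sketch, not part of the original file: a driver stores the
* returned state and hands it back to drm_atomic_helper_resume() later. The
* example_priv structure and example_pm_suspend() function are hypothetical
* driver code.
*
*      static int example_pm_suspend(struct device *dev)
*      {
*              struct example_priv *priv = dev_get_drvdata(dev);
*              struct drm_atomic_state *state;
*
*              state = drm_atomic_helper_suspend(priv->drm);
*              if (IS_ERR(state))
*                      return PTR_ERR(state);
*
*              priv->suspend_state = state;
*              return 0;
*      }
*/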
  2688. /**
  2689. * drm_atomic_helper_commit_duplicated_state - commit duplicated state
  2690. * @state: duplicated atomic state to commit
  2691. * @ctx: pointer to acquire_ctx to use for commit.
  2692. *
  2693. * The state returned by drm_atomic_helper_duplicate_state() and
  2694. * drm_atomic_helper_suspend() is partially invalid, and needs to
  2695. * be fixed up before commit.
  2696. *
  2697. * Returns:
  2698. * 0 on success or a negative error code on failure.
  2699. *
  2700. * See also:
  2701. * drm_atomic_helper_suspend()
  2702. */
  2703. int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
  2704. struct drm_modeset_acquire_ctx *ctx)
  2705. {
  2706. int i;
  2707. struct drm_plane *plane;
  2708. struct drm_plane_state *new_plane_state;
  2709. struct drm_connector *connector;
  2710. struct drm_connector_state *new_conn_state;
  2711. struct drm_crtc *crtc;
  2712. struct drm_crtc_state *new_crtc_state;
  2713. state->acquire_ctx = ctx;
  2714. for_each_new_plane_in_state(state, plane, new_plane_state, i)
  2715. state->planes[i].old_state = plane->state;
  2716. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
  2717. state->crtcs[i].old_state = crtc->state;
  2718. for_each_new_connector_in_state(state, connector, new_conn_state, i)
  2719. state->connectors[i].old_state = connector->state;
  2720. return drm_atomic_commit(state);
  2721. }
  2722. EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
  2723. /**
  2724. * drm_atomic_helper_resume - subsystem-level resume helper
  2725. * @dev: DRM device
  2726. * @state: atomic state to resume to
  2727. *
  2728. * Calls drm_mode_config_reset() to synchronize hardware and software states,
  2729. * grabs all modeset locks and commits the atomic state object. This can be
  2730. * used in conjunction with the drm_atomic_helper_suspend() helper to
  2731. * implement suspend/resume for drivers that support atomic mode-setting.
  2732. *
  2733. * Returns:
  2734. * 0 on success or a negative error code on failure.
  2735. *
  2736. * See also:
  2737. * drm_atomic_helper_suspend()
  2738. */
  2739. int drm_atomic_helper_resume(struct drm_device *dev,
  2740. struct drm_atomic_state *state)
  2741. {
  2742. struct drm_modeset_acquire_ctx ctx;
  2743. int err;
  2744. drm_mode_config_reset(dev);
  2745. drm_modeset_acquire_init(&ctx, 0);
  2746. while (1) {
  2747. err = drm_modeset_lock_all_ctx(dev, &ctx);
  2748. if (err)
  2749. goto out;
  2750. err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
  2751. out:
  2752. if (err != -EDEADLK)
  2753. break;
  2754. drm_modeset_backoff(&ctx);
  2755. }
  2756. drm_atomic_state_put(state);
  2757. drm_modeset_drop_locks(&ctx);
  2758. drm_modeset_acquire_fini(&ctx);
  2759. return err;
  2760. }
  2761. EXPORT_SYMBOL(drm_atomic_helper_resume);
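/*
* Illustrative sketch, not part of the original file, continuing the
* hypothetical suspend example earlier in this file: the matching resume hook
* simply replays the stored state.
*
*      static int example_pm_resume(struct device *dev)
*      {
*              struct example_priv *priv = dev_get_drvdata(dev);
*
*              return drm_atomic_helper_resume(priv->drm, priv->suspend_state);
*      }
*/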
  2762. static int page_flip_common(struct drm_atomic_state *state,
  2763. struct drm_crtc *crtc,
  2764. struct drm_framebuffer *fb,
  2765. struct drm_pending_vblank_event *event,
  2766. uint32_t flags)
  2767. {
  2768. struct drm_plane *plane = crtc->primary;
  2769. struct drm_plane_state *plane_state;
  2770. struct drm_crtc_state *crtc_state;
  2771. int ret = 0;
  2772. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  2773. if (IS_ERR(crtc_state))
  2774. return PTR_ERR(crtc_state);
  2775. crtc_state->event = event;
  2776. crtc_state->pageflip_flags = flags;
  2777. plane_state = drm_atomic_get_plane_state(state, plane);
  2778. if (IS_ERR(plane_state))
  2779. return PTR_ERR(plane_state);
  2780. ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
  2781. if (ret != 0)
  2782. return ret;
  2783. drm_atomic_set_fb_for_plane(plane_state, fb);
  2784. /* Make sure we don't accidentally do a full modeset. */
  2785. state->allow_modeset = false;
  2786. if (!crtc_state->active) {
  2787. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
  2788. crtc->base.id, crtc->name);
  2789. return -EINVAL;
  2790. }
  2791. return ret;
  2792. }
  2793. /**
  2794. * drm_atomic_helper_page_flip - execute a legacy page flip
  2795. * @crtc: DRM crtc
  2796. * @fb: DRM framebuffer
  2797. * @event: optional DRM event to signal upon completion
  2798. * @flags: flip flags for non-vblank sync'ed updates
  2799. * @ctx: lock acquisition context
  2800. *
  2801. * Provides a default &drm_crtc_funcs.page_flip implementation
  2802. * using the atomic driver interface.
  2803. *
  2804. * Returns:
  2805. * Returns 0 on success, negative errno numbers on failure.
  2806. *
  2807. * See also:
  2808. * drm_atomic_helper_page_flip_target()
  2809. */
  2810. int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
  2811. struct drm_framebuffer *fb,
  2812. struct drm_pending_vblank_event *event,
  2813. uint32_t flags,
  2814. struct drm_modeset_acquire_ctx *ctx)
  2815. {
  2816. struct drm_plane *plane = crtc->primary;
  2817. struct drm_atomic_state *state;
  2818. int ret = 0;
  2819. state = drm_atomic_state_alloc(plane->dev);
  2820. if (!state)
  2821. return -ENOMEM;
  2822. state->acquire_ctx = ctx;
  2823. ret = page_flip_common(state, crtc, fb, event, flags);
  2824. if (ret != 0)
  2825. goto fail;
  2826. ret = drm_atomic_nonblocking_commit(state);
  2827. fail:
  2828. drm_atomic_state_put(state);
  2829. return ret;
  2830. }
  2831. EXPORT_SYMBOL(drm_atomic_helper_page_flip);
  2832. /**
2833. * drm_atomic_helper_page_flip_target - do a page flip on a target vblank period
  2834. * @crtc: DRM crtc
  2835. * @fb: DRM framebuffer
  2836. * @event: optional DRM event to signal upon completion
  2837. * @flags: flip flags for non-vblank sync'ed updates
2838. * @target: target vblank period in which the flip should take effect
  2839. * @ctx: lock acquisition context
  2840. *
  2841. * Provides a default &drm_crtc_funcs.page_flip_target implementation.
2842. * Similar to drm_atomic_helper_page_flip(), but with an extra parameter to
2843. * specify the target vblank period in which to flip.
  2844. *
  2845. * Returns:
  2846. * Returns 0 on success, negative errno numbers on failure.
  2847. */
  2848. int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
  2849. struct drm_framebuffer *fb,
  2850. struct drm_pending_vblank_event *event,
  2851. uint32_t flags,
  2852. uint32_t target,
  2853. struct drm_modeset_acquire_ctx *ctx)
  2854. {
  2855. struct drm_plane *plane = crtc->primary;
  2856. struct drm_atomic_state *state;
  2857. struct drm_crtc_state *crtc_state;
  2858. int ret = 0;
  2859. state = drm_atomic_state_alloc(plane->dev);
  2860. if (!state)
  2861. return -ENOMEM;
  2862. state->acquire_ctx = ctx;
  2863. ret = page_flip_common(state, crtc, fb, event, flags);
  2864. if (ret != 0)
  2865. goto fail;
  2866. crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  2867. if (WARN_ON(!crtc_state)) {
  2868. ret = -EINVAL;
  2869. goto fail;
  2870. }
  2871. crtc_state->target_vblank = target;
  2872. ret = drm_atomic_nonblocking_commit(state);
  2873. fail:
  2874. drm_atomic_state_put(state);
  2875. return ret;
  2876. }
  2877. EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
  2878. /**
  2879. * drm_atomic_helper_best_encoder - Helper for
  2880. * &drm_connector_helper_funcs.best_encoder callback
  2881. * @connector: Connector control structure
  2882. *
  2883. * This is a &drm_connector_helper_funcs.best_encoder callback helper for
  2884. * connectors that support exactly 1 encoder, statically determined at driver
  2885. * init time.
  2886. */
  2887. struct drm_encoder *
  2888. drm_atomic_helper_best_encoder(struct drm_connector *connector)
  2889. {
  2890. WARN_ON(connector->encoder_ids[1]);
  2891. return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
  2892. }
  2893. EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
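/*
* Illustrative sketch, not part of the original file: connectors with a
* single, statically attached encoder just point their helper vtable at this
* function. The example_connector_get_modes() callback is hypothetical
* driver code.
*
*      static const struct drm_connector_helper_funcs example_connector_helpers = {
*              .get_modes      = example_connector_get_modes,
*              .best_encoder   = drm_atomic_helper_best_encoder,
*      };
*/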
  2894. /**
  2895. * DOC: atomic state reset and initialization
  2896. *
  2897. * Both the drm core and the atomic helpers assume that there is always the full
  2898. * and correct atomic software state for all connectors, CRTCs and planes
2899. * available. This is a bit of a problem on driver load and also after system
  2900. * suspend. One way to solve this is to have a hardware state read-out
  2901. * infrastructure which reconstructs the full software state (e.g. the i915
  2902. * driver).
  2903. *
  2904. * The simpler solution is to just reset the software state to everything off,
  2905. * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
  2906. * the atomic helpers provide default reset implementations for all hooks.
  2907. *
  2908. * On the upside the precise state tracking of atomic simplifies system suspend
  2909. * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
  2910. * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
  2911. * For other drivers the building blocks are split out, see the documentation
  2912. * for these functions.
  2913. */
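/*
* Illustrative sketch, not part of the original file: drivers without
* hardware state read-out typically reset the software state once all planes,
* CRTCs and connectors have been created. The example_* functions are
* hypothetical driver code.
*
*      static int example_modeset_init(struct drm_device *dev)
*      {
*              int ret;
*
*              ret = example_create_pipes(dev);
*              if (ret)
*                      return ret;
*
*              drm_mode_config_reset(dev);
*              return 0;
*      }
*/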
  2914. /**
  2915. * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
  2916. * @crtc: drm CRTC
  2917. *
  2918. * Resets the atomic state for @crtc by freeing the state pointer (which might
  2919. * be NULL, e.g. at driver load time) and allocating a new empty state object.
  2920. */
  2921. void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
  2922. {
  2923. if (crtc->state)
  2924. __drm_atomic_helper_crtc_destroy_state(crtc->state);
  2925. kfree(crtc->state);
  2926. crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
  2927. if (crtc->state)
  2928. crtc->state->crtc = crtc;
  2929. }
  2930. EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
  2931. /**
  2932. * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
  2933. * @crtc: CRTC object
  2934. * @state: atomic CRTC state
  2935. *
  2936. * Copies atomic state from a CRTC's current state and resets inferred values.
  2937. * This is useful for drivers that subclass the CRTC state.
  2938. */
  2939. void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
  2940. struct drm_crtc_state *state)
  2941. {
  2942. memcpy(state, crtc->state, sizeof(*state));
  2943. if (state->mode_blob)
  2944. drm_property_blob_get(state->mode_blob);
  2945. if (state->degamma_lut)
  2946. drm_property_blob_get(state->degamma_lut);
  2947. if (state->ctm)
  2948. drm_property_blob_get(state->ctm);
  2949. if (state->gamma_lut)
  2950. drm_property_blob_get(state->gamma_lut);
  2951. state->mode_changed = false;
  2952. state->active_changed = false;
  2953. state->planes_changed = false;
  2954. state->connectors_changed = false;
  2955. state->color_mgmt_changed = false;
  2956. state->zpos_changed = false;
  2957. state->commit = NULL;
  2958. state->event = NULL;
  2959. state->pageflip_flags = 0;
  2960. }
  2961. EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
  2962. /**
  2963. * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
  2964. * @crtc: drm CRTC
  2965. *
  2966. * Default CRTC state duplicate hook for drivers which don't have their own
  2967. * subclassed CRTC state structure.
  2968. */
  2969. struct drm_crtc_state *
  2970. drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
  2971. {
  2972. struct drm_crtc_state *state;
  2973. if (WARN_ON(!crtc->state))
  2974. return NULL;
  2975. state = kmalloc(sizeof(*state), GFP_KERNEL);
  2976. if (state)
  2977. __drm_atomic_helper_crtc_duplicate_state(crtc, state);
  2978. return state;
  2979. }
  2980. EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
  2981. /**
  2982. * __drm_atomic_helper_crtc_destroy_state - release CRTC state
  2983. * @state: CRTC state object to release
  2984. *
  2985. * Releases all resources stored in the CRTC state without actually freeing
  2986. * the memory of the CRTC state. This is useful for drivers that subclass the
  2987. * CRTC state.
  2988. */
  2989. void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
  2990. {
  2991. if (state->commit) {
  2992. /*
  2993. * In the event that a non-blocking commit returns
  2994. * -ERESTARTSYS before the commit_tail work is queued, we will
  2995. * have an extra reference to the commit object. Release it, if
  2996. * the event has not been consumed by the worker.
  2997. *
  2998. * state->event may be freed, so we can't directly look at
  2999. * state->event->base.completion.
  3000. */
  3001. if (state->event && state->commit->abort_completion)
  3002. drm_crtc_commit_put(state->commit);
  3003. kfree(state->commit->event);
  3004. state->commit->event = NULL;
  3005. drm_crtc_commit_put(state->commit);
  3006. }
  3007. drm_property_blob_put(state->mode_blob);
  3008. drm_property_blob_put(state->degamma_lut);
  3009. drm_property_blob_put(state->ctm);
  3010. drm_property_blob_put(state->gamma_lut);
  3011. }
  3012. EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
  3013. /**
  3014. * drm_atomic_helper_crtc_destroy_state - default state destroy hook
  3015. * @crtc: drm CRTC
  3016. * @state: CRTC state object to release
  3017. *
  3018. * Default CRTC state destroy hook for drivers which don't have their own
  3019. * subclassed CRTC state structure.
  3020. */
  3021. void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
  3022. struct drm_crtc_state *state)
  3023. {
  3024. __drm_atomic_helper_crtc_destroy_state(state);
  3025. kfree(state);
  3026. }
  3027. EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
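/*
* Illustrative sketch, not part of the original file: drivers that subclass
* &drm_crtc_state build their duplicate/destroy hooks on top of the
* __drm_atomic_helper_crtc_*() variants above. The example_crtc_state
* structure and its dither_mode member are hypothetical driver code.
*
*      struct example_crtc_state {
*              struct drm_crtc_state base;
*              u32 dither_mode;
*      };
*
*      static struct drm_crtc_state *
*      example_crtc_duplicate_state(struct drm_crtc *crtc)
*      {
*              struct example_crtc_state *old =
*                      container_of(crtc->state, struct example_crtc_state, base);
*              struct example_crtc_state *copy;
*
*              copy = kmalloc(sizeof(*copy), GFP_KERNEL);
*              if (!copy)
*                      return NULL;
*
*              __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
*              copy->dither_mode = old->dither_mode;
*
*              return &copy->base;
*      }
*
*      static void example_crtc_destroy_state(struct drm_crtc *crtc,
*                                             struct drm_crtc_state *state)
*      {
*              __drm_atomic_helper_crtc_destroy_state(state);
*              kfree(container_of(state, struct example_crtc_state, base));
*      }
*/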
  3028. /**
  3029. * __drm_atomic_helper_plane_reset - resets planes state to default values
  3030. * @plane: plane object, must not be NULL
  3031. * @state: atomic plane state, must not be NULL
  3032. *
  3033. * Initializes plane state to default. This is useful for drivers that subclass
  3034. * the plane state.
  3035. */
  3036. void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
  3037. struct drm_plane_state *state)
  3038. {
  3039. state->plane = plane;
  3040. state->rotation = DRM_MODE_ROTATE_0;
  3041. state->alpha = DRM_BLEND_ALPHA_OPAQUE;
  3042. state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
  3043. plane->state = state;
  3044. }
  3045. EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
  3046. /**
  3047. * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
  3048. * @plane: drm plane
  3049. *
  3050. * Resets the atomic state for @plane by freeing the state pointer (which might
  3051. * be NULL, e.g. at driver load time) and allocating a new empty state object.
  3052. */
  3053. void drm_atomic_helper_plane_reset(struct drm_plane *plane)
  3054. {
  3055. if (plane->state)
  3056. __drm_atomic_helper_plane_destroy_state(plane->state);
  3057. kfree(plane->state);
  3058. plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
  3059. if (plane->state)
  3060. __drm_atomic_helper_plane_reset(plane, plane->state);
  3061. }
  3062. EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
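/*
* Illustrative sketch, not part of the original file: drivers with a
* subclassed plane state allocate their own structure in the
* &drm_plane_funcs.reset hook and let __drm_atomic_helper_plane_reset() fill
* in the generic defaults. The example_plane_state structure and
* example_plane_destroy_state() are hypothetical driver code.
*
*      struct example_plane_state {
*              struct drm_plane_state base;
*              bool use_scaler;
*      };
*
*      static void example_plane_reset(struct drm_plane *plane)
*      {
*              struct example_plane_state *state;
*
*              if (plane->state)
*                      example_plane_destroy_state(plane, plane->state);
*
*              state = kzalloc(sizeof(*state), GFP_KERNEL);
*              if (!state)
*                      return;
*
*              __drm_atomic_helper_plane_reset(plane, &state->base);
*      }
*/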
  3063. /**
  3064. * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
  3065. * @plane: plane object
  3066. * @state: atomic plane state
  3067. *
  3068. * Copies atomic state from a plane's current state. This is useful for
  3069. * drivers that subclass the plane state.
  3070. */
  3071. void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
  3072. struct drm_plane_state *state)
  3073. {
  3074. memcpy(state, plane->state, sizeof(*state));
  3075. if (state->fb)
  3076. drm_framebuffer_get(state->fb);
  3077. state->fence = NULL;
  3078. state->commit = NULL;
  3079. }
  3080. EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
  3081. /**
  3082. * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
  3083. * @plane: drm plane
  3084. *
  3085. * Default plane state duplicate hook for drivers which don't have their own
  3086. * subclassed plane state structure.
  3087. */
  3088. struct drm_plane_state *
  3089. drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
  3090. {
  3091. struct drm_plane_state *state;
  3092. if (WARN_ON(!plane->state))
  3093. return NULL;
  3094. state = kmalloc(sizeof(*state), GFP_KERNEL);
  3095. if (state)
  3096. __drm_atomic_helper_plane_duplicate_state(plane, state);
  3097. return state;
  3098. }
  3099. EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
  3100. /**
  3101. * __drm_atomic_helper_plane_destroy_state - release plane state
  3102. * @state: plane state object to release
  3103. *
  3104. * Releases all resources stored in the plane state without actually freeing
  3105. * the memory of the plane state. This is useful for drivers that subclass the
  3106. * plane state.
  3107. */
  3108. void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
  3109. {
  3110. if (state->fb)
  3111. drm_framebuffer_put(state->fb);
  3112. if (state->fence)
  3113. dma_fence_put(state->fence);
  3114. if (state->commit)
  3115. drm_crtc_commit_put(state->commit);
  3116. }
  3117. EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
  3118. /**
  3119. * drm_atomic_helper_plane_destroy_state - default state destroy hook
  3120. * @plane: drm plane
  3121. * @state: plane state object to release
  3122. *
  3123. * Default plane state destroy hook for drivers which don't have their own
  3124. * subclassed plane state structure.
  3125. */
  3126. void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
  3127. struct drm_plane_state *state)
  3128. {
  3129. __drm_atomic_helper_plane_destroy_state(state);
  3130. kfree(state);
  3131. }
  3132. EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
  3133. /**
  3134. * __drm_atomic_helper_connector_reset - reset state on connector
  3135. * @connector: drm connector
  3136. * @conn_state: connector state to assign
  3137. *
  3138. * Initializes the newly allocated @conn_state and assigns it to
3139. * the &drm_connector->state pointer of @connector, usually required when
  3140. * initializing the drivers or when called from the &drm_connector_funcs.reset
  3141. * hook.
  3142. *
  3143. * This is useful for drivers that subclass the connector state.
  3144. */
  3145. void
  3146. __drm_atomic_helper_connector_reset(struct drm_connector *connector,
  3147. struct drm_connector_state *conn_state)
  3148. {
  3149. if (conn_state)
  3150. conn_state->connector = connector;
  3151. connector->state = conn_state;
  3152. }
  3153. EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
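/*
* Illustrative sketch, not part of the original file: a
* &drm_connector_funcs.reset hook with a subclassed connector state typically
* looks like this. The example_connector_state structure is hypothetical
* driver code; its base member must stay first so the destroy path can free
* the whole allocation through the base pointer.
*
*      struct example_connector_state {
*              struct drm_connector_state base;
*              bool force_dithering;
*      };
*
*      static void example_connector_reset(struct drm_connector *connector)
*      {
*              struct example_connector_state *state;
*
*              if (connector->state) {
*                      __drm_atomic_helper_connector_destroy_state(connector->state);
*                      kfree(connector->state);
*              }
*
*              state = kzalloc(sizeof(*state), GFP_KERNEL);
*              if (!state)
*                      return;
*
*              __drm_atomic_helper_connector_reset(connector, &state->base);
*      }
*/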
  3154. /**
  3155. * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
  3156. * @connector: drm connector
  3157. *
  3158. * Resets the atomic state for @connector by freeing the state pointer (which
  3159. * might be NULL, e.g. at driver load time) and allocating a new empty state
  3160. * object.
  3161. */
  3162. void drm_atomic_helper_connector_reset(struct drm_connector *connector)
  3163. {
  3164. struct drm_connector_state *conn_state =
  3165. kzalloc(sizeof(*conn_state), GFP_KERNEL);
  3166. if (connector->state)
  3167. __drm_atomic_helper_connector_destroy_state(connector->state);
  3168. kfree(connector->state);
  3169. __drm_atomic_helper_connector_reset(connector, conn_state);
  3170. }
  3171. EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
  3172. /**
  3173. * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
  3174. * @connector: connector object
  3175. * @state: atomic connector state
  3176. *
  3177. * Copies atomic state from a connector's current state. This is useful for
  3178. * drivers that subclass the connector state.
  3179. */
  3180. void
  3181. __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
  3182. struct drm_connector_state *state)
  3183. {
  3184. memcpy(state, connector->state, sizeof(*state));
  3185. if (state->crtc)
  3186. drm_connector_get(connector);
  3187. state->commit = NULL;
  3188. /* Don't copy over a writeback job, they are used only once */
  3189. state->writeback_job = NULL;
  3190. }
  3191. EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
  3192. /**
  3193. * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
  3194. * @connector: drm connector
  3195. *
  3196. * Default connector state duplicate hook for drivers which don't have their own
  3197. * subclassed connector state structure.
  3198. */
  3199. struct drm_connector_state *
  3200. drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
  3201. {
  3202. struct drm_connector_state *state;
  3203. if (WARN_ON(!connector->state))
  3204. return NULL;
  3205. state = kmalloc(sizeof(*state), GFP_KERNEL);
  3206. if (state)
  3207. __drm_atomic_helper_connector_duplicate_state(connector, state);
  3208. return state;
  3209. }
  3210. EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
  3211. /**
  3212. * drm_atomic_helper_duplicate_state - duplicate an atomic state object
  3213. * @dev: DRM device
  3214. * @ctx: lock acquisition context
  3215. *
  3216. * Makes a copy of the current atomic state by looping over all objects and
  3217. * duplicating their respective states. This is used for example by suspend/
  3218. * resume support code to save the state prior to suspend such that it can
  3219. * be restored upon resume.
  3220. *
  3221. * Note that this treats atomic state as persistent between save and restore.
  3222. * Drivers must make sure that this is possible and won't result in confusion
  3223. * or erroneous behaviour.
  3224. *
  3225. * Note that if callers haven't already acquired all modeset locks this might
  3226. * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
  3227. *
  3228. * Returns:
  3229. * A pointer to the copy of the atomic state object on success or an
  3230. * ERR_PTR()-encoded error code on failure.
  3231. *
  3232. * See also:
  3233. * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
  3234. */
  3235. struct drm_atomic_state *
  3236. drm_atomic_helper_duplicate_state(struct drm_device *dev,
  3237. struct drm_modeset_acquire_ctx *ctx)
  3238. {
  3239. struct drm_atomic_state *state;
  3240. struct drm_connector *conn;
  3241. struct drm_connector_list_iter conn_iter;
  3242. struct drm_plane *plane;
  3243. struct drm_crtc *crtc;
  3244. int err = 0;
  3245. state = drm_atomic_state_alloc(dev);
  3246. if (!state)
  3247. return ERR_PTR(-ENOMEM);
  3248. state->acquire_ctx = ctx;
  3249. drm_for_each_crtc(crtc, dev) {
  3250. struct drm_crtc_state *crtc_state;
  3251. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  3252. if (IS_ERR(crtc_state)) {
  3253. err = PTR_ERR(crtc_state);
  3254. goto free;
  3255. }
  3256. }
  3257. drm_for_each_plane(plane, dev) {
  3258. struct drm_plane_state *plane_state;
  3259. plane_state = drm_atomic_get_plane_state(state, plane);
  3260. if (IS_ERR(plane_state)) {
  3261. err = PTR_ERR(plane_state);
  3262. goto free;
  3263. }
  3264. }
  3265. drm_connector_list_iter_begin(dev, &conn_iter);
  3266. drm_for_each_connector_iter(conn, &conn_iter) {
  3267. struct drm_connector_state *conn_state;
  3268. conn_state = drm_atomic_get_connector_state(state, conn);
  3269. if (IS_ERR(conn_state)) {
  3270. err = PTR_ERR(conn_state);
  3271. drm_connector_list_iter_end(&conn_iter);
  3272. goto free;
  3273. }
  3274. }
  3275. drm_connector_list_iter_end(&conn_iter);
  3276. /* clear the acquire context so that it isn't accidentally reused */
  3277. state->acquire_ctx = NULL;
  3278. free:
  3279. if (err < 0) {
  3280. drm_atomic_state_put(state);
  3281. state = ERR_PTR(err);
  3282. }
  3283. return state;
  3284. }
  3285. EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
  3286. /**
  3287. * __drm_atomic_helper_connector_destroy_state - release connector state
  3288. * @state: connector state object to release
  3289. *
  3290. * Releases all resources stored in the connector state without actually
  3291. * freeing the memory of the connector state. This is useful for drivers that
  3292. * subclass the connector state.
  3293. */
  3294. void
  3295. __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
  3296. {
  3297. if (state->crtc)
  3298. drm_connector_put(state->connector);
  3299. if (state->commit)
  3300. drm_crtc_commit_put(state->commit);
  3301. }
  3302. EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
  3303. /**
  3304. * drm_atomic_helper_connector_destroy_state - default state destroy hook
  3305. * @connector: drm connector
  3306. * @state: connector state object to release
  3307. *
  3308. * Default connector state destroy hook for drivers which don't have their own
  3309. * subclassed connector state structure.
  3310. */
  3311. void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
  3312. struct drm_connector_state *state)
  3313. {
  3314. __drm_atomic_helper_connector_destroy_state(state);
  3315. kfree(state);
  3316. }
  3317. EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
  3318. /**
  3319. * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
  3320. * @crtc: CRTC object
  3321. * @red: red correction table
  3322. * @green: green correction table
3323. * @blue: blue correction table
  3324. * @size: size of the tables
  3325. * @ctx: lock acquire context
  3326. *
  3327. * Implements support for legacy gamma correction table for drivers
  3328. * that support color management through the DEGAMMA_LUT/GAMMA_LUT
  3329. * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
  3330. * how the atomic color management and gamma tables work.
  3331. */
  3332. int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
  3333. u16 *red, u16 *green, u16 *blue,
  3334. uint32_t size,
  3335. struct drm_modeset_acquire_ctx *ctx)
  3336. {
  3337. struct drm_device *dev = crtc->dev;
  3338. struct drm_atomic_state *state;
  3339. struct drm_crtc_state *crtc_state;
  3340. struct drm_property_blob *blob = NULL;
  3341. struct drm_color_lut *blob_data;
  3342. int i, ret = 0;
  3343. bool replaced;
  3344. state = drm_atomic_state_alloc(crtc->dev);
  3345. if (!state)
  3346. return -ENOMEM;
  3347. blob = drm_property_create_blob(dev,
  3348. sizeof(struct drm_color_lut) * size,
  3349. NULL);
  3350. if (IS_ERR(blob)) {
  3351. ret = PTR_ERR(blob);
  3352. blob = NULL;
  3353. goto fail;
  3354. }
  3355. /* Prepare GAMMA_LUT with the legacy values. */
  3356. blob_data = blob->data;
  3357. for (i = 0; i < size; i++) {
  3358. blob_data[i].red = red[i];
  3359. blob_data[i].green = green[i];
  3360. blob_data[i].blue = blue[i];
  3361. }
  3362. state->acquire_ctx = ctx;
  3363. crtc_state = drm_atomic_get_crtc_state(state, crtc);
  3364. if (IS_ERR(crtc_state)) {
  3365. ret = PTR_ERR(crtc_state);
  3366. goto fail;
  3367. }
  3368. /* Reset DEGAMMA_LUT and CTM properties. */
  3369. replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
  3370. replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
  3371. replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
  3372. crtc_state->color_mgmt_changed |= replaced;
  3373. ret = drm_atomic_commit(state);
  3374. fail:
  3375. drm_atomic_state_put(state);
  3376. drm_property_blob_put(blob);
  3377. return ret;
  3378. }
  3379. EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
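/*
* Illustrative sketch, not part of the original file: drivers exposing
* DEGAMMA_LUT/CTM/GAMMA_LUT route the legacy gamma ioctl through this helper,
* and call drm_crtc_enable_color_mgmt() plus drm_mode_crtc_set_gamma_size()
* at init time so the legacy table has the expected size. The
* example_gamma_crtc_funcs name is a hypothetical driver symbol.
*
*      static const struct drm_crtc_funcs example_gamma_crtc_funcs = {
*              .gamma_set              = drm_atomic_helper_legacy_gamma_set,
*              .set_config             = drm_atomic_helper_set_config,
*              .page_flip              = drm_atomic_helper_page_flip,
*              .reset                  = drm_atomic_helper_crtc_reset,
*              .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
*              .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
*              .destroy                = drm_crtc_cleanup,
*      };
*/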
  3380. /**
3381. * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private object state
3382. * @obj: private object
3383. * @state: new private object state
3384. *
3385. * Copies atomic state from a private object's current state.
  3386. * This is useful for drivers that subclass the private state.
  3387. */
  3388. void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
  3389. struct drm_private_state *state)
  3390. {
  3391. memcpy(state, obj->state, sizeof(*state));
  3392. }
  3393. EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
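/*
* Illustrative sketch, not part of the original file: duplicate hooks for
* subclassed private object state typically kmemdup() the whole driver
* structure and then use this helper to copy the embedded base state. The
* example_bus_state structure is hypothetical driver code.
*
*      static struct drm_private_state *
*      example_bus_duplicate_state(struct drm_private_obj *obj)
*      {
*              struct example_bus_state *state =
*                      container_of(obj->state, struct example_bus_state, base);
*              struct example_bus_state *copy;
*
*              copy = kmemdup(state, sizeof(*copy), GFP_KERNEL);
*              if (!copy)
*                      return NULL;
*
*              __drm_atomic_helper_private_obj_duplicate_state(obj, &copy->base);
*
*              return &copy->base;
*      }
*/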