drm_atomic_helper.c

  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software"),
  7. * to deal in the Software without restriction, including without limitation
  8. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9. * and/or sell copies of the Software, and to permit persons to whom the
  10. * Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice shall be included in
  13. * all copies or substantial portions of the Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21. * OTHER DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Rob Clark <robdclark@gmail.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. */
  27. #include <drm/drmP.h>
  28. #include <drm/drm_atomic.h>
  29. #include <drm/drm_plane_helper.h>
  30. #include <drm/drm_crtc_helper.h>
  31. #include <drm/drm_atomic_helper.h>
  32. #include <linux/dma-fence.h>
  33. #include "drm_crtc_helper_internal.h"
  34. #include "drm_crtc_internal.h"
  35. /**
  36. * DOC: overview
  37. *
  38. * This helper library provides implementations of check and commit functions on
  39. * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
  40. * also provides convenience implementations for the atomic state handling
  41. * callbacks for drivers which don't need to subclass the drm core structures to
  42. * add their own additional internal state.
  43. *
  44. * This library also provides default implementations for the check callback in
  45. * drm_atomic_helper_check() and for the commit callback with
  46. * drm_atomic_helper_commit(). But the individual stages and callbacks are
  47. * exposed to allow drivers to mix and match and e.g. use the plane helpers only
  48. * together with a driver private modeset implementation.
  49. *
  50. * This library also provides implementations for all the legacy driver
  51. * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
  52. * drm_atomic_helper_disable_plane() and the
  53. * various functions to implement set_property callbacks. New drivers must not
  54. * implement these functions themselves but must use the provided helpers.
  55. *
  56. * The atomic helper uses the same function table structures as all other
  57. * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
  58. * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
  59. * also shares the &struct drm_plane_helper_funcs function table with the plane
  60. * helpers.
  61. */
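/*
 * As a minimal sketch (the foo_* names are hypothetical and not part of this
 * file), a driver that relies entirely on these helpers can plug them straight
 * into its &drm_mode_config_funcs and keep only the hardware-specific hooks in
 * the per-plane/crtc/encoder helper function tables:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = foo_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */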
  62. static void
  63. drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
  64. struct drm_plane_state *old_plane_state,
  65. struct drm_plane_state *plane_state,
  66. struct drm_plane *plane)
  67. {
  68. struct drm_crtc_state *crtc_state;
  69. if (old_plane_state->crtc) {
  70. crtc_state = drm_atomic_get_new_crtc_state(state,
  71. old_plane_state->crtc);
  72. if (WARN_ON(!crtc_state))
  73. return;
  74. crtc_state->planes_changed = true;
  75. }
  76. if (plane_state->crtc) {
  77. crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
  78. if (WARN_ON(!crtc_state))
  79. return;
  80. crtc_state->planes_changed = true;
  81. }
  82. }
  83. static int handle_conflicting_encoders(struct drm_atomic_state *state,
  84. bool disable_conflicting_encoders)
  85. {
  86. struct drm_connector_state *new_conn_state;
  87. struct drm_connector *connector;
  88. struct drm_connector_list_iter conn_iter;
  89. struct drm_encoder *encoder;
  90. unsigned encoder_mask = 0;
  91. int i, ret = 0;
  92. /*
  93. * First loop, find all newly assigned encoders from the connectors
  94. * part of the state. If the same encoder is assigned to multiple
  95. * connectors bail out.
  96. */
  97. for_each_new_connector_in_state(state, connector, new_conn_state, i) {
  98. const struct drm_connector_helper_funcs *funcs = connector->helper_private;
  99. struct drm_encoder *new_encoder;
  100. if (!new_conn_state->crtc)
  101. continue;
  102. if (funcs->atomic_best_encoder)
  103. new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
  104. else if (funcs->best_encoder)
  105. new_encoder = funcs->best_encoder(connector);
  106. else
  107. new_encoder = drm_atomic_helper_best_encoder(connector);
  108. if (new_encoder) {
  109. if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
  110. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
  111. new_encoder->base.id, new_encoder->name,
  112. connector->base.id, connector->name);
  113. return -EINVAL;
  114. }
  115. encoder_mask |= 1 << drm_encoder_index(new_encoder);
  116. }
  117. }
  118. if (!encoder_mask)
  119. return 0;
  120. /*
  121. * Second loop, iterate over all connectors not part of the state.
  122. *
  123. * If a conflicting encoder is found and disable_conflicting_encoders
  124. * is not set, an error is returned. Userspace can provide a solution
  125. * through the atomic ioctl.
  126. *
  127. * If the flag is set conflicting connectors are removed from the crtc
  128. * and the crtc is disabled if no encoder is left. This preserves
  129. * compatibility with the legacy set_config behavior.
  130. */
  131. drm_connector_list_iter_begin(state->dev, &conn_iter);
  132. drm_for_each_connector_iter(connector, &conn_iter) {
  133. struct drm_crtc_state *crtc_state;
  134. if (drm_atomic_get_new_connector_state(state, connector))
  135. continue;
  136. encoder = connector->state->best_encoder;
  137. if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
  138. continue;
  139. if (!disable_conflicting_encoders) {
  140. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
  141. encoder->base.id, encoder->name,
  142. connector->state->crtc->base.id,
  143. connector->state->crtc->name,
  144. connector->base.id, connector->name);
  145. ret = -EINVAL;
  146. goto out;
  147. }
  148. new_conn_state = drm_atomic_get_connector_state(state, connector);
  149. if (IS_ERR(new_conn_state)) {
  150. ret = PTR_ERR(new_conn_state);
  151. goto out;
  152. }
  153. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
  154. encoder->base.id, encoder->name,
  155. new_conn_state->crtc->base.id, new_conn_state->crtc->name,
  156. connector->base.id, connector->name);
  157. crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
  158. ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
  159. if (ret)
  160. goto out;
  161. if (!crtc_state->connector_mask) {
  162. ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
  163. NULL);
  164. if (ret < 0)
  165. goto out;
  166. crtc_state->active = false;
  167. }
  168. }
  169. out:
  170. drm_connector_list_iter_end(&conn_iter);
  171. return ret;
  172. }
  173. static void
  174. set_best_encoder(struct drm_atomic_state *state,
  175. struct drm_connector_state *conn_state,
  176. struct drm_encoder *encoder)
  177. {
  178. struct drm_crtc_state *crtc_state;
  179. struct drm_crtc *crtc;
  180. if (conn_state->best_encoder) {
  181. /* Unset the encoder_mask in the old crtc state. */
  182. crtc = conn_state->connector->state->crtc;
  183. /* A NULL crtc is an error here because we should have
  184. * duplicated a NULL best_encoder when crtc was NULL.
  185. * As an exception restoring duplicated atomic state
  186. * during resume is allowed, so don't warn when
  187. * best_encoder is equal to encoder we intend to set.
  188. */
  189. WARN_ON(!crtc && encoder != conn_state->best_encoder);
  190. if (crtc) {
  191. crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  192. crtc_state->encoder_mask &=
  193. ~(1 << drm_encoder_index(conn_state->best_encoder));
  194. }
  195. }
  196. if (encoder) {
  197. crtc = conn_state->crtc;
  198. WARN_ON(!crtc);
  199. if (crtc) {
  200. crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  201. crtc_state->encoder_mask |=
  202. 1 << drm_encoder_index(encoder);
  203. }
  204. }
  205. conn_state->best_encoder = encoder;
  206. }
  207. static void
  208. steal_encoder(struct drm_atomic_state *state,
  209. struct drm_encoder *encoder)
  210. {
  211. struct drm_crtc_state *crtc_state;
  212. struct drm_connector *connector;
  213. struct drm_connector_state *old_connector_state, *new_connector_state;
  214. int i;
  215. for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
  216. struct drm_crtc *encoder_crtc;
  217. if (new_connector_state->best_encoder != encoder)
  218. continue;
  219. encoder_crtc = old_connector_state->crtc;
  220. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
  221. encoder->base.id, encoder->name,
  222. encoder_crtc->base.id, encoder_crtc->name);
  223. set_best_encoder(state, new_connector_state, NULL);
  224. crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
  225. crtc_state->connectors_changed = true;
  226. return;
  227. }
  228. }
  229. static int
  230. update_connector_routing(struct drm_atomic_state *state,
  231. struct drm_connector *connector,
  232. struct drm_connector_state *old_connector_state,
  233. struct drm_connector_state *new_connector_state)
  234. {
  235. const struct drm_connector_helper_funcs *funcs;
  236. struct drm_encoder *new_encoder;
  237. struct drm_crtc_state *crtc_state;
  238. DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
  239. connector->base.id,
  240. connector->name);
  241. if (old_connector_state->crtc != new_connector_state->crtc) {
  242. if (old_connector_state->crtc) {
  243. crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
  244. crtc_state->connectors_changed = true;
  245. }
  246. if (new_connector_state->crtc) {
  247. crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
  248. crtc_state->connectors_changed = true;
  249. }
  250. }
  251. if (!new_connector_state->crtc) {
  252. DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
  253. connector->base.id,
  254. connector->name);
  255. set_best_encoder(state, new_connector_state, NULL);
  256. return 0;
  257. }
  258. funcs = connector->helper_private;
  259. if (funcs->atomic_best_encoder)
  260. new_encoder = funcs->atomic_best_encoder(connector,
  261. new_connector_state);
  262. else if (funcs->best_encoder)
  263. new_encoder = funcs->best_encoder(connector);
  264. else
  265. new_encoder = drm_atomic_helper_best_encoder(connector);
  266. if (!new_encoder) {
  267. DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
  268. connector->base.id,
  269. connector->name);
  270. return -EINVAL;
  271. }
  272. if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
  273. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
  274. new_encoder->base.id,
  275. new_encoder->name,
  276. new_connector_state->crtc->base.id,
  277. new_connector_state->crtc->name);
  278. return -EINVAL;
  279. }
  280. if (new_encoder == new_connector_state->best_encoder) {
  281. set_best_encoder(state, new_connector_state, new_encoder);
  282. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
  283. connector->base.id,
  284. connector->name,
  285. new_encoder->base.id,
  286. new_encoder->name,
  287. new_connector_state->crtc->base.id,
  288. new_connector_state->crtc->name);
  289. return 0;
  290. }
  291. steal_encoder(state, new_encoder);
  292. set_best_encoder(state, new_connector_state, new_encoder);
  293. crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
  294. crtc_state->connectors_changed = true;
  295. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
  296. connector->base.id,
  297. connector->name,
  298. new_encoder->base.id,
  299. new_encoder->name,
  300. new_connector_state->crtc->base.id,
  301. new_connector_state->crtc->name);
  302. return 0;
  303. }
  304. static int
  305. mode_fixup(struct drm_atomic_state *state)
  306. {
  307. struct drm_crtc *crtc;
  308. struct drm_crtc_state *new_crtc_state;
  309. struct drm_connector *connector;
  310. struct drm_connector_state *new_conn_state;
  311. int i;
  312. int ret;
  313. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  314. if (!new_crtc_state->mode_changed &&
  315. !new_crtc_state->connectors_changed)
  316. continue;
  317. drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
  318. }
  319. for_each_new_connector_in_state(state, connector, new_conn_state, i) {
  320. const struct drm_encoder_helper_funcs *funcs;
  321. struct drm_encoder *encoder;
  322. WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);
  323. if (!new_conn_state->crtc || !new_conn_state->best_encoder)
  324. continue;
  325. new_crtc_state =
  326. drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
  327. /*
  328. * Each encoder has at most one connector (since we always steal
  329. * it away), so we won't call ->mode_fixup twice.
  330. */
  331. encoder = new_conn_state->best_encoder;
  332. funcs = encoder->helper_private;
  333. ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
  334. &new_crtc_state->adjusted_mode);
  335. if (!ret) {
  336. DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
  337. return -EINVAL;
  338. }
  339. if (funcs && funcs->atomic_check) {
  340. ret = funcs->atomic_check(encoder, new_crtc_state,
  341. new_conn_state);
  342. if (ret) {
  343. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
  344. encoder->base.id, encoder->name);
  345. return ret;
  346. }
  347. } else if (funcs && funcs->mode_fixup) {
  348. ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
  349. &new_crtc_state->adjusted_mode);
  350. if (!ret) {
  351. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
  352. encoder->base.id, encoder->name);
  353. return -EINVAL;
  354. }
  355. }
  356. }
  357. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  358. const struct drm_crtc_helper_funcs *funcs;
  359. if (!new_crtc_state->enable)
  360. continue;
  361. if (!new_crtc_state->mode_changed &&
  362. !new_crtc_state->connectors_changed)
  363. continue;
  364. funcs = crtc->helper_private;
  365. if (!funcs->mode_fixup)
  366. continue;
  367. ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
  368. &new_crtc_state->adjusted_mode);
  369. if (!ret) {
  370. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
  371. crtc->base.id, crtc->name);
  372. return -EINVAL;
  373. }
  374. }
  375. return 0;
  376. }
  377. static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
  378. struct drm_encoder *encoder,
  379. struct drm_crtc *crtc,
  380. struct drm_display_mode *mode)
  381. {
  382. enum drm_mode_status ret;
  383. ret = drm_encoder_mode_valid(encoder, mode);
  384. if (ret != MODE_OK) {
  385. DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
  386. encoder->base.id, encoder->name);
  387. return ret;
  388. }
  389. ret = drm_bridge_mode_valid(encoder->bridge, mode);
  390. if (ret != MODE_OK) {
  391. DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
  392. return ret;
  393. }
  394. ret = drm_crtc_mode_valid(crtc, mode);
  395. if (ret != MODE_OK) {
  396. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
  397. crtc->base.id, crtc->name);
  398. return ret;
  399. }
  400. return ret;
  401. }
  402. static int
  403. mode_valid(struct drm_atomic_state *state)
  404. {
  405. struct drm_connector_state *conn_state;
  406. struct drm_connector *connector;
  407. int i;
  408. for_each_new_connector_in_state(state, connector, conn_state, i) {
  409. struct drm_encoder *encoder = conn_state->best_encoder;
  410. struct drm_crtc *crtc = conn_state->crtc;
  411. struct drm_crtc_state *crtc_state;
  412. enum drm_mode_status mode_status;
  413. struct drm_display_mode *mode;
  414. if (!crtc || !encoder)
  415. continue;
  416. crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  417. if (!crtc_state)
  418. continue;
  419. if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
  420. continue;
  421. mode = &crtc_state->mode;
  422. mode_status = mode_valid_path(connector, encoder, crtc, mode);
  423. if (mode_status != MODE_OK)
  424. return -EINVAL;
  425. }
  426. return 0;
  427. }
  428. /**
  429. * drm_atomic_helper_check_modeset - validate state object for modeset changes
  430. * @dev: DRM device
  431. * @state: the driver state object
  432. *
  433. * Check the state object to see if the requested state is physically possible.
  434. * This does all the crtc and connector related computations for an atomic
  435. * update and adds any additional connectors needed for full modesets. It calls
  436. * the various per-object callbacks in the following order:
  437. *
  438. * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
  439. * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
  440. * 3. If it's determined a modeset is needed then all connectors on the affected
  441. * crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
  442. * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
  443. * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
  444. * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
  445. * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
  446. * This function is only called when the encoder will be part of a configured crtc,
  447. * it must not be used for implementing connector property validation.
  448. * If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
  449. * instead.
  450. * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
  451. *
  452. * &drm_crtc_state.mode_changed is set when the input mode is changed.
  453. * &drm_crtc_state.connectors_changed is set when a connector is added or
  454. * removed from the crtc. &drm_crtc_state.active_changed is set when
  455. * &drm_crtc_state.active changes, which is used for DPMS.
  456. * See also: drm_atomic_crtc_needs_modeset()
  457. *
  458. * IMPORTANT:
  459. *
  460. * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
  461. * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
  462. * without a full modeset) _must_ call this function again after that
  463. * change. It is permitted to call this function multiple times for the same
  464. * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend
  465. * upon the adjusted dotclock for fifo space allocation and watermark
  466. * computation.
  467. *
  468. * RETURNS:
  469. * Zero for success or -errno
  470. */
  471. int
  472. drm_atomic_helper_check_modeset(struct drm_device *dev,
  473. struct drm_atomic_state *state)
  474. {
  475. struct drm_crtc *crtc;
  476. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  477. struct drm_connector *connector;
  478. struct drm_connector_state *old_connector_state, *new_connector_state;
  479. int i, ret;
  480. unsigned connectors_mask = 0;
  481. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  482. bool has_connectors =
  483. !!new_crtc_state->connector_mask;
  484. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  485. if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
  486. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
  487. crtc->base.id, crtc->name);
  488. new_crtc_state->mode_changed = true;
  489. }
  490. if (old_crtc_state->enable != new_crtc_state->enable) {
  491. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
  492. crtc->base.id, crtc->name);
  493. /*
  494. * For clarity this assignment is done here, but
  495. * enable == 0 is only true when there are no
  496. * connectors and a NULL mode.
  497. *
  498. * The other way around is true as well. enable != 0
  499. * iff connectors are attached and a mode is set.
  500. */
  501. new_crtc_state->mode_changed = true;
  502. new_crtc_state->connectors_changed = true;
  503. }
  504. if (old_crtc_state->active != new_crtc_state->active) {
  505. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
  506. crtc->base.id, crtc->name);
  507. new_crtc_state->active_changed = true;
  508. }
  509. if (new_crtc_state->enable != has_connectors) {
  510. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
  511. crtc->base.id, crtc->name);
  512. return -EINVAL;
  513. }
  514. }
  515. ret = handle_conflicting_encoders(state, false);
  516. if (ret)
  517. return ret;
  518. for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
  519. const struct drm_connector_helper_funcs *funcs = connector->helper_private;
  520. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  521. /*
  522. * This only sets crtc->connectors_changed for routing changes,
  523. * drivers must set crtc->connectors_changed themselves when
  524. * connector properties need to be updated.
  525. */
  526. ret = update_connector_routing(state, connector,
  527. old_connector_state,
  528. new_connector_state);
  529. if (ret)
  530. return ret;
  531. if (old_connector_state->crtc) {
  532. new_crtc_state = drm_atomic_get_new_crtc_state(state,
  533. old_connector_state->crtc);
  534. if (old_connector_state->link_status !=
  535. new_connector_state->link_status)
  536. new_crtc_state->connectors_changed = true;
  537. }
  538. if (funcs->atomic_check)
  539. ret = funcs->atomic_check(connector, new_connector_state);
  540. if (ret)
  541. return ret;
  542. connectors_mask += BIT(i);
  543. }
  544. /*
  545. * After all the routing has been prepared we need to add in any
  546. * connector which is itself unchanged, but whose crtc changes its
  547. * configuration. This must be done before calling mode_fixup in case a
  548. * crtc only changed its mode but has the same set of connectors.
  549. */
  550. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  551. if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
  552. continue;
  553. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
  554. crtc->base.id, crtc->name,
  555. new_crtc_state->enable ? 'y' : 'n',
  556. new_crtc_state->active ? 'y' : 'n');
  557. ret = drm_atomic_add_affected_connectors(state, crtc);
  558. if (ret != 0)
  559. return ret;
  560. ret = drm_atomic_add_affected_planes(state, crtc);
  561. if (ret != 0)
  562. return ret;
  563. }
  564. /*
  565. * Iterate over all connectors again, to make sure atomic_check()
  566. * has been called on them when a modeset is forced.
  567. */
  568. for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
  569. const struct drm_connector_helper_funcs *funcs = connector->helper_private;
  570. if (connectors_mask & BIT(i))
  571. continue;
  572. if (funcs->atomic_check)
  573. ret = funcs->atomic_check(connector, new_connector_state);
  574. if (ret)
  575. return ret;
  576. }
  577. ret = mode_valid(state);
  578. if (ret)
  579. return ret;
  580. return mode_fixup(state);
  581. }
  582. EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
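/*
 * Sketch of the IMPORTANT note above, as a bare call sequence in a driver's
 * &drm_mode_config_funcs.atomic_check: when checks run later (e.g. the plane
 * hooks called by drm_atomic_helper_check_planes()) can set
 * &drm_crtc_state.mode_changed, the modeset checks are simply run once more:
 *
 *	ret = drm_atomic_helper_check_modeset(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_atomic_helper_check_planes(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	(the plane hooks run above may have set mode_changed)
 *	ret = drm_atomic_helper_check_modeset(dev, state);
 *	if (ret)
 *		return ret;
 */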
  583. /**
  584. * drm_atomic_helper_check_plane_state() - Check plane state for validity
  585. * @plane_state: plane state to check
  586. * @crtc_state: crtc state to check
  587. * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
  588. * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
  589. * @can_position: is it legal to position the plane such that it
  590. * doesn't cover the entire crtc? This will generally
  591. * only be false for primary planes.
  592. * @can_update_disabled: can the plane be updated while the crtc
  593. * is disabled?
  594. *
  595. * Checks that a desired plane update is valid, and updates various
  596. * bits of derived state (clipped coordinates etc.). Drivers that provide
  597. * their own plane handling rather than helper-provided implementations may
  598. * still wish to call this function to avoid duplication of error checking
  599. * code.
  600. *
  601. * RETURNS:
  602. * Zero if update appears valid, error code on failure
  603. */
  604. int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
  605. const struct drm_crtc_state *crtc_state,
  606. int min_scale,
  607. int max_scale,
  608. bool can_position,
  609. bool can_update_disabled)
  610. {
  611. struct drm_framebuffer *fb = plane_state->fb;
  612. struct drm_rect *src = &plane_state->src;
  613. struct drm_rect *dst = &plane_state->dst;
  614. unsigned int rotation = plane_state->rotation;
  615. struct drm_rect clip = {};
  616. int hscale, vscale;
  617. WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);
  618. *src = drm_plane_state_src(plane_state);
  619. *dst = drm_plane_state_dest(plane_state);
  620. if (!fb) {
  621. plane_state->visible = false;
  622. return 0;
  623. }
  624. /* crtc should only be NULL when disabling (i.e., !fb) */
  625. if (WARN_ON(!plane_state->crtc)) {
  626. plane_state->visible = false;
  627. return 0;
  628. }
  629. if (!crtc_state->enable && !can_update_disabled) {
  630. DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
  631. return -EINVAL;
  632. }
  633. drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
  634. /* Check scaling */
  635. hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
  636. vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
  637. if (hscale < 0 || vscale < 0) {
  638. DRM_DEBUG_KMS("Invalid scaling of plane\n");
  639. drm_rect_debug_print("src: ", &plane_state->src, true);
  640. drm_rect_debug_print("dst: ", &plane_state->dst, false);
  641. return -ERANGE;
  642. }
  643. if (crtc_state->enable)
  644. drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);
  645. plane_state->visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale);
  646. drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
  647. if (!plane_state->visible)
  648. /*
  649. * Plane isn't visible; some drivers can handle this
  650. * so we just return success here. Drivers that can't
  651. * (including those that use the primary plane helper's
  652. * update function) will return an error from their
  653. * update_plane handler.
  654. */
  655. return 0;
  656. if (!can_position && !drm_rect_equals(dst, &clip)) {
  657. DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
  658. drm_rect_debug_print("dst: ", dst, false);
  659. drm_rect_debug_print("clip: ", &clip, false);
  660. return -EINVAL;
  661. }
  662. return 0;
  663. }
  664. EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
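/*
 * A minimal caller sketch (foo_plane_atomic_check and the chosen limits are
 * hypothetical): a &drm_plane_helper_funcs.atomic_check hook can delegate the
 * generic clipping and scaling validation to the helper above, here with
 * scaling disallowed and updates of disabled CRTCs allowed:
 *
 *	static int foo_plane_atomic_check(struct drm_plane *plane,
 *					  struct drm_plane_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state;
 *
 *		if (!state->crtc)
 *			return 0;
 *
 *		crtc_state = drm_atomic_get_new_crtc_state(state->state,
 *							    state->crtc);
 *		if (WARN_ON(!crtc_state))
 *			return -EINVAL;
 *
 *		return drm_atomic_helper_check_plane_state(state, crtc_state,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   DRM_PLANE_HELPER_NO_SCALING,
 *							   false, true);
 *	}
 */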
  665. /**
  666. * drm_atomic_helper_check_planes - validate state object for planes changes
  667. * @dev: DRM device
  668. * @state: the driver state object
  669. *
  670. * Check the state object to see if the requested state is physically possible.
  671. * This does all the plane update related checks by calling into the
  672. * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
  673. * hooks provided by the driver.
  674. *
  675. * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
  676. * updated planes.
  677. *
  678. * RETURNS:
  679. * Zero for success or -errno
  680. */
  681. int
  682. drm_atomic_helper_check_planes(struct drm_device *dev,
  683. struct drm_atomic_state *state)
  684. {
  685. struct drm_crtc *crtc;
  686. struct drm_crtc_state *new_crtc_state;
  687. struct drm_plane *plane;
  688. struct drm_plane_state *new_plane_state, *old_plane_state;
  689. int i, ret = 0;
  690. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
  691. const struct drm_plane_helper_funcs *funcs;
  692. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  693. funcs = plane->helper_private;
  694. drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
  695. if (!funcs || !funcs->atomic_check)
  696. continue;
  697. ret = funcs->atomic_check(plane, new_plane_state);
  698. if (ret) {
  699. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
  700. plane->base.id, plane->name);
  701. return ret;
  702. }
  703. }
  704. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  705. const struct drm_crtc_helper_funcs *funcs;
  706. funcs = crtc->helper_private;
  707. if (!funcs || !funcs->atomic_check)
  708. continue;
  709. ret = funcs->atomic_check(crtc, new_crtc_state);
  710. if (ret) {
  711. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
  712. crtc->base.id, crtc->name);
  713. return ret;
  714. }
  715. }
  716. return ret;
  717. }
  718. EXPORT_SYMBOL(drm_atomic_helper_check_planes);
  719. /**
  720. * drm_atomic_helper_check - validate state object
  721. * @dev: DRM device
  722. * @state: the driver state object
  723. *
  724. * Check the state object to see if the requested state is physically possible.
  725. * Only crtcs and planes have check callbacks, so for any additional (global)
  726. * checking that a driver needs it can simply wrap that around this function.
  727. * Drivers without such needs can directly use this as their
  728. * &drm_mode_config_funcs.atomic_check callback.
  729. *
  730. * This just wraps the two parts of the state checking for planes and modeset
  731. * state in the default order: First it calls drm_atomic_helper_check_modeset()
  732. * and then drm_atomic_helper_check_planes(). The assumption is that the
  733. * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
  734. * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
  735. * watermarks.
  736. *
  737. * RETURNS:
  738. * Zero for success or -errno
  739. */
  740. int drm_atomic_helper_check(struct drm_device *dev,
  741. struct drm_atomic_state *state)
  742. {
  743. int ret;
  744. ret = drm_atomic_helper_check_modeset(dev, state);
  745. if (ret)
  746. return ret;
  747. ret = drm_atomic_helper_check_planes(dev, state);
  748. if (ret)
  749. return ret;
  750. if (state->legacy_cursor_update)
  751. state->async_update = !drm_atomic_helper_async_check(dev, state);
  752. return ret;
  753. }
  754. EXPORT_SYMBOL(drm_atomic_helper_check);
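/*
 * Sketch of the wrapping pattern described in the kernel-doc above
 * (foo_atomic_check and foo_check_shared_limits are hypothetical): any
 * additional driver-global validation simply surrounds the default check in
 * the driver's &drm_mode_config_funcs.atomic_check callback:
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return foo_check_shared_limits(state);
 *	}
 */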
  755. static void
  756. disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
  757. {
  758. struct drm_connector *connector;
  759. struct drm_connector_state *old_conn_state, *new_conn_state;
  760. struct drm_crtc *crtc;
  761. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  762. int i;
  763. for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
  764. const struct drm_encoder_helper_funcs *funcs;
  765. struct drm_encoder *encoder;
  766. /* Shut down everything that's in the changeset and currently
  767. * still on. So need to check the old, saved state. */
  768. if (!old_conn_state->crtc)
  769. continue;
  770. old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);
  771. if (!old_crtc_state->active ||
  772. !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
  773. continue;
  774. encoder = old_conn_state->best_encoder;
  775. /* We shouldn't get this far if we didn't previously have
  776. * an encoder.. but WARN_ON() rather than explode.
  777. */
  778. if (WARN_ON(!encoder))
  779. continue;
  780. funcs = encoder->helper_private;
  781. DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
  782. encoder->base.id, encoder->name);
  783. /*
  784. * Each encoder has at most one connector (since we always steal
  785. * it away), so we won't call disable hooks twice.
  786. */
  787. drm_bridge_disable(encoder->bridge);
  788. /* Right function depends upon target state. */
  789. if (funcs) {
  790. if (new_conn_state->crtc && funcs->prepare)
  791. funcs->prepare(encoder);
  792. else if (funcs->disable)
  793. funcs->disable(encoder);
  794. else if (funcs->dpms)
  795. funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
  796. }
  797. drm_bridge_post_disable(encoder->bridge);
  798. }
  799. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  800. const struct drm_crtc_helper_funcs *funcs;
  801. int ret;
  802. /* Shut down everything that needs a full modeset. */
  803. if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
  804. continue;
  805. if (!old_crtc_state->active)
  806. continue;
  807. funcs = crtc->helper_private;
  808. DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
  809. crtc->base.id, crtc->name);
  810. /* Right function depends upon target state. */
  811. if (new_crtc_state->enable && funcs->prepare)
  812. funcs->prepare(crtc);
  813. else if (funcs->atomic_disable)
  814. funcs->atomic_disable(crtc, old_crtc_state);
  815. else if (funcs->disable)
  816. funcs->disable(crtc);
  817. else
  818. funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
  819. if (!(dev->irq_enabled && dev->num_crtcs))
  820. continue;
  821. ret = drm_crtc_vblank_get(crtc);
  822. WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
  823. if (ret == 0)
  824. drm_crtc_vblank_put(crtc);
  825. }
  826. }
  827. /**
  828. * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
  829. * @dev: DRM device
  830. * @old_state: atomic state object with old state structures
  831. *
  832. * This function updates all the various legacy modeset state pointers in
  833. * connectors, encoders and crtcs. It also updates the timestamping constants
  834. * used for precise vblank timestamps by calling
  835. * drm_calc_timestamping_constants().
  836. *
  837. * Drivers can use this for building their own atomic commit if they don't have
  838. * a pure helper-based modeset implementation.
  839. *
  840. * Since these updates are not synchronized with lockings, only code paths
  841. * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
  842. * legacy state filled out by this helper. De facto this means this helper and
  843. * the legacy state pointers are only really useful for transitioning an
  844. * existing driver to the atomic world.
  845. */
  846. void
  847. drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
  848. struct drm_atomic_state *old_state)
  849. {
  850. struct drm_connector *connector;
  851. struct drm_connector_state *old_conn_state, *new_conn_state;
  852. struct drm_crtc *crtc;
  853. struct drm_crtc_state *new_crtc_state;
  854. int i;
  855. /* clear out existing links and update dpms */
  856. for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
  857. if (connector->encoder) {
  858. WARN_ON(!connector->encoder->crtc);
  859. connector->encoder->crtc = NULL;
  860. connector->encoder = NULL;
  861. }
  862. crtc = new_conn_state->crtc;
  863. if ((!crtc && old_conn_state->crtc) ||
  864. (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
  865. int mode = DRM_MODE_DPMS_OFF;
  866. if (crtc && crtc->state->active)
  867. mode = DRM_MODE_DPMS_ON;
  868. connector->dpms = mode;
  869. }
  870. }
  871. /* set new links */
  872. for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
  873. if (!new_conn_state->crtc)
  874. continue;
  875. if (WARN_ON(!new_conn_state->best_encoder))
  876. continue;
  877. connector->encoder = new_conn_state->best_encoder;
  878. connector->encoder->crtc = new_conn_state->crtc;
  879. }
  880. /* set legacy state in the crtc structure */
  881. for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
  882. struct drm_plane *primary = crtc->primary;
  883. struct drm_plane_state *new_plane_state;
  884. crtc->mode = new_crtc_state->mode;
  885. crtc->enabled = new_crtc_state->enable;
  886. new_plane_state =
  887. drm_atomic_get_new_plane_state(old_state, primary);
  888. if (new_plane_state && new_plane_state->crtc == crtc) {
  889. crtc->x = new_plane_state->src_x >> 16;
  890. crtc->y = new_plane_state->src_y >> 16;
  891. }
  892. if (new_crtc_state->enable)
  893. drm_calc_timestamping_constants(crtc,
  894. &new_crtc_state->adjusted_mode);
  895. }
  896. }
  897. EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
  898. static void
  899. crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
  900. {
  901. struct drm_crtc *crtc;
  902. struct drm_crtc_state *new_crtc_state;
  903. struct drm_connector *connector;
  904. struct drm_connector_state *new_conn_state;
  905. int i;
  906. for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
  907. const struct drm_crtc_helper_funcs *funcs;
  908. if (!new_crtc_state->mode_changed)
  909. continue;
  910. funcs = crtc->helper_private;
  911. if (new_crtc_state->enable && funcs->mode_set_nofb) {
  912. DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
  913. crtc->base.id, crtc->name);
  914. funcs->mode_set_nofb(crtc);
  915. }
  916. }
  917. for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
  918. const struct drm_encoder_helper_funcs *funcs;
  919. struct drm_encoder *encoder;
  920. struct drm_display_mode *mode, *adjusted_mode;
  921. if (!new_conn_state->best_encoder)
  922. continue;
  923. encoder = new_conn_state->best_encoder;
  924. funcs = encoder->helper_private;
  925. new_crtc_state = new_conn_state->crtc->state;
  926. mode = &new_crtc_state->mode;
  927. adjusted_mode = &new_crtc_state->adjusted_mode;
  928. if (!new_crtc_state->mode_changed)
  929. continue;
  930. DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
  931. encoder->base.id, encoder->name);
  932. /*
  933. * Each encoder has at most one connector (since we always steal
  934. * it away), so we won't call mode_set hooks twice.
  935. */
  936. if (funcs && funcs->atomic_mode_set) {
  937. funcs->atomic_mode_set(encoder, new_crtc_state,
  938. new_conn_state);
  939. } else if (funcs && funcs->mode_set) {
  940. funcs->mode_set(encoder, mode, adjusted_mode);
  941. }
  942. drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
  943. }
  944. }
  945. /**
  946. * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
  947. * @dev: DRM device
  948. * @old_state: atomic state object with old state structures
  949. *
  950. * This function shuts down all the outputs that need to be shut down and
  951. * prepares them (if required) with the new mode.
  952. *
  953. * For compatibility with legacy crtc helpers this should be called before
  954. * drm_atomic_helper_commit_planes(), which is what the default commit function
  955. * does. But drivers with different needs can group the modeset commits together
  956. * and do the plane commits at the end. This is useful for drivers doing runtime
  957. * PM since plane updates then only happen when the CRTC is actually enabled.
  958. */
  959. void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
  960. struct drm_atomic_state *old_state)
  961. {
  962. disable_outputs(dev, old_state);
  963. drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
  964. crtc_set_mode(dev, old_state);
  965. }
  966. EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
  967. /**
  968. * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
  969. * @dev: DRM device
  970. * @old_state: atomic state object with old state structures
  971. *
  972. * This function enables all the outputs with the new configuration which had to
  973. * be turned off for the update.
  974. *
  975. * For compatibility with legacy crtc helpers this should be called after
  976. * drm_atomic_helper_commit_planes(), which is what the default commit function
  977. * does. But drivers with different needs can group the modeset commits together
  978. * and do the plane commits at the end. This is useful for drivers doing runtime
  979. * PM since plane updates then only happen when the CRTC is actually enabled.
  980. */
  981. void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
  982. struct drm_atomic_state *old_state)
  983. {
  984. struct drm_crtc *crtc;
  985. struct drm_crtc_state *old_crtc_state;
  986. struct drm_crtc_state *new_crtc_state;
  987. struct drm_connector *connector;
  988. struct drm_connector_state *new_conn_state;
  989. int i;
  990. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  991. const struct drm_crtc_helper_funcs *funcs;
  992. /* Need to filter out CRTCs where only planes change. */
  993. if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
  994. continue;
  995. if (!new_crtc_state->active)
  996. continue;
  997. funcs = crtc->helper_private;
  998. if (new_crtc_state->enable) {
  999. DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
  1000. crtc->base.id, crtc->name);
  1001. if (funcs->atomic_enable)
  1002. funcs->atomic_enable(crtc, old_crtc_state);
  1003. else
  1004. funcs->commit(crtc);
  1005. }
  1006. }
  1007. for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
  1008. const struct drm_encoder_helper_funcs *funcs;
  1009. struct drm_encoder *encoder;
  1010. if (!new_conn_state->best_encoder)
  1011. continue;
  1012. if (!new_conn_state->crtc->state->active ||
  1013. !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
  1014. continue;
  1015. encoder = new_conn_state->best_encoder;
  1016. funcs = encoder->helper_private;
  1017. DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
  1018. encoder->base.id, encoder->name);
  1019. /*
  1020. * Each encoder has at most one connector (since we always steal
  1021. * it away), so we won't call enable hooks twice.
  1022. */
  1023. drm_bridge_pre_enable(encoder->bridge);
  1024. if (funcs) {
  1025. if (funcs->enable)
  1026. funcs->enable(encoder);
  1027. else if (funcs->commit)
  1028. funcs->commit(encoder);
  1029. }
  1030. drm_bridge_enable(encoder->bridge);
  1031. }
  1032. }
  1033. EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  1034. /**
  1035. * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
  1036. * @dev: DRM device
  1037. * @state: atomic state object with old state structures
  1038. * @pre_swap: If true, do an interruptible wait, and @state is the new state.
  1039. * Otherwise @state is the old state.
  1040. *
1041. * For implicit sync, drivers should fish the exclusive fence out of the
1042. * incoming fbs and stash it in the drm_plane_state. This is called after
1043. * drm_atomic_helper_swap_state(), so it uses the current plane state (and
1044. * just uses the atomic state to find the changed planes).
  1045. *
  1046. * Note that @pre_swap is needed since the point where we block for fences moves
  1047. * around depending upon whether an atomic commit is blocking or
  1048. * non-blocking. For non-blocking commit all waiting needs to happen after
  1049. * drm_atomic_helper_swap_state() is called, but for blocking commits we want
  1050. * to wait **before** we do anything that can't be easily rolled back. That is
  1051. * before we call drm_atomic_helper_swap_state().
  1052. *
1053. * Returns zero on success or < 0 if dma_fence_wait() fails.
  1054. */
  1055. int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
  1056. struct drm_atomic_state *state,
  1057. bool pre_swap)
  1058. {
  1059. struct drm_plane *plane;
  1060. struct drm_plane_state *new_plane_state;
  1061. int i, ret;
  1062. for_each_new_plane_in_state(state, plane, new_plane_state, i) {
  1063. if (!new_plane_state->fence)
  1064. continue;
  1065. WARN_ON(!new_plane_state->fb);
  1066. /*
  1067. * If waiting for fences pre-swap (ie: nonblock), userspace can
  1068. * still interrupt the operation. Instead of blocking until the
  1069. * timer expires, make the wait interruptible.
  1070. */
  1071. ret = dma_fence_wait(new_plane_state->fence, pre_swap);
  1072. if (ret)
  1073. return ret;
  1074. dma_fence_put(new_plane_state->fence);
  1075. new_plane_state->fence = NULL;
  1076. }
  1077. return 0;
  1078. }
  1079. EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
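/*
 * A minimal sketch of how a driver can stash the implicit fence for the
 * helper above to wait on, from its &drm_plane_helper_funcs.prepare_fb hook.
 * The foo_ names are hypothetical, and this assumes GEM-backed framebuffers
 * with the drm_gem_fb_prepare_fb() helper being available:
 *
 *	static int foo_plane_prepare_fb(struct drm_plane *plane,
 *					struct drm_plane_state *state)
 *	{
 *		if (!state->fb)
 *			return 0;
 *
 *		// pin/map the framebuffer as needed, then stash the exclusive
 *		// fence in state->fence for drm_atomic_helper_wait_for_fences()
 *		return drm_gem_fb_prepare_fb(plane, state);
 *	}
 *
 * Drivers can also set the fence directly with
 * drm_atomic_set_fence_for_plane().
 */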
  1080. /**
  1081. * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
  1082. * @dev: DRM device
  1083. * @old_state: atomic state object with old state structures
  1084. *
1085. * Helper to, after atomic commit, wait for vblanks on all affected
  1086. * crtcs (ie. before cleaning up old framebuffers using
  1087. * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
  1088. * framebuffers have actually changed to optimize for the legacy cursor and
  1089. * plane update use-case.
  1090. *
  1091. * Drivers using the nonblocking commit tracking support initialized by calling
  1092. * drm_atomic_helper_setup_commit() should look at
  1093. * drm_atomic_helper_wait_for_flip_done() as an alternative.
  1094. */
  1095. void
  1096. drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
  1097. struct drm_atomic_state *old_state)
  1098. {
  1099. struct drm_crtc *crtc;
  1100. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1101. int i, ret;
  1102. unsigned crtc_mask = 0;
  1103. /*
  1104. * Legacy cursor ioctls are completely unsynced, and userspace
  1105. * relies on that (by doing tons of cursor updates).
  1106. */
  1107. if (old_state->legacy_cursor_update)
  1108. return;
  1109. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1110. if (!new_crtc_state->active)
  1111. continue;
  1112. ret = drm_crtc_vblank_get(crtc);
  1113. if (ret != 0)
  1114. continue;
  1115. crtc_mask |= drm_crtc_mask(crtc);
  1116. old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
  1117. }
  1118. for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
  1119. if (!(crtc_mask & drm_crtc_mask(crtc)))
  1120. continue;
  1121. ret = wait_event_timeout(dev->vblank[i].queue,
  1122. old_state->crtcs[i].last_vblank_count !=
  1123. drm_crtc_vblank_count(crtc),
  1124. msecs_to_jiffies(50));
  1125. WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
  1126. crtc->base.id, crtc->name);
  1127. drm_crtc_vblank_put(crtc);
  1128. }
  1129. }
  1130. EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  1131. /**
  1132. * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
  1133. * @dev: DRM device
  1134. * @old_state: atomic state object with old state structures
  1135. *
1136. * Helper to, after atomic commit, wait for page flips on all affected
  1137. * crtcs (ie. before cleaning up old framebuffers using
  1138. * drm_atomic_helper_cleanup_planes()). Compared to
1139. * drm_atomic_helper_wait_for_vblanks() this waits for the completion of the flip
1140. * on all CRTCs, assuming that cursor-only updates are signalling their completion
  1141. * immediately (or using a different path).
  1142. *
  1143. * This requires that drivers use the nonblocking commit tracking support
  1144. * initialized using drm_atomic_helper_setup_commit().
  1145. */
  1146. void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
  1147. struct drm_atomic_state *old_state)
  1148. {
  1149. struct drm_crtc_state *new_crtc_state;
  1150. struct drm_crtc *crtc;
  1151. int i;
  1152. for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
  1153. struct drm_crtc_commit *commit = new_crtc_state->commit;
  1154. int ret;
  1155. if (!commit)
  1156. continue;
  1157. ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
  1158. if (ret == 0)
  1159. DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
  1160. crtc->base.id, crtc->name);
  1161. }
  1162. }
  1163. EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
  1164. /**
  1165. * drm_atomic_helper_commit_tail - commit atomic update to hardware
  1166. * @old_state: atomic state object with old state structures
  1167. *
  1168. * This is the default implementation for the
  1169. * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
  1170. * that do not support runtime_pm or do not need the CRTC to be
  1171. * enabled to perform a commit. Otherwise, see
  1172. * drm_atomic_helper_commit_tail_rpm().
  1173. *
1174. * Note that the default ordering of how the various stages are called is chosen
1175. * to match the legacy modeset helper library as closely as possible.
  1176. */
  1177. void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
  1178. {
  1179. struct drm_device *dev = old_state->dev;
  1180. drm_atomic_helper_commit_modeset_disables(dev, old_state);
  1181. drm_atomic_helper_commit_planes(dev, old_state, 0);
  1182. drm_atomic_helper_commit_modeset_enables(dev, old_state);
  1183. drm_atomic_helper_commit_hw_done(old_state);
  1184. drm_atomic_helper_wait_for_vblanks(dev, old_state);
  1185. drm_atomic_helper_cleanup_planes(dev, old_state);
  1186. }
  1187. EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
  1188. /**
  1189. * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
1190. * @old_state: atomic state object with old state structures
  1191. *
  1192. * This is an alternative implementation for the
  1193. * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
  1194. * that support runtime_pm or need the CRTC to be enabled to perform a
  1195. * commit. Otherwise, one should use the default implementation
  1196. * drm_atomic_helper_commit_tail().
  1197. */
  1198. void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
  1199. {
  1200. struct drm_device *dev = old_state->dev;
  1201. drm_atomic_helper_commit_modeset_disables(dev, old_state);
  1202. drm_atomic_helper_commit_modeset_enables(dev, old_state);
  1203. drm_atomic_helper_commit_planes(dev, old_state,
  1204. DRM_PLANE_COMMIT_ACTIVE_ONLY);
  1205. drm_atomic_helper_commit_hw_done(old_state);
  1206. drm_atomic_helper_wait_for_vblanks(dev, old_state);
  1207. drm_atomic_helper_cleanup_planes(dev, old_state);
  1208. }
  1209. EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
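/*
 * Drivers pick one of the two commit_tail implementations above (or supply
 * their own) through &drm_mode_config_helper_funcs. A minimal sketch for a
 * hypothetical runtime-PM driver (the foo_ names are illustrative only):
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 *	// at init time:
 *	//	dev->mode_config.helper_private = &foo_mode_config_helpers;
 *
 * Leaving the hook NULL makes commit_tail() below fall back to
 * drm_atomic_helper_commit_tail().
 */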
  1210. static void commit_tail(struct drm_atomic_state *old_state)
  1211. {
  1212. struct drm_device *dev = old_state->dev;
  1213. const struct drm_mode_config_helper_funcs *funcs;
  1214. funcs = dev->mode_config.helper_private;
  1215. drm_atomic_helper_wait_for_fences(dev, old_state, false);
  1216. drm_atomic_helper_wait_for_dependencies(old_state);
  1217. if (funcs && funcs->atomic_commit_tail)
  1218. funcs->atomic_commit_tail(old_state);
  1219. else
  1220. drm_atomic_helper_commit_tail(old_state);
  1221. drm_atomic_helper_commit_cleanup_done(old_state);
  1222. drm_atomic_state_put(old_state);
  1223. }
  1224. static void commit_work(struct work_struct *work)
  1225. {
  1226. struct drm_atomic_state *state = container_of(work,
  1227. struct drm_atomic_state,
  1228. commit_work);
  1229. commit_tail(state);
  1230. }
  1231. /**
1232. * drm_atomic_helper_async_check - check if state can be committed asynchronously
  1233. * @dev: DRM device
  1234. * @state: the driver state object
  1235. *
  1236. * This helper will check if it is possible to commit the state asynchronously.
  1237. * Async commits are not supposed to swap the states like normal sync commits
  1238. * but just do in-place changes on the current state.
  1239. *
1240. * It will return 0 if the commit can happen in an asynchronous fashion or an error
1241. * if not. Note that an error just means it can't be committed asynchronously; if it
1242. * fails, the commit should be treated like a normal synchronous commit.
  1243. */
  1244. int drm_atomic_helper_async_check(struct drm_device *dev,
  1245. struct drm_atomic_state *state)
  1246. {
  1247. struct drm_crtc *crtc;
  1248. struct drm_crtc_state *crtc_state;
  1249. struct drm_plane *plane;
  1250. struct drm_plane_state *old_plane_state, *new_plane_state;
  1251. const struct drm_plane_helper_funcs *funcs;
  1252. int i, n_planes = 0;
  1253. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  1254. if (drm_atomic_crtc_needs_modeset(crtc_state))
  1255. return -EINVAL;
  1256. }
  1257. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
  1258. n_planes++;
  1259. /* FIXME: we support only single plane updates for now */
  1260. if (n_planes != 1)
  1261. return -EINVAL;
  1262. if (!new_plane_state->crtc)
  1263. return -EINVAL;
  1264. funcs = plane->helper_private;
  1265. if (!funcs->atomic_async_update)
  1266. return -EINVAL;
  1267. if (new_plane_state->fence)
  1268. return -EINVAL;
  1269. /*
  1270. * Don't do an async update if there is an outstanding commit modifying
  1271. * the plane. This prevents our async update's changes from getting
  1272. * overridden by a previous synchronous update's state.
  1273. */
  1274. if (old_plane_state->commit &&
  1275. !try_wait_for_completion(&old_plane_state->commit->hw_done))
  1276. return -EBUSY;
  1277. return funcs->atomic_async_check(plane, new_plane_state);
  1278. }
  1279. EXPORT_SYMBOL(drm_atomic_helper_async_check);
  1280. /**
  1281. * drm_atomic_helper_async_commit - commit state asynchronously
  1282. * @dev: DRM device
  1283. * @state: the driver state object
  1284. *
  1285. * This function commits a state asynchronously, i.e., not vblank
  1286. * synchronized. It should be used on a state only when
1287. * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
  1288. * the states like normal sync commits, but just do in-place changes on the
  1289. * current state.
  1290. */
  1291. void drm_atomic_helper_async_commit(struct drm_device *dev,
  1292. struct drm_atomic_state *state)
  1293. {
  1294. struct drm_plane *plane;
  1295. struct drm_plane_state *plane_state;
  1296. const struct drm_plane_helper_funcs *funcs;
  1297. int i;
  1298. for_each_new_plane_in_state(state, plane, plane_state, i) {
  1299. funcs = plane->helper_private;
  1300. funcs->atomic_async_update(plane, plane_state);
  1301. }
  1302. }
  1303. EXPORT_SYMBOL(drm_atomic_helper_async_commit);
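/*
 * To opt into async updates a driver wires up the two async hooks in its
 * &drm_plane_helper_funcs. A rough sketch for a hypothetical cursor plane,
 * with made-up foo_ names:
 *
 *	static int foo_plane_atomic_async_check(struct drm_plane *plane,
 *						struct drm_plane_state *state)
 *	{
 *		// reject anything the hardware can't change mid-scanout,
 *		// e.g. only allow position updates on the same framebuffer
 *		return 0;
 *	}
 *
 *	static void foo_plane_atomic_async_update(struct drm_plane *plane,
 *						  struct drm_plane_state *new_state)
 *	{
 *		// apply the new coordinates/fb to plane->state in place and
 *		// program the hardware immediately
 *	}
 *
 *	static const struct drm_plane_helper_funcs foo_cursor_helper_funcs = {
 *		.atomic_async_check = foo_plane_atomic_async_check,
 *		.atomic_async_update = foo_plane_atomic_async_update,
 *	};
 */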
  1304. /**
  1305. * drm_atomic_helper_commit - commit validated state object
  1306. * @dev: DRM device
  1307. * @state: the driver state object
  1308. * @nonblock: whether nonblocking behavior is requested.
  1309. *
1310. * This function commits a state object pre-validated with drm_atomic_helper_check().
1311. * This can still fail when e.g. the framebuffer reservation fails. This
  1312. * function implements nonblocking commits, using
  1313. * drm_atomic_helper_setup_commit() and related functions.
  1314. *
  1315. * Committing the actual hardware state is done through the
1316. * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
  1317. * implementation drm_atomic_helper_commit_tail().
  1318. *
  1319. * RETURNS:
  1320. * Zero for success or -errno.
  1321. */
  1322. int drm_atomic_helper_commit(struct drm_device *dev,
  1323. struct drm_atomic_state *state,
  1324. bool nonblock)
  1325. {
  1326. int ret;
  1327. if (state->async_update) {
  1328. ret = drm_atomic_helper_prepare_planes(dev, state);
  1329. if (ret)
  1330. return ret;
  1331. drm_atomic_helper_async_commit(dev, state);
  1332. drm_atomic_helper_cleanup_planes(dev, state);
  1333. return 0;
  1334. }
  1335. ret = drm_atomic_helper_setup_commit(state, nonblock);
  1336. if (ret)
  1337. return ret;
  1338. INIT_WORK(&state->commit_work, commit_work);
  1339. ret = drm_atomic_helper_prepare_planes(dev, state);
  1340. if (ret)
  1341. return ret;
  1342. if (!nonblock) {
  1343. ret = drm_atomic_helper_wait_for_fences(dev, state, true);
  1344. if (ret)
  1345. goto err;
  1346. }
  1347. /*
  1348. * This is the point of no return - everything below never fails except
  1349. * when the hw goes bonghits. Which means we can commit the new state on
  1350. * the software side now.
  1351. */
  1352. ret = drm_atomic_helper_swap_state(state, true);
  1353. if (ret)
  1354. goto err;
  1355. /*
  1356. * Everything below can be run asynchronously without the need to grab
  1357. * any modeset locks at all under one condition: It must be guaranteed
  1358. * that the asynchronous work has either been cancelled (if the driver
  1359. * supports it, which at least requires that the framebuffers get
  1360. * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
  1361. * before the new state gets committed on the software side with
  1362. * drm_atomic_helper_swap_state().
  1363. *
  1364. * This scheme allows new atomic state updates to be prepared and
  1365. * checked in parallel to the asynchronous completion of the previous
  1366. * update. Which is important since compositors need to figure out the
  1367. * composition of the next frame right after having submitted the
  1368. * current layout.
  1369. *
  1370. * NOTE: Commit work has multiple phases, first hardware commit, then
  1371. * cleanup. We want them to overlap, hence need system_unbound_wq to
1372. * make sure work items don't artificially stall on one another.
  1373. */
  1374. drm_atomic_state_get(state);
  1375. if (nonblock)
  1376. queue_work(system_unbound_wq, &state->commit_work);
  1377. else
  1378. commit_tail(state);
  1379. return 0;
  1380. err:
  1381. drm_atomic_helper_cleanup_planes(dev, state);
  1382. return ret;
  1383. }
  1384. EXPORT_SYMBOL(drm_atomic_helper_commit);
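/*
 * The usual way to use the function above is as the &drm_mode_config_funcs
 * .atomic_commit hook, paired with drm_atomic_helper_check(). A minimal
 * sketch for a hypothetical foo_ driver:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = drm_gem_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 *
 * The .fb_create entry is only an example; drivers use whatever framebuffer
 * creation helper matches their memory manager.
 */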
  1385. /**
  1386. * DOC: implementing nonblocking commit
  1387. *
  1388. * Nonblocking atomic commits have to be implemented in the following sequence:
  1389. *
  1390. * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
  1391. * which commit needs to call which can fail, so we want to run it first and
  1392. * synchronously.
  1393. *
  1394. * 2. Synchronize with any outstanding nonblocking commit worker threads which
1395. * might be affected by the new state update. This can be done by either cancelling
  1396. * or flushing the work items, depending upon whether the driver can deal with
  1397. * cancelled updates. Note that it is important to ensure that the framebuffer
  1398. * cleanup is still done when cancelling.
  1399. *
  1400. * Asynchronous workers need to have sufficient parallelism to be able to run
  1401. * different atomic commits on different CRTCs in parallel. The simplest way to
1402. * achieve this is by running them on the &system_unbound_wq work queue. Note
  1403. * that drivers are not required to split up atomic commits and run an
  1404. * individual commit in parallel - userspace is supposed to do that if it cares.
  1405. * But it might be beneficial to do that for modesets, since those necessarily
  1406. * must be done as one global operation, and enabling or disabling a CRTC can
  1407. * take a long time. But even that is not required.
  1408. *
  1409. * 3. The software state is updated synchronously with
  1410. * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
  1411. * locks means concurrent callers never see inconsistent state. And doing this
  1412. * while it's guaranteed that no relevant nonblocking worker runs means that
1413. * nonblocking workers do not need to grab any locks. Actually they must not grab
  1414. * locks, for otherwise the work flushing will deadlock.
  1415. *
  1416. * 4. Schedule a work item to do all subsequent steps, using the split-out
  1417. * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
  1418. * then cleaning up the framebuffers after the old framebuffer is no longer
  1419. * being displayed.
  1420. *
  1421. * The above scheme is implemented in the atomic helper libraries in
  1422. * drm_atomic_helper_commit() using a bunch of helper functions. See
  1423. * drm_atomic_helper_setup_commit() for a starting point.
  1424. */
  1425. static int stall_checks(struct drm_crtc *crtc, bool nonblock)
  1426. {
  1427. struct drm_crtc_commit *commit, *stall_commit = NULL;
  1428. bool completed = true;
  1429. int i;
  1430. long ret = 0;
  1431. spin_lock(&crtc->commit_lock);
  1432. i = 0;
  1433. list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
  1434. if (i == 0) {
  1435. completed = try_wait_for_completion(&commit->flip_done);
  1436. /* Userspace is not allowed to get ahead of the previous
  1437. * commit with nonblocking ones. */
  1438. if (!completed && nonblock) {
  1439. spin_unlock(&crtc->commit_lock);
  1440. return -EBUSY;
  1441. }
  1442. } else if (i == 1) {
  1443. stall_commit = drm_crtc_commit_get(commit);
  1444. break;
  1445. }
  1446. i++;
  1447. }
  1448. spin_unlock(&crtc->commit_lock);
  1449. if (!stall_commit)
  1450. return 0;
  1451. /* We don't want to let commits get ahead of cleanup work too much,
  1452. * stalling on 2nd previous commit means triple-buffer won't ever stall.
  1453. */
  1454. ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
  1455. 10*HZ);
  1456. if (ret == 0)
  1457. DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
  1458. crtc->base.id, crtc->name);
  1459. drm_crtc_commit_put(stall_commit);
  1460. return ret < 0 ? ret : 0;
  1461. }
  1462. static void release_crtc_commit(struct completion *completion)
  1463. {
  1464. struct drm_crtc_commit *commit = container_of(completion,
  1465. typeof(*commit),
  1466. flip_done);
  1467. drm_crtc_commit_put(commit);
  1468. }
  1469. static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
  1470. {
  1471. init_completion(&commit->flip_done);
  1472. init_completion(&commit->hw_done);
  1473. init_completion(&commit->cleanup_done);
  1474. INIT_LIST_HEAD(&commit->commit_entry);
  1475. kref_init(&commit->ref);
  1476. commit->crtc = crtc;
  1477. }
  1478. static struct drm_crtc_commit *
  1479. crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
  1480. {
  1481. if (crtc) {
  1482. struct drm_crtc_state *new_crtc_state;
  1483. new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  1484. return new_crtc_state->commit;
  1485. }
  1486. if (!state->fake_commit) {
  1487. state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
  1488. if (!state->fake_commit)
  1489. return NULL;
  1490. init_commit(state->fake_commit, NULL);
  1491. }
  1492. return state->fake_commit;
  1493. }
  1494. /**
  1495. * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
  1496. * @state: new modeset state to be committed
  1497. * @nonblock: whether nonblocking behavior is requested.
  1498. *
  1499. * This function prepares @state to be used by the atomic helper's support for
  1500. * nonblocking commits. Drivers using the nonblocking commit infrastructure
  1501. * should always call this function from their
  1502. * &drm_mode_config_funcs.atomic_commit hook.
  1503. *
  1504. * To be able to use this support drivers need to use a few more helper
  1505. * functions. drm_atomic_helper_wait_for_dependencies() must be called before
  1506. * actually committing the hardware state, and for nonblocking commits this call
  1507. * must be placed in the async worker. See also drm_atomic_helper_swap_state()
1508. * and its stall parameter, for when a driver's commit hooks look at the
  1509. * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
  1510. *
  1511. * Completion of the hardware commit step must be signalled using
  1512. * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
  1513. * to read or change any permanent software or hardware modeset state. The only
  1514. * exception is state protected by other means than &drm_modeset_lock locks.
  1515. * Only the free standing @state with pointers to the old state structures can
  1516. * be inspected, e.g. to clean up old buffers using
  1517. * drm_atomic_helper_cleanup_planes().
  1518. *
  1519. * At the very end, before cleaning up @state drivers must call
  1520. * drm_atomic_helper_commit_cleanup_done().
  1521. *
1522. * This is all implemented in drm_atomic_helper_commit(), giving drivers a
  1523. * complete and easy-to-use default implementation of the atomic_commit() hook.
  1524. *
  1525. * The tracking of asynchronously executed and still pending commits is done
  1526. * using the core structure &drm_crtc_commit.
  1527. *
  1528. * By default there's no need to clean up resources allocated by this function
  1529. * explicitly: drm_atomic_state_default_clear() will take care of that
  1530. * automatically.
  1531. *
  1532. * Returns:
  1533. *
  1534. * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
  1535. * -ENOMEM on allocation failures and -EINTR when a signal is pending.
  1536. */
  1537. int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
  1538. bool nonblock)
  1539. {
  1540. struct drm_crtc *crtc;
  1541. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1542. struct drm_connector *conn;
  1543. struct drm_connector_state *old_conn_state, *new_conn_state;
  1544. struct drm_plane *plane;
  1545. struct drm_plane_state *old_plane_state, *new_plane_state;
  1546. struct drm_crtc_commit *commit;
  1547. int i, ret;
  1548. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  1549. commit = kzalloc(sizeof(*commit), GFP_KERNEL);
  1550. if (!commit)
  1551. return -ENOMEM;
  1552. init_commit(commit, crtc);
  1553. new_crtc_state->commit = commit;
  1554. ret = stall_checks(crtc, nonblock);
  1555. if (ret)
  1556. return ret;
1557. /* Drivers only send out events when either the current or the
  1558. * new CRTC state is active. Complete right away if everything
  1559. * stays off. */
  1560. if (!old_crtc_state->active && !new_crtc_state->active) {
  1561. complete_all(&commit->flip_done);
  1562. continue;
  1563. }
  1564. /* Legacy cursor updates are fully unsynced. */
  1565. if (state->legacy_cursor_update) {
  1566. complete_all(&commit->flip_done);
  1567. continue;
  1568. }
  1569. if (!new_crtc_state->event) {
  1570. commit->event = kzalloc(sizeof(*commit->event),
  1571. GFP_KERNEL);
  1572. if (!commit->event)
  1573. return -ENOMEM;
  1574. new_crtc_state->event = commit->event;
  1575. }
  1576. new_crtc_state->event->base.completion = &commit->flip_done;
  1577. new_crtc_state->event->base.completion_release = release_crtc_commit;
  1578. drm_crtc_commit_get(commit);
  1579. commit->abort_completion = true;
  1580. }
  1581. for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
  1582. /* Userspace is not allowed to get ahead of the previous
  1583. * commit with nonblocking ones. */
  1584. if (nonblock && old_conn_state->commit &&
  1585. !try_wait_for_completion(&old_conn_state->commit->flip_done))
  1586. return -EBUSY;
  1587. /* Always track connectors explicitly for e.g. link retraining. */
  1588. commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
  1589. if (!commit)
  1590. return -ENOMEM;
  1591. new_conn_state->commit = drm_crtc_commit_get(commit);
  1592. }
  1593. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
  1594. /* Userspace is not allowed to get ahead of the previous
  1595. * commit with nonblocking ones. */
  1596. if (nonblock && old_plane_state->commit &&
  1597. !try_wait_for_completion(&old_plane_state->commit->flip_done))
  1598. return -EBUSY;
  1599. /* Always track planes explicitly for async pageflip support. */
  1600. commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
  1601. if (!commit)
  1602. return -ENOMEM;
  1603. new_plane_state->commit = drm_crtc_commit_get(commit);
  1604. }
  1605. return 0;
  1606. }
  1607. EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
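/*
 * Putting the pieces from the description above together, a driver that rolls
 * its own nonblocking commit path (instead of using drm_atomic_helper_commit())
 * ends up with a worker that looks roughly like this. Sketch only, with
 * hypothetical foo_ naming:
 *
 *	static void foo_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_wait_for_dependencies(old_state);
 *
 *		// program the hardware with the commit_modeset_ and
 *		// commit_planes helpers
 *
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_vblanks(dev, old_state);
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *		drm_atomic_helper_commit_cleanup_done(old_state);
 *	}
 */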
  1608. /**
1609. * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
  1610. * @old_state: atomic state object with old state structures
  1611. *
1612. * This function waits for all preceding commits that touch the same CRTC as
1613. * @old_state to both be committed to the hardware (as signalled by
1614. * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
  1615. * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
  1616. *
  1617. * This is part of the atomic helper support for nonblocking commits, see
  1618. * drm_atomic_helper_setup_commit() for an overview.
  1619. */
  1620. void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
  1621. {
  1622. struct drm_crtc *crtc;
  1623. struct drm_crtc_state *old_crtc_state;
  1624. struct drm_plane *plane;
  1625. struct drm_plane_state *old_plane_state;
  1626. struct drm_connector *conn;
  1627. struct drm_connector_state *old_conn_state;
  1628. struct drm_crtc_commit *commit;
  1629. int i;
  1630. long ret;
  1631. for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
  1632. commit = old_crtc_state->commit;
  1633. if (!commit)
  1634. continue;
  1635. ret = wait_for_completion_timeout(&commit->hw_done,
  1636. 10*HZ);
  1637. if (ret == 0)
  1638. DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
  1639. crtc->base.id, crtc->name);
  1640. /* Currently no support for overwriting flips, hence
  1641. * stall for previous one to execute completely. */
  1642. ret = wait_for_completion_timeout(&commit->flip_done,
  1643. 10*HZ);
  1644. if (ret == 0)
  1645. DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
  1646. crtc->base.id, crtc->name);
  1647. }
  1648. for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
  1649. commit = old_conn_state->commit;
  1650. if (!commit)
  1651. continue;
  1652. ret = wait_for_completion_timeout(&commit->hw_done,
  1653. 10*HZ);
  1654. if (ret == 0)
  1655. DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
  1656. conn->base.id, conn->name);
  1657. /* Currently no support for overwriting flips, hence
  1658. * stall for previous one to execute completely. */
  1659. ret = wait_for_completion_timeout(&commit->flip_done,
  1660. 10*HZ);
  1661. if (ret == 0)
  1662. DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
  1663. conn->base.id, conn->name);
  1664. }
  1665. for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
  1666. commit = old_plane_state->commit;
  1667. if (!commit)
  1668. continue;
  1669. ret = wait_for_completion_timeout(&commit->hw_done,
  1670. 10*HZ);
  1671. if (ret == 0)
  1672. DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
  1673. plane->base.id, plane->name);
  1674. /* Currently no support for overwriting flips, hence
  1675. * stall for previous one to execute completely. */
  1676. ret = wait_for_completion_timeout(&commit->flip_done,
  1677. 10*HZ);
  1678. if (ret == 0)
  1679. DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
  1680. plane->base.id, plane->name);
  1681. }
  1682. }
  1683. EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
  1684. /**
1685. * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
  1686. * @old_state: atomic state object with old state structures
  1687. *
  1688. * This function is used to signal completion of the hardware commit step. After
  1689. * this step the driver is not allowed to read or change any permanent software
  1690. * or hardware modeset state. The only exception is state protected by other
  1691. * means than &drm_modeset_lock locks.
  1692. *
1693. * Drivers should try to postpone any expensive or delayed cleanup work until
1694. * after this function is called.
  1695. *
  1696. * This is part of the atomic helper support for nonblocking commits, see
  1697. * drm_atomic_helper_setup_commit() for an overview.
  1698. */
  1699. void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
  1700. {
  1701. struct drm_crtc *crtc;
  1702. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1703. struct drm_crtc_commit *commit;
  1704. int i;
  1705. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1706. commit = new_crtc_state->commit;
  1707. if (!commit)
  1708. continue;
  1709. /*
  1710. * copy new_crtc_state->commit to old_crtc_state->commit,
  1711. * it's unsafe to touch new_crtc_state after hw_done,
  1712. * but we still need to do so in cleanup_done().
  1713. */
  1714. if (old_crtc_state->commit)
  1715. drm_crtc_commit_put(old_crtc_state->commit);
  1716. old_crtc_state->commit = drm_crtc_commit_get(commit);
  1717. /* backend must have consumed any event by now */
  1718. WARN_ON(new_crtc_state->event);
  1719. complete_all(&commit->hw_done);
  1720. }
  1721. if (old_state->fake_commit) {
  1722. complete_all(&old_state->fake_commit->hw_done);
  1723. complete_all(&old_state->fake_commit->flip_done);
  1724. }
  1725. }
  1726. EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
  1727. /**
  1728. * drm_atomic_helper_commit_cleanup_done - signal completion of commit
  1729. * @old_state: atomic state object with old state structures
  1730. *
  1731. * This signals completion of the atomic update @old_state, including any
  1732. * cleanup work. If used, it must be called right before calling
  1733. * drm_atomic_state_put().
  1734. *
  1735. * This is part of the atomic helper support for nonblocking commits, see
  1736. * drm_atomic_helper_setup_commit() for an overview.
  1737. */
  1738. void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
  1739. {
  1740. struct drm_crtc *crtc;
  1741. struct drm_crtc_state *old_crtc_state;
  1742. struct drm_crtc_commit *commit;
  1743. int i;
  1744. for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
  1745. commit = old_crtc_state->commit;
  1746. if (WARN_ON(!commit))
  1747. continue;
  1748. complete_all(&commit->cleanup_done);
  1749. WARN_ON(!try_wait_for_completion(&commit->hw_done));
  1750. spin_lock(&crtc->commit_lock);
  1751. list_del(&commit->commit_entry);
  1752. spin_unlock(&crtc->commit_lock);
  1753. }
  1754. if (old_state->fake_commit)
  1755. complete_all(&old_state->fake_commit->cleanup_done);
  1756. }
  1757. EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
  1758. /**
  1759. * drm_atomic_helper_prepare_planes - prepare plane resources before commit
  1760. * @dev: DRM device
  1761. * @state: atomic state object with new state structures
  1762. *
  1763. * This function prepares plane state, specifically framebuffers, for the new
  1764. * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure
  1765. * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on
  1766. * any already successfully prepared framebuffer.
  1767. *
  1768. * Returns:
  1769. * 0 on success, negative error code on failure.
  1770. */
  1771. int drm_atomic_helper_prepare_planes(struct drm_device *dev,
  1772. struct drm_atomic_state *state)
  1773. {
  1774. struct drm_plane *plane;
  1775. struct drm_plane_state *new_plane_state;
  1776. int ret, i, j;
  1777. for_each_new_plane_in_state(state, plane, new_plane_state, i) {
  1778. const struct drm_plane_helper_funcs *funcs;
  1779. funcs = plane->helper_private;
  1780. if (funcs->prepare_fb) {
  1781. ret = funcs->prepare_fb(plane, new_plane_state);
  1782. if (ret)
  1783. goto fail;
  1784. }
  1785. }
  1786. return 0;
  1787. fail:
  1788. for_each_new_plane_in_state(state, plane, new_plane_state, j) {
  1789. const struct drm_plane_helper_funcs *funcs;
  1790. if (j >= i)
  1791. continue;
  1792. funcs = plane->helper_private;
  1793. if (funcs->cleanup_fb)
  1794. funcs->cleanup_fb(plane, new_plane_state);
  1795. }
  1796. return ret;
  1797. }
  1798. EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
  1799. static bool plane_crtc_active(const struct drm_plane_state *state)
  1800. {
  1801. return state->crtc && state->crtc->state->active;
  1802. }
  1803. /**
  1804. * drm_atomic_helper_commit_planes - commit plane state
  1805. * @dev: DRM device
  1806. * @old_state: atomic state object with old state structures
  1807. * @flags: flags for committing plane state
  1808. *
  1809. * This function commits the new plane state using the plane and atomic helper
  1810. * functions for planes and crtcs. It assumes that the atomic state has already
  1811. * been pushed into the relevant object state pointers, since this step can no
  1812. * longer fail.
  1813. *
  1814. * It still requires the global state object @old_state to know which planes and
  1815. * crtcs need to be updated though.
  1816. *
  1817. * Note that this function does all plane updates across all CRTCs in one step.
  1818. * If the hardware can't support this approach look at
  1819. * drm_atomic_helper_commit_planes_on_crtc() instead.
  1820. *
  1821. * Plane parameters can be updated by applications while the associated CRTC is
  1822. * disabled. The DRM/KMS core will store the parameters in the plane state,
  1823. * which will be available to the driver when the CRTC is turned on. As a result
  1824. * most drivers don't need to be immediately notified of plane updates for a
  1825. * disabled CRTC.
  1826. *
  1827. * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
  1828. * @flags in order not to receive plane update notifications related to a
  1829. * disabled CRTC. This avoids the need to manually ignore plane updates in
  1830. * driver code when the driver and/or hardware can't or just don't need to deal
  1831. * with updates on disabled CRTCs, for example when supporting runtime PM.
  1832. *
  1833. * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
1834. * display controllers require disabling a CRTC's planes when the CRTC is
1835. * disabled. This function will then skip the &drm_plane_helper_funcs.atomic_disable
  1836. * call for a plane if the CRTC of the old plane state needs a modesetting
  1837. * operation. Of course, the drivers need to disable the planes in their CRTC
  1838. * disable callbacks since no one else would do that.
  1839. *
  1840. * The drm_atomic_helper_commit() default implementation doesn't set the
  1841. * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
  1842. * This should not be copied blindly by drivers.
  1843. */
  1844. void drm_atomic_helper_commit_planes(struct drm_device *dev,
  1845. struct drm_atomic_state *old_state,
  1846. uint32_t flags)
  1847. {
  1848. struct drm_crtc *crtc;
  1849. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  1850. struct drm_plane *plane;
  1851. struct drm_plane_state *old_plane_state, *new_plane_state;
  1852. int i;
  1853. bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
  1854. bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
  1855. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1856. const struct drm_crtc_helper_funcs *funcs;
  1857. funcs = crtc->helper_private;
  1858. if (!funcs || !funcs->atomic_begin)
  1859. continue;
  1860. if (active_only && !new_crtc_state->active)
  1861. continue;
  1862. funcs->atomic_begin(crtc, old_crtc_state);
  1863. }
  1864. for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
  1865. const struct drm_plane_helper_funcs *funcs;
  1866. bool disabling;
  1867. funcs = plane->helper_private;
  1868. if (!funcs)
  1869. continue;
  1870. disabling = drm_atomic_plane_disabling(old_plane_state,
  1871. new_plane_state);
  1872. if (active_only) {
  1873. /*
  1874. * Skip planes related to inactive CRTCs. If the plane
  1875. * is enabled use the state of the current CRTC. If the
  1876. * plane is being disabled use the state of the old
  1877. * CRTC to avoid skipping planes being disabled on an
  1878. * active CRTC.
  1879. */
  1880. if (!disabling && !plane_crtc_active(new_plane_state))
  1881. continue;
  1882. if (disabling && !plane_crtc_active(old_plane_state))
  1883. continue;
  1884. }
  1885. /*
  1886. * Special-case disabling the plane if drivers support it.
  1887. */
  1888. if (disabling && funcs->atomic_disable) {
  1889. struct drm_crtc_state *crtc_state;
  1890. crtc_state = old_plane_state->crtc->state;
  1891. if (drm_atomic_crtc_needs_modeset(crtc_state) &&
  1892. no_disable)
  1893. continue;
  1894. funcs->atomic_disable(plane, old_plane_state);
  1895. } else if (new_plane_state->crtc || disabling) {
  1896. funcs->atomic_update(plane, old_plane_state);
  1897. }
  1898. }
  1899. for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
  1900. const struct drm_crtc_helper_funcs *funcs;
  1901. funcs = crtc->helper_private;
  1902. if (!funcs || !funcs->atomic_flush)
  1903. continue;
  1904. if (active_only && !new_crtc_state->active)
  1905. continue;
  1906. funcs->atomic_flush(crtc, old_crtc_state);
  1907. }
  1908. }
  1909. EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
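/*
 * In a driver's &drm_mode_config_helper_funcs.atomic_commit_tail the call
 * typically looks like one of the following, depending on the flags
 * discussion above (sketch only):
 *
 *	// legacy-helper-like behaviour, also updates planes on disabled CRTCs
 *	drm_atomic_helper_commit_planes(dev, old_state, 0);
 *
 *	// only touch planes on active CRTCs, e.g. for runtime PM
 *	drm_atomic_helper_commit_planes(dev, old_state,
 *					DRM_PLANE_COMMIT_ACTIVE_ONLY);
 *
 *	// additionally skip disabling planes on CRTCs undergoing a modeset,
 *	// when the CRTC disable path already turns its planes off
 *	drm_atomic_helper_commit_planes(dev, old_state,
 *					DRM_PLANE_COMMIT_ACTIVE_ONLY |
 *					DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
 */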
  1910. /**
  1911. * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
  1912. * @old_crtc_state: atomic state object with the old crtc state
  1913. *
  1914. * This function commits the new plane state using the plane and atomic helper
  1915. * functions for planes on the specific crtc. It assumes that the atomic state
  1916. * has already been pushed into the relevant object state pointers, since this
  1917. * step can no longer fail.
  1918. *
  1919. * This function is useful when plane updates should be done crtc-by-crtc
  1920. * instead of one global step like drm_atomic_helper_commit_planes() does.
  1921. *
1922. * This function can only be safely used when planes are not allowed to move
1923. * between different CRTCs because this function doesn't handle inter-CRTC
1924. * dependencies. Callers need to ensure that no such dependencies exist, or
1925. * resolve them through the ordering of commit calls or through some other means.
  1926. */
  1927. void
  1928. drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
  1929. {
  1930. const struct drm_crtc_helper_funcs *crtc_funcs;
  1931. struct drm_crtc *crtc = old_crtc_state->crtc;
  1932. struct drm_atomic_state *old_state = old_crtc_state->state;
  1933. struct drm_plane *plane;
  1934. unsigned plane_mask;
  1935. plane_mask = old_crtc_state->plane_mask;
  1936. plane_mask |= crtc->state->plane_mask;
  1937. crtc_funcs = crtc->helper_private;
  1938. if (crtc_funcs && crtc_funcs->atomic_begin)
  1939. crtc_funcs->atomic_begin(crtc, old_crtc_state);
  1940. drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
  1941. struct drm_plane_state *old_plane_state =
  1942. drm_atomic_get_old_plane_state(old_state, plane);
  1943. const struct drm_plane_helper_funcs *plane_funcs;
  1944. plane_funcs = plane->helper_private;
  1945. if (!old_plane_state || !plane_funcs)
  1946. continue;
  1947. WARN_ON(plane->state->crtc && plane->state->crtc != crtc);
  1948. if (drm_atomic_plane_disabling(old_plane_state, plane->state) &&
  1949. plane_funcs->atomic_disable)
  1950. plane_funcs->atomic_disable(plane, old_plane_state);
  1951. else if (plane->state->crtc ||
  1952. drm_atomic_plane_disabling(old_plane_state, plane->state))
  1953. plane_funcs->atomic_update(plane, old_plane_state);
  1954. }
  1955. if (crtc_funcs && crtc_funcs->atomic_flush)
  1956. crtc_funcs->atomic_flush(crtc, old_crtc_state);
  1957. }
  1958. EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
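/*
 * A sketch of the crtc-by-crtc alternative mentioned above, for a hypothetical
 * driver whose planes never move between CRTCs:
 *
 *	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 *		// program per-CRTC state first, then commit its planes
 *		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 *	}
 */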
  1959. /**
  1960. * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
  1961. * @old_crtc_state: atomic state object with the old CRTC state
  1962. * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
  1963. *
  1964. * Disables all planes associated with the given CRTC. This can be
  1965. * used for instance in the CRTC helper atomic_disable callback to disable
  1966. * all planes.
  1967. *
  1968. * If the atomic-parameter is set the function calls the CRTC's
  1969. * atomic_begin hook before and atomic_flush hook after disabling the
  1970. * planes.
  1971. *
  1972. * It is a bug to call this function without having implemented the
  1973. * &drm_plane_helper_funcs.atomic_disable plane hook.
  1974. */
  1975. void
  1976. drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
  1977. bool atomic)
  1978. {
  1979. struct drm_crtc *crtc = old_crtc_state->crtc;
  1980. const struct drm_crtc_helper_funcs *crtc_funcs =
  1981. crtc->helper_private;
  1982. struct drm_plane *plane;
  1983. if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
  1984. crtc_funcs->atomic_begin(crtc, NULL);
  1985. drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
  1986. const struct drm_plane_helper_funcs *plane_funcs =
  1987. plane->helper_private;
  1988. if (!plane_funcs)
  1989. continue;
  1990. WARN_ON(!plane_funcs->atomic_disable);
  1991. if (plane_funcs->atomic_disable)
  1992. plane_funcs->atomic_disable(plane, NULL);
  1993. }
  1994. if (atomic && crtc_funcs && crtc_funcs->atomic_flush)
  1995. crtc_funcs->atomic_flush(crtc, NULL);
  1996. }
  1997. EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
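/*
 * Example usage as mentioned above, from a hypothetical CRTC
 * &drm_crtc_helper_funcs.atomic_disable hook (sketch, foo_ names are made up):
 *
 *	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *					    struct drm_crtc_state *old_crtc_state)
 *	{
 *		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *
 *		// ... turn off the CRTC hardware itself ...
 *	}
 *
 * This requires the driver's planes to implement
 * &drm_plane_helper_funcs.atomic_disable.
 */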
  1998. /**
  1999. * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
  2000. * @dev: DRM device
  2001. * @old_state: atomic state object with old state structures
  2002. *
  2003. * This function cleans up plane state, specifically framebuffers, from the old
2004. * configuration. Hence the old configuration must be preserved in @old_state to
  2005. * be able to call this function.
  2006. *
  2007. * This function must also be called on the new state when the atomic update
  2008. * fails at any point after calling drm_atomic_helper_prepare_planes().
  2009. */
  2010. void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
  2011. struct drm_atomic_state *old_state)
  2012. {
  2013. struct drm_plane *plane;
  2014. struct drm_plane_state *old_plane_state, *new_plane_state;
  2015. int i;
  2016. for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
  2017. const struct drm_plane_helper_funcs *funcs;
  2018. struct drm_plane_state *plane_state;
  2019. /*
  2020. * This might be called before swapping when commit is aborted,
  2021. * in which case we have to cleanup the new state.
  2022. */
  2023. if (old_plane_state == plane->state)
  2024. plane_state = new_plane_state;
  2025. else
  2026. plane_state = old_plane_state;
  2027. funcs = plane->helper_private;
  2028. if (funcs->cleanup_fb)
  2029. funcs->cleanup_fb(plane, plane_state);
  2030. }
  2031. }
  2032. EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
  2033. /**
  2034. * drm_atomic_helper_swap_state - store atomic state into current sw state
  2035. * @state: atomic state
2036. * @stall: stall for preceding commits
  2037. *
  2038. * This function stores the atomic state into the current state pointers in all
2039. * driver objects. It should be called after all steps that can fail have
2040. * completed successfully, but before the actual hardware state is committed.
  2041. *
  2042. * For cleanup and error recovery the current state for all changed objects will
  2043. * be swapped into @state.
  2044. *
  2045. * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
  2046. *
  2047. * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
  2048. *
  2049. * 2. Do any other steps that might fail.
  2050. *
  2051. * 3. Put the staged state into the current state pointers with this function.
  2052. *
  2053. * 4. Actually commit the hardware state.
  2054. *
  2055. * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
  2056. * contains the old state. Also do any other cleanup required with that state.
  2057. *
  2058. * @stall must be set when nonblocking commits for this driver directly access
  2059. * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
  2060. * the current atomic helpers this is almost always the case, since the helpers
  2061. * don't pass the right state structures to the callbacks.
  2062. *
  2063. * Returns:
  2064. *
2065. * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and
2066. * waiting for the previous commits has been interrupted.
  2067. */
  2068. int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
  2069. bool stall)
  2070. {
  2071. int i, ret;
  2072. struct drm_connector *connector;
  2073. struct drm_connector_state *old_conn_state, *new_conn_state;
  2074. struct drm_crtc *crtc;
  2075. struct drm_crtc_state *old_crtc_state, *new_crtc_state;
  2076. struct drm_plane *plane;
  2077. struct drm_plane_state *old_plane_state, *new_plane_state;
  2078. struct drm_crtc_commit *commit;
  2079. struct drm_private_obj *obj;
  2080. struct drm_private_state *old_obj_state, *new_obj_state;
  2081. if (stall) {
  2082. /*
  2083. * We have to stall for hw_done here before
  2084. * drm_atomic_helper_wait_for_dependencies() because flip
  2085. * depth > 1 is not yet supported by all drivers. As long as
  2086. * obj->state is directly dereferenced anywhere in the drivers
  2087. * atomic_commit_tail function, then it's unsafe to swap state
  2088. * before drm_atomic_helper_commit_hw_done() is called.
  2089. */
  2090. for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
  2091. commit = old_crtc_state->commit;
  2092. if (!commit)
  2093. continue;
  2094. ret = wait_for_completion_interruptible(&commit->hw_done);
  2095. if (ret)
  2096. return ret;
  2097. }
  2098. for_each_old_connector_in_state(state, connector, old_conn_state, i) {
  2099. commit = old_conn_state->commit;
  2100. if (!commit)
  2101. continue;
  2102. ret = wait_for_completion_interruptible(&commit->hw_done);
  2103. if (ret)
  2104. return ret;
  2105. }
  2106. for_each_old_plane_in_state(state, plane, old_plane_state, i) {
  2107. commit = old_plane_state->commit;
  2108. if (!commit)
  2109. continue;
  2110. ret = wait_for_completion_interruptible(&commit->hw_done);
  2111. if (ret)
  2112. return ret;
  2113. }
  2114. }
  2115. for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
  2116. WARN_ON(connector->state != old_conn_state);
  2117. old_conn_state->state = state;
  2118. new_conn_state->state = NULL;
  2119. state->connectors[i].state = old_conn_state;
  2120. connector->state = new_conn_state;
  2121. }
  2122. for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
  2123. WARN_ON(crtc->state != old_crtc_state);
  2124. old_crtc_state->state = state;
  2125. new_crtc_state->state = NULL;
  2126. state->crtcs[i].state = old_crtc_state;
  2127. crtc->state = new_crtc_state;
  2128. if (new_crtc_state->commit) {
  2129. spin_lock(&crtc->commit_lock);
  2130. list_add(&new_crtc_state->commit->commit_entry,
  2131. &crtc->commit_list);
  2132. spin_unlock(&crtc->commit_lock);
  2133. new_crtc_state->commit->event = NULL;
  2134. }
  2135. }
  2136. for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
  2137. WARN_ON(plane->state != old_plane_state);
  2138. old_plane_state->state = state;
  2139. new_plane_state->state = NULL;
  2140. state->planes[i].state = old_plane_state;
  2141. plane->state = new_plane_state;
  2142. }
  2143. for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
  2144. WARN_ON(obj->state != old_obj_state);
  2145. old_obj_state->state = state;
  2146. new_obj_state->state = NULL;
  2147. state->private_objs[i].state = old_obj_state;
  2148. obj->state = new_obj_state;
  2149. }
  2150. return 0;
  2151. }
  2152. EXPORT_SYMBOL(drm_atomic_helper_swap_state);
  2153. /**
  2154. * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
  2155. * @plane: plane object to update
2156. * @crtc: owning CRTC of the plane
  2157. * @fb: framebuffer to flip onto plane
  2158. * @crtc_x: x offset of primary plane on crtc
  2159. * @crtc_y: y offset of primary plane on crtc
  2160. * @crtc_w: width of primary plane rectangle on crtc
  2161. * @crtc_h: height of primary plane rectangle on crtc
  2162. * @src_x: x offset of @fb for panning
  2163. * @src_y: y offset of @fb for panning
  2164. * @src_w: width of source rectangle in @fb
  2165. * @src_h: height of source rectangle in @fb
  2166. * @ctx: lock acquire context
  2167. *
  2168. * Provides a default plane update handler using the atomic driver interface.
  2169. *
  2170. * RETURNS:
  2171. * Zero on success, error code on failure
  2172. */
  2173. int drm_atomic_helper_update_plane(struct drm_plane *plane,
  2174. struct drm_crtc *crtc,
  2175. struct drm_framebuffer *fb,
  2176. int crtc_x, int crtc_y,
  2177. unsigned int crtc_w, unsigned int crtc_h,
  2178. uint32_t src_x, uint32_t src_y,
  2179. uint32_t src_w, uint32_t src_h,
  2180. struct drm_modeset_acquire_ctx *ctx)
  2181. {
  2182. struct drm_atomic_state *state;
  2183. struct drm_plane_state *plane_state;
  2184. int ret = 0;
  2185. state = drm_atomic_state_alloc(plane->dev);
  2186. if (!state)
  2187. return -ENOMEM;
  2188. state->acquire_ctx = ctx;
  2189. plane_state = drm_atomic_get_plane_state(state, plane);
  2190. if (IS_ERR(plane_state)) {
  2191. ret = PTR_ERR(plane_state);
  2192. goto fail;
  2193. }
  2194. ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
  2195. if (ret != 0)
  2196. goto fail;
  2197. drm_atomic_set_fb_for_plane(plane_state, fb);
  2198. plane_state->crtc_x = crtc_x;
  2199. plane_state->crtc_y = crtc_y;
  2200. plane_state->crtc_w = crtc_w;
  2201. plane_state->crtc_h = crtc_h;
  2202. plane_state->src_x = src_x;
  2203. plane_state->src_y = src_y;
  2204. plane_state->src_w = src_w;
  2205. plane_state->src_h = src_h;
  2206. if (plane == crtc->cursor)
  2207. state->legacy_cursor_update = true;
  2208. ret = drm_atomic_commit(state);
  2209. fail:
  2210. drm_atomic_state_put(state);
  2211. return ret;
  2212. }
  2213. EXPORT_SYMBOL(drm_atomic_helper_update_plane);
  2214. /**
2215. * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
  2216. * @plane: plane to disable
  2217. * @ctx: lock acquire context
  2218. *
  2219. * Provides a default plane disable handler using the atomic driver interface.
  2220. *
  2221. * RETURNS:
  2222. * Zero on success, error code on failure
  2223. */
  2224. int drm_atomic_helper_disable_plane(struct drm_plane *plane,
  2225. struct drm_modeset_acquire_ctx *ctx)
  2226. {
  2227. struct drm_atomic_state *state;
  2228. struct drm_plane_state *plane_state;
  2229. int ret = 0;
  2230. state = drm_atomic_state_alloc(plane->dev);
  2231. if (!state)
  2232. return -ENOMEM;
  2233. state->acquire_ctx = ctx;
  2234. plane_state = drm_atomic_get_plane_state(state, plane);
  2235. if (IS_ERR(plane_state)) {
  2236. ret = PTR_ERR(plane_state);
  2237. goto fail;
  2238. }
  2239. if (plane_state->crtc && (plane == plane->crtc->cursor))
  2240. plane_state->state->legacy_cursor_update = true;
  2241. ret = __drm_atomic_helper_disable_plane(plane, plane_state);
  2242. if (ret != 0)
  2243. goto fail;
  2244. ret = drm_atomic_commit(state);
  2245. fail:
  2246. drm_atomic_state_put(state);
  2247. return ret;
  2248. }
  2249. EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
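/*
 * Both helpers above are meant to be plugged straight into &drm_plane_funcs,
 * e.g. (sketch):
 *
 *	static const struct drm_plane_funcs foo_plane_funcs = {
 *		.update_plane = drm_atomic_helper_update_plane,
 *		.disable_plane = drm_atomic_helper_disable_plane,
 *		.destroy = drm_plane_cleanup,
 *		.reset = drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 *	};
 */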
  2250. /* just used from fb-helper and atomic-helper: */
  2251. int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
  2252. struct drm_plane_state *plane_state)
  2253. {
  2254. int ret;
  2255. ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
  2256. if (ret != 0)
  2257. return ret;
  2258. drm_atomic_set_fb_for_plane(plane_state, NULL);
  2259. plane_state->crtc_x = 0;
  2260. plane_state->crtc_y = 0;
  2261. plane_state->crtc_w = 0;
  2262. plane_state->crtc_h = 0;
  2263. plane_state->src_x = 0;
  2264. plane_state->src_y = 0;
  2265. plane_state->src_w = 0;
  2266. plane_state->src_h = 0;
  2267. return 0;
  2268. }
  2269. static int update_output_state(struct drm_atomic_state *state,
  2270. struct drm_mode_set *set)
  2271. {
  2272. struct drm_device *dev = set->crtc->dev;
  2273. struct drm_crtc *crtc;
  2274. struct drm_crtc_state *new_crtc_state;
  2275. struct drm_connector *connector;
  2276. struct drm_connector_state *new_conn_state;
  2277. int ret, i;
  2278. ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
  2279. state->acquire_ctx);
  2280. if (ret)
  2281. return ret;
  2282. /* First disable all connectors on the target crtc. */
  2283. ret = drm_atomic_add_affected_connectors(state, set->crtc);
  2284. if (ret)
  2285. return ret;
  2286. for_each_new_connector_in_state(state, connector, new_conn_state, i) {
  2287. if (new_conn_state->crtc == set->crtc) {
  2288. ret = drm_atomic_set_crtc_for_connector(new_conn_state,
  2289. NULL);
  2290. if (ret)
  2291. return ret;
  2292. /* Make sure legacy setCrtc always re-trains */
  2293. new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
  2294. }
  2295. }
  2296. /* Then set all connectors from set->connectors on the target crtc */
  2297. for (i = 0; i < set->num_connectors; i++) {
  2298. new_conn_state = drm_atomic_get_connector_state(state,
  2299. set->connectors[i]);
  2300. if (IS_ERR(new_conn_state))
  2301. return PTR_ERR(new_conn_state);
  2302. ret = drm_atomic_set_crtc_for_connector(new_conn_state,
  2303. set->crtc);
  2304. if (ret)
  2305. return ret;
  2306. }
  2307. for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
  2308. /* Don't update ->enable for the CRTC in the set_config request,
  2309. * since a mismatch would indicate a bug in the upper layers.
  2310. * The actual modeset code later on will catch any
  2311. * inconsistencies here. */
  2312. if (crtc == set->crtc)
  2313. continue;
  2314. if (!new_crtc_state->connector_mask) {
  2315. ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
  2316. NULL);
  2317. if (ret < 0)
  2318. return ret;
  2319. new_crtc_state->active = false;
  2320. }
  2321. }
  2322. return 0;
  2323. }
/**
 * drm_atomic_helper_set_config - set a new config from userspace
 * @set: mode set configuration
 * @ctx: lock acquisition context
 *
 * Provides a default CRTC set_config handler using the atomic driver interface.
 *
 * NOTE: For backwards compatibility with old userspace this automatically
 * resets the "link-status" property to GOOD, to force any link
 * re-training. The SETCRTC ioctl does not define whether an update needs a
 * full modeset or just a plane update, hence we're allowed to do that. See
 * also drm_mode_connector_set_link_status_property().
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_set_config(struct drm_mode_set *set,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc *crtc = set->crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	ret = __drm_atomic_helper_set_config(set, state);
	if (ret != 0)
		goto fail;

	ret = handle_conflicting_encoders(state, true);
	if (ret)
		goto fail;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);
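/*
 * Illustrative sketch only: wiring the legacy SETCRTC path of a hypothetical
 * "foo" driver to this helper, together with the default CRTC state hooks
 * defined further down in this file.
 */
#if 0
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};
#endif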
/* just used from fb-helper and atomic-helper: */
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *primary_state;
	struct drm_crtc *crtc = set->crtc;
	int hdisplay, vdisplay;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	if (!set->mode) {
		WARN_ON(set->fb);
		WARN_ON(set->num_connectors);

		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
		if (ret != 0)
			return ret;

		crtc_state->active = false;

		ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
		if (ret != 0)
			return ret;

		drm_atomic_set_fb_for_plane(primary_state, NULL);

		goto commit;
	}

	WARN_ON(!set->fb);
	WARN_ON(!set->num_connectors);

	ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
	if (ret != 0)
		return ret;

	crtc_state->active = true;

	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
	if (ret != 0)
		return ret;

	drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);

	drm_atomic_set_fb_for_plane(primary_state, set->fb);
	primary_state->crtc_x = 0;
	primary_state->crtc_y = 0;
	primary_state->crtc_w = hdisplay;
	primary_state->crtc_h = vdisplay;
	primary_state->src_x = set->x << 16;
	primary_state->src_y = set->y << 16;
	if (drm_rotation_90_or_270(primary_state->rotation)) {
		primary_state->src_w = vdisplay << 16;
		primary_state->src_h = hdisplay << 16;
	} else {
		primary_state->src_w = hdisplay << 16;
		primary_state->src_h = vdisplay << 16;
	}

commit:
	ret = update_output_state(state, set);
	if (ret)
		return ret;

	return 0;
}
/**
 * drm_atomic_helper_disable_all - disable all currently active outputs
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Loops through all connectors, finding those that aren't turned off and then
 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
 * that they are connected to.
 *
 * This is used for example in suspend/resume to disable all currently active
 * functions when suspending. If you just want to shut down everything at e.g.
 * driver unload, look at drm_atomic_helper_shutdown().
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
 * drm_atomic_helper_shutdown().
 */
int drm_atomic_helper_disable_all(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned plane_mask = 0;
	int ret, i;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto free;
		}

		crtc_state->active = false;

		ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret < 0)
			goto free;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret < 0)
			goto free;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
		if (ret < 0)
			goto free;
	}

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret < 0)
			goto free;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
		plane_mask |= BIT(drm_plane_index(plane));
		plane->old_fb = plane->fb;
	}

	ret = drm_atomic_commit(state);
free:
	if (plane_mask)
		drm_atomic_clean_old_fb(dev, plane_mask, ret);
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
/**
 * drm_atomic_helper_shutdown - shut down all CRTCs
 * @dev: DRM device
 *
 * This shuts down all CRTCs, which is useful for driver unloading. Shutdown on
 * suspend should instead be handled with drm_atomic_helper_suspend(), since
 * that also takes a snapshot of the modeset state to be restored on resume.
 *
 * This is just a convenience wrapper around drm_atomic_helper_disable_all(),
 * and it is the atomic version of drm_crtc_force_disable_all().
 */
void drm_atomic_helper_shutdown(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (!ret)
			ret = drm_atomic_helper_disable_all(dev, &ctx);

		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (ret)
		DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
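/*
 * Illustrative sketch only: a typical unbind/remove path of a hypothetical
 * "foo" driver, which unregisters the device first and then uses
 * drm_atomic_helper_shutdown() to turn off all CRTCs before tearing the rest
 * down.
 */
#if 0
static void foo_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_dev_unregister(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);
	drm_dev_put(drm);
}
#endif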
/**
 * drm_atomic_helper_suspend - subsystem-level suspend helper
 * @dev: DRM device
 *
 * Duplicates the current atomic state, disables all active outputs and then
 * returns a pointer to the original atomic state to the caller. Drivers can
 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to
 * restore the output configuration that was active at the time the system
 * entered suspend.
 *
 * Note that it is potentially unsafe to use this. The atomic state object
 * returned by this function is assumed to be persistent. Drivers must ensure
 * that this holds true. Before calling this function, drivers must make sure
 * to suspend fbdev emulation so that nothing can be using the device.
 *
 * Returns:
 * A pointer to a copy of the state before suspend on success or an ERR_PTR()-
 * encoded error code on failure. Drivers should store the returned atomic
 * state object and pass it to the drm_atomic_helper_resume() helper upon
 * resume.
 *
 * See also:
 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(),
 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state()
 */
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	err = drm_modeset_lock_all_ctx(dev, &ctx);
	if (err < 0) {
		state = ERR_PTR(err);
		goto unlock;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (IS_ERR(state))
		goto unlock;

	err = drm_atomic_helper_disable_all(dev, &ctx);
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
		goto unlock;
	}

unlock:
	if (PTR_ERR(state) == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
/**
 * drm_atomic_helper_commit_duplicated_state - commit duplicated state
 * @state: duplicated atomic state to commit
 * @ctx: pointer to acquire_ctx to use for commit.
 *
 * The state returned by drm_atomic_helper_duplicate_state() and
 * drm_atomic_helper_suspend() is partially invalid, and needs to
 * be fixed up before commit.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
					      struct drm_modeset_acquire_ctx *ctx)
{
	int i;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	unsigned plane_mask = 0;
	struct drm_device *dev = state->dev;
	int ret;

	state->acquire_ctx = ctx;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		plane_mask |= BIT(drm_plane_index(plane));
		state->planes[i].old_state = plane->state;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		state->crtcs[i].old_state = crtc->state;

	for_each_new_connector_in_state(state, connector, new_conn_state, i)
		state->connectors[i].old_state = connector->state;

	ret = drm_atomic_commit(state);
	if (plane_mask)
		drm_atomic_clean_old_fb(dev, plane_mask, ret);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
/**
 * drm_atomic_helper_resume - subsystem-level resume helper
 * @dev: DRM device
 * @state: atomic state to resume to
 *
 * Calls drm_mode_config_reset() to synchronize hardware and software states,
 * grabs all modeset locks and commits the atomic state object. This can be
 * used in conjunction with the drm_atomic_helper_suspend() helper to
 * implement suspend/resume for drivers that support atomic mode-setting.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend()
 */
int drm_atomic_helper_resume(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int err;

	drm_mode_config_reset(dev);

	drm_modeset_acquire_init(&ctx, 0);
	while (1) {
		err = drm_modeset_lock_all_ctx(dev, &ctx);
		if (err)
			goto out;

		err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
out:
		if (err != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	drm_atomic_state_put(state);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return err;
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
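/*
 * Illustrative sketch only: system sleep hooks of a hypothetical "foo" driver
 * built on drm_atomic_helper_suspend()/drm_atomic_helper_resume(). The
 * foo_drm structure and to_foo_drm() are assumed driver-private helpers that
 * provide somewhere persistent to stash the duplicated state.
 */
#if 0
static int foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct foo_drm *foo = to_foo_drm(drm);

	foo->suspend_state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(foo->suspend_state))
		return PTR_ERR(foo->suspend_state);

	return 0;
}

static int foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct foo_drm *foo = to_foo_drm(drm);

	return drm_atomic_helper_resume(drm, foo->suspend_state);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);
#endif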
static int page_flip_common(struct drm_atomic_state *state,
			    struct drm_crtc *crtc,
			    struct drm_framebuffer *fb,
			    struct drm_pending_vblank_event *event,
			    uint32_t flags)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->event = event;
	crtc_state->pageflip_flags = flags;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret != 0)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* Make sure we don't accidentally do a full modeset. */
	state->allow_modeset = false;
	if (!crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return ret;
}
/**
 * drm_atomic_helper_page_flip - execute a legacy page flip
 * @crtc: DRM crtc
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip implementation
 * using the atomic driver interface.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 *
 * See also:
 * drm_atomic_helper_page_flip_target()
 */
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
/**
 * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
 * @crtc: DRM crtc
 * @fb: DRM framebuffer
 * @event: optional DRM event to signal upon completion
 * @flags: flip flags for non-vblank sync'ed updates
 * @target: target vblank period in which the flip should take effect
 * @ctx: lock acquisition context
 *
 * Provides a default &drm_crtc_funcs.page_flip_target implementation.
 * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
 * specifying the target vblank period in which the flip should take effect.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
 */
int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
				       struct drm_framebuffer *fb,
				       struct drm_pending_vblank_event *event,
				       uint32_t flags,
				       uint32_t target,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane *plane = crtc->primary;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;

	ret = page_flip_common(state, crtc, fb, event, flags);
	if (ret != 0)
		goto fail;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state)) {
		ret = -EINVAL;
		goto fail;
	}
	crtc_state->target_vblank = target;

	ret = drm_atomic_nonblocking_commit(state);
fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
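/*
 * Illustrative sketch only: the corresponding &drm_crtc_funcs entries of a
 * hypothetical "foo" driver that uses the two flip helpers above; the other
 * hooks would be filled in as in the set_config example earlier.
 */
#if 0
static const struct drm_crtc_funcs foo_flip_crtc_funcs = {
	/* ... other hooks ... */
	.page_flip		= drm_atomic_helper_page_flip,
	.page_flip_target	= drm_atomic_helper_page_flip_target,
};
#endif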
/**
 * drm_atomic_helper_best_encoder - Helper for
 * 	&drm_connector_helper_funcs.best_encoder callback
 * @connector: Connector control structure
 *
 * This is a &drm_connector_helper_funcs.best_encoder callback helper for
 * connectors that support exactly 1 encoder, statically determined at driver
 * init time.
 */
struct drm_encoder *
drm_atomic_helper_best_encoder(struct drm_connector *connector)
{
	WARN_ON(connector->encoder_ids[1]);
	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
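/*
 * Illustrative sketch only: a connector of a hypothetical "foo" driver with a
 * single, statically assigned encoder can simply plug this helper into its
 * &drm_connector_helper_funcs; foo_connector_get_modes() is an assumed
 * driver-specific .get_modes implementation.
 */
#if 0
static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
	.get_modes	= foo_connector_get_modes,
	.best_encoder	= drm_atomic_helper_best_encoder,
};
#endif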
/**
 * DOC: atomic state reset and initialization
 *
 * Both the drm core and the atomic helpers assume that there is always the full
 * and correct atomic software state for all connectors, CRTCs and planes
 * available, which is a bit of a problem on driver load and also after system
 * suspend. One way to solve this is to have a hardware state read-out
 * infrastructure which reconstructs the full software state (e.g. the i915
 * driver).
 *
 * The simpler solution is to just reset the software state to everything off,
 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
 * the atomic helpers provide default reset implementations for all hooks.
 *
 * On the upside the precise state tracking of atomic simplifies system suspend
 * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
 * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
 * For other drivers the building blocks are split out, see the documentation
 * for these functions.
 */
/**
 * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
 * @crtc: drm CRTC
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(crtc->state);
	crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);

	if (crtc->state)
		crtc->state->crtc = crtc;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
/**
 * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
 * @crtc: CRTC object
 * @state: atomic CRTC state
 *
 * Copies atomic state from a CRTC's current state and resets inferred values.
 * This is useful for drivers that subclass the CRTC state.
 */
void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
					      struct drm_crtc_state *state)
{
	memcpy(state, crtc->state, sizeof(*state));

	if (state->mode_blob)
		drm_property_blob_get(state->mode_blob);
	if (state->degamma_lut)
		drm_property_blob_get(state->degamma_lut);
	if (state->ctm)
		drm_property_blob_get(state->ctm);
	if (state->gamma_lut)
		drm_property_blob_get(state->gamma_lut);

	state->mode_changed = false;
	state->active_changed = false;
	state->planes_changed = false;
	state->connectors_changed = false;
	state->color_mgmt_changed = false;
	state->zpos_changed = false;
	state->commit = NULL;
	state->event = NULL;
	state->pageflip_flags = 0;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
/**
 * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
 * @crtc: drm CRTC
 *
 * Default CRTC state duplicate hook for drivers which don't have their own
 * subclassed CRTC state structure.
 */
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
/**
 * __drm_atomic_helper_crtc_destroy_state - release CRTC state
 * @state: CRTC state object to release
 *
 * Releases all resources stored in the CRTC state without actually freeing
 * the memory of the CRTC state. This is useful for drivers that subclass the
 * CRTC state.
 */
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
	if (state->commit) {
		/*
		 * In the event that a non-blocking commit returns
		 * -ERESTARTSYS before the commit_tail work is queued, we will
		 * have an extra reference to the commit object. Release it, if
		 * the event has not been consumed by the worker.
		 *
		 * state->event may be freed, so we can't directly look at
		 * state->event->base.completion.
		 */
		if (state->event && state->commit->abort_completion)
			drm_crtc_commit_put(state->commit);

		kfree(state->commit->event);
		state->commit->event = NULL;

		drm_crtc_commit_put(state->commit);
	}

	drm_property_blob_put(state->mode_blob);
	drm_property_blob_put(state->degamma_lut);
	drm_property_blob_put(state->ctm);
	drm_property_blob_put(state->gamma_lut);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
/**
 * drm_atomic_helper_crtc_destroy_state - default state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 *
 * Default CRTC state destroy hook for drivers which don't have their own
 * subclassed CRTC state structure.
 */
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
					  struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
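/*
 * Illustrative sketch only: a driver that subclasses &drm_crtc_state uses the
 * __drm_atomic_helper_crtc_*() variants above and frees/allocates the larger
 * structure itself. All "foo" names are hypothetical.
 */
#if 0
struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 custom_dithering;			/* driver-private member */
};

#define to_foo_crtc_state(s) container_of(s, struct foo_crtc_state, base)

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		kfree(to_foo_crtc_state(crtc->state));
		crtc->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		crtc->state = &state->base;
		crtc->state->crtc = crtc;
	}
}
#endif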
/**
 * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void drm_atomic_helper_plane_reset(struct drm_plane *plane)
{
	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);

	kfree(plane->state);
	plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);

	if (plane->state) {
		plane->state->plane = plane;
		plane->state->rotation = DRM_MODE_ROTATE_0;
	}
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
/**
 * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
 * @plane: plane object
 * @state: atomic plane state
 *
 * Copies atomic state from a plane's current state. This is useful for
 * drivers that subclass the plane state.
 */
void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
					       struct drm_plane_state *state)
{
	memcpy(state, plane->state, sizeof(*state));

	if (state->fb)
		drm_framebuffer_get(state->fb);

	state->fence = NULL;
	state->commit = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
/**
 * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
 * @plane: drm plane
 *
 * Default plane state duplicate hook for drivers which don't have their own
 * subclassed plane state structure.
 */
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;

	if (WARN_ON(!plane->state))
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
/**
 * __drm_atomic_helper_plane_destroy_state - release plane state
 * @state: plane state object to release
 *
 * Releases all resources stored in the plane state without actually freeing
 * the memory of the plane state. This is useful for drivers that subclass the
 * plane state.
 */
void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
	if (state->fb)
		drm_framebuffer_put(state->fb);

	if (state->fence)
		dma_fence_put(state->fence);

	if (state->commit)
		drm_crtc_commit_put(state->commit);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
/**
 * drm_atomic_helper_plane_destroy_state - default state destroy hook
 * @plane: drm plane
 * @state: plane state object to release
 *
 * Default plane state destroy hook for drivers which don't have their own
 * subclassed plane state structure.
 */
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
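/*
 * Illustrative sketch only: duplicate/destroy hooks of a driver that
 * subclasses &drm_plane_state, layered on the __drm_atomic_helper_plane_*()
 * variants above. All "foo" names are hypothetical; note that the helper only
 * copies the base structure, so subclass members must be copied by hand.
 */
#if 0
struct foo_plane_state {
	struct drm_plane_state base;
	bool needs_scaling;			/* driver-private member */
};

#define to_foo_plane_state(s) container_of(s, struct foo_plane_state, base)

static struct drm_plane_state *foo_plane_duplicate_state(struct drm_plane *plane)
{
	struct foo_plane_state *state;

	if (WARN_ON(!plane->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
	state->needs_scaling = to_foo_plane_state(plane->state)->needs_scaling;

	return &state->base;
}

static void foo_plane_destroy_state(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(to_foo_plane_state(state));
}
#endif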
/**
 * __drm_atomic_helper_connector_reset - reset state on connector
 * @connector: drm connector
 * @conn_state: connector state to assign
 *
 * Initializes the newly allocated @conn_state and assigns it to
 * the &drm_connector->state pointer of @connector, usually required when
 * initializing the driver or when called from the &drm_connector_funcs.reset
 * hook.
 *
 * This is useful for drivers that subclass the connector state.
 */
void
__drm_atomic_helper_connector_reset(struct drm_connector *connector,
				    struct drm_connector_state *conn_state)
{
	if (conn_state)
		conn_state->connector = connector;

	connector->state = conn_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
/**
 * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
 * @connector: drm connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void drm_atomic_helper_connector_reset(struct drm_connector *connector)
{
	struct drm_connector_state *conn_state =
		kzalloc(sizeof(*conn_state), GFP_KERNEL);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(connector->state);
	__drm_atomic_helper_connector_reset(connector, conn_state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
/**
 * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
 * @connector: connector object
 * @state: atomic connector state
 *
 * Copies atomic state from a connector's current state. This is useful for
 * drivers that subclass the connector state.
 */
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
					      struct drm_connector_state *state)
{
	memcpy(state, connector->state, sizeof(*state));
	if (state->crtc)
		drm_connector_get(connector);
	state->commit = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
/**
 * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
 * @connector: drm connector
 *
 * Default connector state duplicate hook for drivers which don't have their own
 * subclassed connector state structure.
 */
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;

	if (WARN_ON(!connector->state))
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
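/*
 * Illustrative sketch only: the connector hooks of a hypothetical "foo"
 * driver without a subclassed connector state simply use the defaults above;
 * foo_connector_detect() is an assumed driver-specific .detect callback.
 */
#if 0
static const struct drm_connector_funcs foo_connector_funcs = {
	.detect			= foo_connector_detect,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.destroy		= drm_connector_cleanup,
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};
#endif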
/**
 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
 * duplicating their respective states. This is used for example by suspend/
 * resume support code to save the state prior to suspend such that it can
 * be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
 * or erroneous behaviour.
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
/**
 * __drm_atomic_helper_connector_destroy_state - release connector state
 * @state: connector state object to release
 *
 * Releases all resources stored in the connector state without actually
 * freeing the memory of the connector state. This is useful for drivers that
 * subclass the connector state.
 */
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
{
	if (state->crtc)
		drm_connector_put(state->connector);

	if (state->commit)
		drm_crtc_commit_put(state->commit);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
/**
 * drm_atomic_helper_connector_destroy_state - default state destroy hook
 * @connector: drm connector
 * @state: connector state object to release
 *
 * Default connector state destroy hook for drivers which don't have their own
 * subclassed connector state structure.
 */
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
					       struct drm_connector_state *state)
{
	__drm_atomic_helper_connector_destroy_state(state);
	kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
/**
 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
 * @crtc: CRTC object
 * @red: red correction table
 * @green: green correction table
 * @blue: blue correction table
 * @size: size of the tables
 * @ctx: lock acquire context
 *
 * Implements support for legacy gamma correction table for drivers
 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
 * how the atomic color management and gamma tables work.
 */
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
				       u16 *red, u16 *green, u16 *blue,
				       uint32_t size,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	struct drm_property_blob *blob = NULL;
	struct drm_color_lut *blob_data;
	int i, ret = 0;
	bool replaced;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	blob = drm_property_create_blob(dev,
					sizeof(struct drm_color_lut) * size,
					NULL);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		blob = NULL;
		goto fail;
	}

	/* Prepare GAMMA_LUT with the legacy values. */
	blob_data = (struct drm_color_lut *) blob->data;
	for (i = 0; i < size; i++) {
		blob_data[i].red = red[i];
		blob_data[i].green = green[i];
		blob_data[i].blue = blue[i];
	}

	state->acquire_ctx = ctx;
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	/* Reset DEGAMMA_LUT and CTM properties. */
	replaced  = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
	crtc_state->color_mgmt_changed |= replaced;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	drm_property_blob_put(blob);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
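/*
 * Illustrative sketch only: for the helper above to be useful the CRTC must
 * advertise the color management properties and a legacy gamma table size at
 * init time. The sizes below are placeholders; a driver picks whatever its
 * hardware LUTs support.
 */
#if 0
static const struct drm_crtc_funcs foo_color_crtc_funcs = {
	/* ... other hooks ... */
	.gamma_set	= drm_atomic_helper_legacy_gamma_set,
};

static void foo_crtc_color_init(struct drm_crtc *crtc)
{
	drm_mode_crtc_set_gamma_size(crtc, 256);
	drm_crtc_enable_color_mgmt(crtc, 0, true, 256);
}
#endif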
/**
 * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private object state
 * @obj: private object
 * @state: new private object state
 *
 * Copies atomic state from a private object's current state. This is useful
 * for drivers that subclass the private state.
 */
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
						     struct drm_private_state *state)
{
	memcpy(state, obj->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
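/*
 * Illustrative sketch only: a driver-private object that subclasses
 * &drm_private_state would use the helper above from its duplicate hook. All
 * "foo" names are hypothetical.
 */
#if 0
struct foo_obj_state {
	struct drm_private_state base;
	unsigned int allocated_slots;		/* driver-private member */
};

#define to_foo_obj_state(s) container_of(s, struct foo_obj_state, base)

static struct drm_private_state *
foo_obj_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_obj_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
	state->allocated_slots = to_foo_obj_state(obj->state)->allocated_slots;

	return &state->base;
}

static void foo_obj_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(to_foo_obj_state(state));
}

static const struct drm_private_state_funcs foo_obj_state_funcs = {
	.atomic_duplicate_state	= foo_obj_duplicate_state,
	.atomic_destroy_state	= foo_obj_destroy_state,
};
#endif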