drm_atomic.c

/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);
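
/*
 * Usage sketch (not part of this file; example_build_update() and @ctx are
 * hypothetical): a caller typically allocates the state, attaches an acquire
 * context for the drm_atomic_get_*_state() calls below, and drops the final
 * reference with drm_atomic_state_put() when done.
 */
#if 0
static int example_build_update(struct drm_device *dev,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	/* Locks taken while building the update are tracked in @ctx. */
	state->acquire_ctx = ctx;

	/* ... add CRTC/plane/connector states and commit here ... */

	drm_atomic_state_put(state);
	return ret;
}
#endif
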
/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
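
/*
 * Sketch of the deadlock backoff dance described above (example_commit() is
 * a hypothetical caller): on -EDEADLK the assembled state is cleared, the
 * acquire context backs off, and the whole sequence is retried.
 */
#if 0
static int example_commit(struct drm_atomic_state *state,
			  struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

retry:
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return ret;
}
#endif
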
/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
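
/*
 * Usage sketch (hypothetical helper): pull a CRTC into an update, disable it,
 * and propagate -EDEADLK so the caller can restart the sequence.
 */
#if 0
static int example_disable_crtc(struct drm_atomic_state *state,
				struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->active = false;

	/* A NULL mode clears the mode blob and the enable flag. */
	return drm_atomic_set_mode_for_crtc(crtc_state, NULL);
}
#endif
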
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s32 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s32 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state and update
 * the enable property.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 const struct drm_display_mode *mode)
{
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 mode->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);

/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	if (blob == state->mode_blob)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 && expected_size != new_blob->length) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
	}

	*replaced |= drm_property_replace_blob(blob, new_blob);
	drm_property_blob_put(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to get the property value from
 * @property: the property to set
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
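
/*
 * Usage sketch (hypothetical helper): route a plane to a CRTC and point it at
 * a new framebuffer; both helpers used below are defined in this file.
 */
#if 0
static int example_update_plane(struct drm_atomic_state *state,
				struct drm_plane *plane,
				struct drm_crtc *crtc,
				struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	drm_atomic_set_fb_for_plane(plane_state, fb);

	return 0;
}
#endif
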
/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;
	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->rotation_property) {
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
			return -EINVAL;
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to set a property on
 * @state: the state object to get the property value from
 * @property: the property to set
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
				 drm_get_format_name(state->fb->format->format,
						     &format_name));
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
				 state->fb->width, state->fb->height);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_private_obj_init - initialize private object
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	obj->state = state;
	obj->funcs = funcs;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);
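
/*
 * Sketch of a driver-side private object (all example_* names are
 * hypothetical): the subclassed state embeds &drm_private_state as its first
 * member and the funcs table supplies the duplicate/destroy callbacks.
 */
#if 0
struct example_bus_state {
	struct drm_private_state base;
	unsigned int allocated_slots;
};

static struct drm_private_state *
example_bus_duplicate_state(struct drm_private_obj *obj)
{
	/* A plain copy of the subclassed state is enough here. */
	return kmemdup(obj->state, sizeof(struct example_bus_state),
		       GFP_KERNEL);
}

static void example_bus_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	kfree(container_of(state, struct example_bus_state, base));
}

static const struct drm_private_state_funcs example_bus_funcs = {
	.atomic_duplicate_state = example_bus_duplicate_state,
	.atomic_destroy_state = example_bus_destroy_state,
};

static int example_bus_init(struct drm_private_obj *obj)
{
	struct example_bus_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(obj, &state->base, &example_bus_funcs);
	return 0;
}
#endif
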
/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It does not grab any locks as the caller is
 * expected to take care of any required locking.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
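
/*
 * Usage sketch (hypothetical helper): pull a connector into an update and
 * route it to a CRTC; drm_atomic_set_crtc_for_connector() below takes care of
 * the CRTC state and connector_mask bookkeeping.
 */
#if 0
static int example_route_connector(struct drm_atomic_state *state,
				   struct drm_connector *connector,
				   struct drm_crtc *crtc)
{
	struct drm_connector_state *conn_state;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state))
		return PTR_ERR(conn_state);

	return drm_atomic_set_crtc_for_connector(conn_state, crtc);
}
#endif
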
/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, driver
		 * silently rejects it and returns a 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to set a property on
 * @state: the state object to get the property value from
 * @property: the property to set
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (property == config->link_status_property) {
		*val = state->link_status;
	} else if (property == config->aspect_ratio_property) {
		*val = state->picture_aspect_ratio;
	} else if (property == connector->scaling_mode_property) {
		*val = state->scaling_mode;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

  1121. int drm_atomic_get_property(struct drm_mode_object *obj,
  1122. struct drm_property *property, uint64_t *val)
  1123. {
  1124. struct drm_device *dev = property->dev;
  1125. int ret;
  1126. switch (obj->type) {
  1127. case DRM_MODE_OBJECT_CONNECTOR: {
  1128. struct drm_connector *connector = obj_to_connector(obj);
  1129. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  1130. ret = drm_atomic_connector_get_property(connector,
  1131. connector->state, property, val);
  1132. break;
  1133. }
  1134. case DRM_MODE_OBJECT_CRTC: {
  1135. struct drm_crtc *crtc = obj_to_crtc(obj);
  1136. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  1137. ret = drm_atomic_crtc_get_property(crtc,
  1138. crtc->state, property, val);
  1139. break;
  1140. }
  1141. case DRM_MODE_OBJECT_PLANE: {
  1142. struct drm_plane *plane = obj_to_plane(obj);
  1143. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  1144. ret = drm_atomic_plane_get_property(plane,
  1145. plane->state, property, val);
  1146. break;
  1147. }
  1148. default:
  1149. ret = -EINVAL;
  1150. break;
  1151. }
  1152. return ret;
  1153. }
/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);

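/*
 * Usage sketch (illustrative only, not part of the original file; the my_*
 * names are hypothetical): a driver helper that moves a plane to a new CRTC
 * while building up an atomic update. A -EDEADLK return is handled by the
 * caller's usual backoff-and-retry loop.
 *
 *	static int my_move_plane(struct drm_atomic_state *state,
 *				 struct drm_plane *plane,
 *				 struct drm_crtc *new_crtc)
 *	{
 *		struct drm_plane_state *plane_state;
 *
 *		plane_state = drm_atomic_get_plane_state(state, plane);
 *		if (IS_ERR(plane_state))
 *			return PTR_ERR(plane_state);
 *
 *		// also pulls the new CRTC's state (and lock) into @state
 *		return drm_atomic_set_crtc_for_plane(plane_state, new_crtc);
 *	}
 */
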
/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
				 fb->base.id, plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
				 plane_state);

	drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);

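/*
 * Usage sketch (illustrative only, not part of the original file): updating
 * the framebuffer of a plane that keeps its current CRTC, e.g. for a simple
 * flip. drm_atomic_set_fb_for_plane() handles the reference counting of the
 * old and new framebuffer.
 *
 *	static int my_flip_plane(struct drm_atomic_state *state,
 *				 struct drm_plane *plane,
 *				 struct drm_framebuffer *new_fb)
 *	{
 *		struct drm_plane_state *plane_state;
 *
 *		plane_state = drm_atomic_get_plane_state(state, plane);
 *		if (IS_ERR(plane_state))
 *			return PTR_ERR(plane_state);
 *
 *		drm_atomic_set_fb_for_plane(plane_state, new_fb);
 *		return 0;
 *	}
 */
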
/**
 * drm_atomic_set_fence_for_plane - set fence for plane
 * @plane_state: atomic state object for the plane
 * @fence: dma_fence to use for the plane
 *
 * Helper to set up the plane_state fence in case it is not set yet.
 * By using this function drivers don't need to worry whether the user chose
 * implicit or explicit fencing.
 *
 * This function will not set the fence in the state if it was already set
 * via the explicit fencing interfaces on the atomic ioctl. In that case it
 * will drop the reference to the fence, as we are not storing it anywhere.
 * Otherwise, if &drm_plane_state.fence is not set, this function just sets
 * it to the received implicit fence. In both cases this function consumes a
 * reference for @fence.
 */
void
drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
			       struct dma_fence *fence)
{
	if (plane_state->fence) {
		dma_fence_put(fence);
		return;
	}

	plane_state->fence = fence;
}
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);

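/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * forwarding the implicit fence of the new framebuffer, e.g. from its
 * prepare_fb hook. my_lookup_implicit_fence() is a hypothetical helper that
 * returns a reference to the buffer's exclusive fence (or NULL).
 *
 *	static int my_prepare_fb(struct drm_plane *plane,
 *				 struct drm_plane_state *new_state)
 *	{
 *		struct dma_fence *fence;
 *
 *		if (!new_state->fb)
 *			return 0;
 *
 *		fence = my_lookup_implicit_fence(new_state->fb);
 *
 *		// no-op if userspace already supplied an explicit IN_FENCE_FD,
 *		// otherwise stores @fence; either way the reference is consumed
 *		drm_atomic_set_fence_for_plane(new_state, fence);
 *		return 0;
 *	}
 */
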
/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		crtc_state->connector_mask &=
			~(1 << drm_connector_index(conn_state->connector));

		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			1 << drm_connector_index(conn_state->connector);

		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);

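/*
 * Usage sketch (illustrative only, not part of the original file): routing a
 * connector to a CRTC while building a modeset. Like the plane variant this
 * pulls the new CRTC's state into the atomic update and adjusts
 * connector_mask and the connector reference for us.
 *
 *	static int my_route_connector(struct drm_atomic_state *state,
 *				      struct drm_connector *connector,
 *				      struct drm_crtc *crtc)
 *	{
 *		struct drm_connector_state *conn_state;
 *
 *		conn_state = drm_atomic_get_connector_state(state, connector);
 *		if (IS_ERR(conn_state))
 *			return PTR_ERR(conn_state);
 *
 *		return drm_atomic_set_crtc_for_connector(conn_state, crtc);
 *	}
 */
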
/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

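/*
 * Usage sketch (illustrative only, not part of the original file): a driver's
 * atomic_check pulling in all connectors of a CRTC, but only when a full
 * modeset is actually needed, to avoid serializing on the connection mutex
 * for plain plane updates.
 *
 *	static int my_atomic_check(struct drm_device *dev,
 *				   struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc *crtc;
 *		struct drm_crtc_state *crtc_state;
 *		int i, ret;
 *
 *		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 *			if (!drm_atomic_crtc_needs_modeset(crtc_state))
 *				continue;
 *
 *			ret = drm_atomic_add_affected_connectors(state, crtc);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */
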
/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);

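/*
 * Usage sketch (illustrative only, not part of the original file): inside the
 * same kind of per-CRTC loop as the connector sketch above, pull in every
 * plane currently on a CRTC that needs a full modeset, so the subsequent
 * checks and the commit see a complete view of that CRTC.
 *
 *	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 *		if (!drm_atomic_crtc_needs_modeset(crtc_state))
 *			continue;
 *
 *		ret = drm_atomic_add_affected_planes(state, crtc);
 *		if (ret)
 *			return ret;
 *	}
 */
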
/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check)
		ret = config->funcs->atomic_check(state->dev, state);

	if (ret)
		return ret;

	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

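/*
 * Usage sketch (illustrative only, not part of the original file): the usual
 * check/commit flow with the w/w backoff dance the kernel-docs above refer
 * to, mirroring what drm_mode_atomic_ioctl() below does.
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		goto out_fini;
 *	state->acquire_ctx = &ctx;
 * retry:
 *	// ... build up the update: get states, set properties ...
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		if (!drm_modeset_backoff(&ctx))
 *			goto retry;
 *	}
 *	drm_atomic_state_put(state);
 * out_fini:
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */
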
static void drm_atomic_print_state(const struct drm_atomic_state *state)
{
	struct drm_printer p = drm_info_printer(state->dev->dev);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(&p, plane_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(&p, crtc_state);

	for_each_new_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(&p, connector_state);
}

static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error IRQs. (Hint: you probably want to
 * ratelimit this!)
 *
 * The caller must hold all modeset locks (drm_modeset_lock_all()), or, if
 * this is called from an error IRQ handler, the dump should not be enabled
 * by default. (I.e. if you are debugging errors you might not care that this
 * is racy, but calling this without all modeset locks held is not inherently
 * safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);

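/*
 * Usage sketch (illustrative only, not part of the original file): dumping
 * state from an error interrupt, ratelimited as the kernel-doc suggests.
 * my_handle_error_irq() is a hypothetical driver function.
 *
 *	static void my_handle_error_irq(struct drm_device *dev)
 *	{
 *		static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
 *					      DEFAULT_RATELIMIT_BURST);
 *		struct drm_printer p = drm_info_printer(dev->dev);
 *
 *		if (__ratelimit(&rs))
 *			drm_state_dump(dev, &p);
 *	}
 */
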
#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	__drm_state_dump(dev, &p, true);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
			ARRAY_SIZE(drm_atomic_debugfs_list),
			minor->debugfs_root, minor);
}
#endif

/*
 * The big monster ioctl
 */

static struct drm_pending_vblank_event *create_vblank_event(
		struct drm_crtc *crtc, uint64_t user_data)
{
	struct drm_pending_vblank_event *e = NULL;

	e = kzalloc(sizeof *e, GFP_KERNEL);
	if (!e)
		return NULL;

	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
	e->event.base.length = sizeof(e->event);
	e->event.vbl.crtc_id = crtc->base.id;
	e->event.vbl.user_data = user_data;

	return e;
}

int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
				     struct drm_connector *connector,
				     int mode)
{
	struct drm_connector *tmp_connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret, old_mode = connector->dpms;
	bool active = false;

	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;
	connector->dpms = mode;

	crtc = connector->state->crtc;
	if (!crtc)
		goto out;

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
		if (new_conn_state->crtc != crtc)
			continue;

		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
			active = true;
			break;
		}
	}

	crtc_state->active = active;
	ret = drm_atomic_commit(state);
out:
	if (ret != 0)
		connector->dpms = old_mode;
	return ret;
}

int drm_atomic_set_property(struct drm_atomic_state *state,
		struct drm_mode_object *obj,
		struct drm_property *prop,
		uint64_t prop_value)
{
	struct drm_mode_object *ref;
	int ret;

	if (!drm_property_change_valid_get(prop, prop_value, &ref))
		return -EINVAL;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		struct drm_connector_state *connector_state;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state)) {
			ret = PTR_ERR(connector_state);
			break;
		}

		ret = drm_atomic_connector_set_property(connector,
				connector_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	drm_property_change_valid_put(prop, ref);
	return ret;
}

/**
 * drm_atomic_clean_old_fb - Unset old_fb pointers and set plane->fb pointers.
 *
 * @dev: drm device to check.
 * @plane_mask: plane mask for planes that were updated.
 * @ret: return value, can be -EDEADLK for a retry.
 *
 * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
 * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
 * is a common operation for each atomic update, so this call is split off as a
 * helper.
 */
void drm_atomic_clean_old_fb(struct drm_device *dev,
			     unsigned plane_mask,
			     int ret)
{
	struct drm_plane *plane;

	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
	 * locks (ie. while it is still safe to deref plane->state).  We
	 * need to do this here because the driver entry points cannot
	 * distinguish between legacy and atomic ioctls.
	 */
	drm_for_each_plane_mask(plane, dev, plane_mask) {
		if (ret == 0) {
			struct drm_framebuffer *new_fb = plane->state->fb;

			if (new_fb)
				drm_framebuffer_get(new_fb);
			plane->fb = new_fb;
			plane->crtc = plane->state->crtc;

			if (plane->old_fb)
				drm_framebuffer_put(plane->old_fb);
		}
		plane->old_fb = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);

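/*
 * Usage sketch (illustrative only, not part of the original file): the
 * pattern drm_mode_atomic_ioctl() below follows; legacy entry points built on
 * top of atomic use the same one. Before the update each touched plane
 * records its current fb in old_fb, and after the commit (or failure) the
 * legacy pointers are fixed up while the locks are still held.
 *
 *	plane->old_fb = plane->fb;
 *	plane_mask |= 1 << drm_plane_index(plane);
 *	// ... build and commit the atomic update, result in ret ...
 *	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 */
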
/**
 * DOC: explicit fencing properties
 *
 * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A fence or a group of fences are transferred to/from
 * userspace using Sync File fds and there are two DRM properties for that:
 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
 *
 * As a contrast, with implicit fencing the kernel keeps track of any
 * ongoing rendering, and automatically ensures that the atomic update waits
 * for any pending rendering to complete. For shared buffers represented with
 * a &struct dma_buf this is tracked in &struct reservation_object.
 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
 * whereas explicit fencing is what Android wants.
 *
 * "IN_FENCE_FD":
 *	Use this property to pass a fence that DRM should wait on before
 *	proceeding with the Atomic Commit request and show the framebuffer for
 *	the plane on the screen. The fence can be either a normal fence or a
 *	merged one, the sync_file framework will handle both cases and use a
 *	fence_array if a merged fence is received. Passing -1 here means no
 *	fences to wait on.
 *
 *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
 *	it will only check if the Sync File is a valid one.
 *
 *	On the driver side the fence is stored on the @fence parameter of
 *	&struct drm_plane_state. Drivers which also support implicit fencing
 *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
 *	to make sure there's consistent behaviour between drivers in precedence
 *	of implicit vs. explicit fencing.
 *
 * "OUT_FENCE_PTR":
 *	Use this property to pass a file descriptor pointer to DRM. Once the
 *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
 *	the file descriptor number of a Sync File. This Sync File contains the
 *	CRTC fence that will be signaled when all framebuffers present on the
 *	Atomic Commit request for that given CRTC are scanned out on the
 *	screen.
 *
 *	The Atomic Commit request fails if an invalid pointer is passed. If the
 *	Atomic Commit request fails for any other reason the out fence fd
 *	returned will be -1. On an Atomic Commit with the
 *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
 *
 *	Note that out-fences don't have a special interface to drivers and are
 *	internally represented by a &struct drm_pending_vblank_event in struct
 *	&drm_crtc_state, which is also used by the nonblocking atomic commit
 *	helpers and for the DRM event handling for existing userspace.
 */

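/*
 * Userspace-side sketch (illustrative only, not part of the original file):
 * passing an in-fence and requesting an out-fence with libdrm's atomic API.
 * The property IDs (in_fence_fd_prop, out_fence_ptr_prop) are assumed to have
 * been looked up beforehand; error handling is omitted.
 *
 *	int32_t out_fence_fd = -1;
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, in_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	// out_fence_fd now holds a Sync File fd that signals once the new
 *	// framebuffers for this CRTC have been scanned out
 *	drmModeAtomicFree(req);
 */
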
struct drm_out_fence_state {
	s32 __user *out_fence_ptr;
	struct sync_file *sync_file;
	int fd;
};

static int setup_out_fence(struct drm_out_fence_state *fence_state,
			   struct dma_fence *fence)
{
	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
	if (fence_state->fd < 0)
		return fence_state->fd;

	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
		return -EFAULT;

	fence_state->sync_file = sync_file_create(fence);
	if (!fence_state->sync_file)
		return -ENOMEM;

	return 0;
}

static int prepare_crtc_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, c = 0, ret;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(crtc, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			crtc_state->event->base.fence = fence;
		}

		c++;
	}

	/*
	 * If this flag is set, userspace is waiting for an event that will
	 * never be delivered, because there is no CRTC in the commit to
	 * signal it from.
	 */
	if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	return 0;
}

static void complete_crtc_signaling(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    struct drm_out_fence_state *fence_state,
				    unsigned int num_fences,
				    bool install_fds)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (install_fds) {
		for (i = 0; i < num_fences; i++)
			fd_install(fence_state[i].fd,
				   fence_state[i].sync_file->file);

		kfree(fence_state);
		return;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_pending_vblank_event *event = crtc_state->event;
		/*
		 * Free the allocated event. drm_atomic_helper_setup_commit
		 * can allocate an event too, so only free it if it's ours
		 * to prevent a double free in drm_atomic_state_clear.
		 */
		if (event && (event->base.fence || event->base.file_priv)) {
			drm_event_cancel_free(dev, &event->base);
			crtc_state->event = NULL;
		}
	}

	if (!fence_state)
		return;

	for (i = 0; i < num_fences; i++) {
		if (fence_state[i].sync_file)
			fput(fence_state[i].sync_file->file);
		if (fence_state[i].fd >= 0)
			put_unused_fd(fence_state[i].fd);

		/* If this fails log error to the user */
		if (fence_state[i].out_fence_ptr &&
		    put_user(-1, fence_state[i].out_fence_ptr))
			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
	}

	kfree(fence_state);
}

int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_plane *plane;
	struct drm_out_fence_state *fence_state;
	unsigned plane_mask;
	int ret = 0;
	unsigned int i, j, num_fences;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EINVAL;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
			!dev->mode_config.async_page_flip)
		return -EINVAL;

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	plane_mask = 0;
	copied_objs = 0;
	copied_props = 0;
	fence_state = NULL;
	num_fences = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_put(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_put(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_put(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			ret = drm_atomic_set_property(state, obj, prop,
						      prop_value);
			if (ret) {
				drm_mode_object_put(obj);
				goto out;
			}

			copied_props++;
		}

		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
		    !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
			plane = obj_to_plane(obj);
			plane_mask |= (1 << drm_plane_index(plane));
			plane->old_fb = plane->fb;
		}
		drm_mode_object_put(obj);
	}

	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
				     &num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		if (unlikely(drm_debug & DRM_UT_STATE))
			drm_atomic_print_state(state);

		ret = drm_atomic_commit(state);
	}

out:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}