intel_atomic.c

/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 */
int intel_atomic_check(struct drm_device *dev,
		       struct drm_atomic_state *state)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	int nconnectors = dev->mode_config.num_connector;
	enum pipe nuclear_pipe = INVALID_PIPE;
	struct intel_crtc *nuclear_crtc = NULL;
	struct intel_crtc_state *crtc_state = NULL;
	int ret;
	int i;
	bool not_nuclear = false;

	/*
	 * FIXME: At the moment, we only support "nuclear pageflip" on a
	 * single CRTC. Cross-crtc updates will be added later.
	 */
	for (i = 0; i < nplanes; i++) {
		struct intel_plane *plane = to_intel_plane(state->planes[i]);

		if (!plane)
			continue;

		if (nuclear_pipe == INVALID_PIPE) {
			nuclear_pipe = plane->pipe;
		} else if (nuclear_pipe != plane->pipe) {
			DRM_DEBUG_KMS("i915 only supports atomic plane operations on a single CRTC at the moment\n");
			return -EINVAL;
		}
	}

	/*
	 * FIXME: We only handle planes for now; make sure there are no CRTCs
	 * or connectors involved.
	 */
	state->allow_modeset = false;
	for (i = 0; i < ncrtcs; i++) {
		struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);

		if (crtc)
			memset(&crtc->atomic, 0, sizeof(crtc->atomic));

		if (crtc && crtc->pipe != nuclear_pipe)
			not_nuclear = true;

		if (crtc && crtc->pipe == nuclear_pipe) {
			nuclear_crtc = crtc;
			crtc_state = to_intel_crtc_state(state->crtc_states[i]);
		}
	}

	for (i = 0; i < nconnectors; i++)
		if (state->connectors[i] != NULL)
			not_nuclear = true;

	if (not_nuclear) {
		DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	/*
	 * FIXME: move to crtc atomic check function once this is
	 * more atomic friendly.
	 */
	ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
	if (ret)
		return ret;

	return ret;
}
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * Returns:
 * Zero for success or -errno.
 */
int intel_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state,
			bool async)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret;
	int i;

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Point of no return */
	drm_atomic_helper_swap_state(dev, state);

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		if (INTEL_INFO(dev)->gen >= 9)
			skl_detach_scalers(to_intel_crtc(crtc));

		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
	}

	/* FIXME: This function should eventually call __intel_set_mode when needed */

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_state_free(state);

	return 0;
}
/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers. This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
				    const struct drm_connector_state *state,
				    struct drm_property *property,
				    uint64_t *val)
{
	int i;

	/*
	 * TODO: We only have atomic modeset for planes at the moment, so the
	 * crtc/connector code isn't quite ready yet. Until it's ready,
	 * continue to look up all property values in the DRM's shadow copy
	 * in obj->properties->values[].
	 *
	 * When the crtc/connector state work matures, this function should
	 * be updated to read the values out of the state structure instead.
	 */
	for (i = 0; i < connector->base.properties->count; i++) {
		if (connector->base.properties->properties[i] == property) {
			*val = connector->base.properties->values[i];
			return 0;
		}
	}

	return -EINVAL;
}
/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *crtc_state;

	if (WARN_ON(!intel_crtc->config))
		crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	else
		crtc_state = kmemdup(intel_crtc->config,
				     sizeof(*intel_crtc->config), GFP_KERNEL);

	if (!crtc_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

	crtc_state->base.crtc = crtc;

	return &crtc_state->base;
}
/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			 struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on the staged scaling requests for
 * a crtc and its planes. It is called from the crtc-level check path. If
 * the request is supportable, it attaches the scalers to the requested
 * planes and crtc.
 *
 * This function also takes into account any scaler(s) currently in use by
 * planes that are not part of this atomic state.
 *
 * Returns:
 * 0 - scalers were set up successfully
 * error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
			       struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state;
	struct drm_atomic_state *drm_state;
	int num_scalers_need;
	int i, j;

	if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
		return 0;

	scaler_state = &crtc_state->scaler_state;
	drm_state = crtc_state->base.state;

	num_scalers_need = hweight32(scaler_state->scaler_users);
	DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
		      crtc_state, num_scalers_need, intel_crtc->num_scalers,
		      scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
			      num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walk through scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			if (!drm_state)
				continue;

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i];

			/*
			 * to enable/disable hq mode, add planes that are using scaler
			 * into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;

				plane = drm_plane_from_index(dev, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
						      plane->base.id);
					return PTR_ERR(state);
				}
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
				continue;
			}

			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
			scaler_id = &plane_state->scaler_id;
		}

		if (*scaler_id < 0) {
			/* find a free scaler */
			for (j = 0; j < intel_crtc->num_scalers; j++) {
				if (!scaler_state->scalers[j].in_use) {
					scaler_state->scalers[j].in_use = 1;
					*scaler_id = j;
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
						      intel_crtc->pipe,
						      *scaler_id, name, idx);
					break;
				}
			}
		}

		if (WARN_ON(*scaler_id < 0)) {
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
			continue;
		}

		/* set scaler mode */
		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
			/*
			 * when only 1 scaler is in use on either pipe A or B,
			 * scaler 0 operates in high quality (HQ) mode.
			 * In this case use scaler 0 to take advantage of HQ mode
			 */
			*scaler_id = 0;
			scaler_state->scalers[0].in_use = 1;
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
			scaler_state->scalers[1].in_use = 0;
		} else {
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
		}
	}

	return 0;
}
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll_config *shared_dpll)
{
	enum intel_dpll_id i;

	/* Copy shared dpll state */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		shared_dpll[i] = pll->config;
	}
}
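
/**
 * intel_atomic_get_shared_dpll_state - get the shared DPLL state for an atomic state
 * @s: atomic state
 *
 * Returns the array of shared DPLL configurations tracked by @s, copying the
 * current DPLL configuration from the driver the first time it is requested.
 * Callers must hold connection_mutex (checked with a WARN_ON).
 */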
struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
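
/**
 * intel_atomic_state_alloc - allocate a new i915 atomic state
 * @dev: DRM device
 *
 * Allocates and initializes an intel_atomic_state, which wraps the core
 * drm_atomic_state with i915-specific data such as the shared DPLL state.
 *
 * Returns: the base drm_atomic_state on success, or NULL on allocation failure.
 */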
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}
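
/**
 * intel_atomic_state_clear - clear an i915 atomic state
 * @s: atomic state to clear
 *
 * Clears the core state with drm_atomic_state_default_clear() and resets the
 * i915-specific DPLL tracking so the state can be reused for another attempt.
 */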
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_atomic_state_default_clear(&state->base);
	state->dpll_set = false;
}