dpu_crtc.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608
  1. /*
  2. * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2013 Red Hat
  4. * Author: Rob Clark <robdclark@gmail.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  19. #include <linux/sort.h>
  20. #include <linux/debugfs.h>
  21. #include <linux/ktime.h>
  22. #include <drm/drm_mode.h>
  23. #include <drm/drm_crtc.h>
  24. #include <drm/drm_crtc_helper.h>
  25. #include <drm/drm_flip_work.h>
  26. #include <drm/drm_rect.h>
  27. #include "dpu_kms.h"
  28. #include "dpu_hw_lm.h"
  29. #include "dpu_hw_ctl.h"
  30. #include "dpu_crtc.h"
  31. #include "dpu_plane.h"
  32. #include "dpu_encoder.h"
  33. #include "dpu_vbif.h"
  34. #include "dpu_power_handle.h"
  35. #include "dpu_core_perf.h"
  36. #include "dpu_trace.h"
  37. #define DPU_DRM_BLEND_OP_NOT_DEFINED 0
  38. #define DPU_DRM_BLEND_OP_OPAQUE 1
  39. #define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
  40. #define DPU_DRM_BLEND_OP_COVERAGE 3
  41. #define DPU_DRM_BLEND_OP_MAX 4
  42. /* layer mixer index on dpu_crtc */
  43. #define LEFT_MIXER 0
  44. #define RIGHT_MIXER 1
/*
 * _dpu_crtc_get_mixer_width - width of the display slice driven per mixer
 * @cstate: dpu crtc state holding the active mixer count
 * @mode: display mode whose horizontal resolution is split
 *
 * Each layer mixer drives an equal horizontal slice of the mode.
 * NOTE(review): assumes cstate->num_mixers != 0; callers appear to invoke
 * this only after mixers are assigned - confirm at call sites to avoid a
 * divide-by-zero.
 */
static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
		struct drm_display_mode *mode)
{
	return mode->hdisplay / cstate->num_mixers;
}
/* Fetch the dpu_kms instance owning this crtc via the msm drm private data. */
static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_dpu_kms(priv->kms);
}
  55. static void dpu_crtc_destroy(struct drm_crtc *crtc)
  56. {
  57. struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
  58. DPU_DEBUG("\n");
  59. if (!crtc)
  60. return;
  61. dpu_crtc->phandle = NULL;
  62. drm_crtc_cleanup(crtc);
  63. mutex_destroy(&dpu_crtc->crtc_lock);
  64. kfree(dpu_crtc);
  65. }
/*
 * _dpu_crtc_setup_blend_cfg - program the per-stage blend rule on one mixer
 * @mixer: crtc mixer whose hw_lm is programmed
 * @pstate: plane state supplying the blend stage index
 * @format: framebuffer format; alpha_enable selects the blend rule
 */
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending: fg weighted by pixel alpha, bg by its inverse */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	/* fixed constant alphas: 0xFF foreground, 0x00 background */
	lm->ops.setup_blend_config(lm, pstate->stage,
			0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}
  87. static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
  88. {
  89. struct dpu_crtc *dpu_crtc;
  90. struct dpu_crtc_state *crtc_state;
  91. int lm_idx, lm_horiz_position;
  92. dpu_crtc = to_dpu_crtc(crtc);
  93. crtc_state = to_dpu_crtc_state(crtc->state);
  94. lm_horiz_position = 0;
  95. for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
  96. const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
  97. struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
  98. struct dpu_hw_mixer_cfg cfg;
  99. if (!lm_roi || !drm_rect_visible(lm_roi))
  100. continue;
  101. cfg.out_width = drm_rect_width(lm_roi);
  102. cfg.out_height = drm_rect_height(lm_roi);
  103. cfg.right_mixer = lm_horiz_position++;
  104. cfg.flags = 0;
  105. hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
  106. }
  107. }
/*
 * _dpu_crtc_blend_setup_mixer - build stage config and blend setup from planes
 * @crtc: drm crtc being configured
 * @dpu_crtc: dpu container; its shared stage_cfg is populated here
 * @mixer: array of cstate->num_mixers mixers receiving blend/flush updates
 *
 * Walks every plane attached to the crtc, records its pipe (and multirect
 * index) into the shared stage configuration, and accumulates per-mixer
 * flush masks and alpha op-mode bits. Finishes by programming the mixer
 * output ROIs.
 */
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		/* an alpha-enabled base stage forces blending for the
		 * opaque planes stacked above it */
		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		/* planes sharing the same zpos get consecutive sub-slots
		 * within that stage */
		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			/* bg_alpha_enable + opaque plane clears the op-mode;
			 * otherwise the plane's stage bit is OR'd in */
			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}
/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 *
 * Validates and resets every mixer assigned to the crtc, rebuilds the
 * shared stage configuration from the attached planes, then pushes the
 * resulting alpha-out, flush-mask and blend-stage programming to each
 * mixer's ctl.
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		/* clear stale blend stages before reprogramming */
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}
/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	/* dev->event_lock protects dpu_crtc->event against the commit path */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		/* consumed: clear so the event is only sent once */
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
  231. enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
  232. {
  233. struct drm_encoder *encoder;
  234. if (!crtc || !crtc->dev) {
  235. DPU_ERROR("invalid crtc\n");
  236. return INTF_MODE_NONE;
  237. }
  238. drm_for_each_encoder(encoder, crtc->dev)
  239. if (encoder->crtc == crtc)
  240. return dpu_encoder_get_intf_mode(encoder);
  241. return INTF_MODE_NONE;
  242. }
/*
 * dpu_crtc_vblank_cb - vblank callback registered with the encoder
 * @data: drm crtc pointer passed at registration time
 *
 * Updates debugfs vblank statistics, flushes any cached page-flip event
 * and forwards the vblank to the drm core.
 */
static void dpu_crtc_vblank_cb(void *data)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
/*
 * dpu_crtc_frame_event_work - deferred handler for encoder frame events
 * @work: kthread work embedded in a dpu_crtc_frame_event
 *
 * Runs in the crtc's event thread. Accounts DONE/ERROR/PANEL_DEAD events
 * against frame_pending, releases bandwidth when the last pending frame
 * retires, wakes frame-done waiters and finally returns the event object
 * to the free list for reuse by dpu_crtc_frame_event_cb().
 */
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* this should not happen */
			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
					crtc->base.id,
					fevent->event,
					ktime_to_ns(fevent->ts),
					atomic_read(&dpu_crtc->frame_pending));
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		/* panel-dead alone does not count as frame completion */
		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	/* recycle the event container for the next callback */
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	DPU_ATRACE_END("crtc_frame_event");
}
/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
 * from different context - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in proper task context
 * to avoid scheduling delay or properly manage the irq context's bottom half
 * processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	/* grab a free event container from the pool; may run in IRQ context,
	 * hence the irqsave lock and no allocation here */
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		/* pool exhausted: events are arriving faster than the
		 * worker can recycle them */
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	/* defer heavy processing to the crtc's event thread */
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
/*
 * dpu_crtc_complete_commit - post-commit completion hook
 * @crtc: Pointer to drm crtc structure
 * @old_state: previous crtc state (unused here)
 *
 * No hardware work remains at this point; only validates the crtc and
 * emits a trace marker.
 */
void dpu_crtc_complete_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}
/*
 * _dpu_crtc_setup_mixer_for_encoder - claim the LM/CTL pairs an encoder owns
 * @crtc: drm crtc whose state accumulates the mixers
 * @enc: encoder whose resource-manager reservations are walked
 *
 * Iterates the RM reservations made for @enc and appends each LM (with
 * its CTL, or the last valid CTL when CTLs are fewer than LMs) to
 * cstate->mixers, bumping cstate->num_mixers per entry.
 */
static void _dpu_crtc_setup_mixer_for_encoder(
		struct drm_crtc *crtc,
		struct drm_encoder *enc)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_rm *rm = &dpu_kms->rm;
	struct dpu_crtc_mixer *mixer;
	struct dpu_hw_ctl *last_valid_ctl = NULL;
	int i;
	struct dpu_rm_hw_iter lm_iter, ctl_iter;

	dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
	dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);

	/* Set up all the mixers and ctls reserved by this encoder */
	for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
		mixer = &cstate->mixers[i];

		if (!dpu_rm_get_hw(rm, &lm_iter))
			break;
		mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;

		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
		if (!dpu_rm_get_hw(rm, &ctl_iter)) {
			DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
					mixer->hw_lm->idx - LM_0);
			mixer->lm_ctl = last_valid_ctl;
		} else {
			mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
			last_valid_ctl = mixer->lm_ctl;
		}

		/* Shouldn't happen, mixers are always >= ctls */
		if (!mixer->lm_ctl) {
			DPU_ERROR("no valid ctls found for lm %d\n",
					mixer->hw_lm->idx - LM_0);
			return;
		}

		mixer->encoder = enc;

		cstate->num_mixers++;
		DPU_DEBUG("setup mixer %d: lm %d\n",
				i, mixer->hw_lm->idx - LM_0);
		DPU_DEBUG("setup mixer %d: ctl %d\n",
				i, mixer->lm_ctl->idx - CTL_0);
	}
}
  392. static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
  393. {
  394. struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
  395. struct drm_encoder *enc;
  396. mutex_lock(&dpu_crtc->crtc_lock);
  397. /* Check for mixers on all encoders attached to this crtc */
  398. list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
  399. if (enc->crtc != crtc)
  400. continue;
  401. _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
  402. }
  403. mutex_unlock(&dpu_crtc->crtc_lock);
  404. }
/*
 * _dpu_crtc_setup_lm_bounds - split the display horizontally across mixers
 * @crtc: drm crtc
 * @state: crtc state whose adjusted_mode defines the total area
 *
 * Assigns each mixer an equal-width, full-height slice of the mode, left
 * to right, recorded in cstate->lm_bounds.
 */
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}
/*
 * dpu_crtc_atomic_begin - atomic helper "begin" hook for the crtc
 * @crtc: drm crtc being committed
 * @old_state: previous crtc state (unused here)
 *
 * Lazily assigns mixers and their bounds on the first commit, caches the
 * pending pageflip event under dev->event_lock, arms each attached
 * encoder's pending mask and programs the blend configuration.
 */
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	/* NOTE(review): smmu_state is fetched but not used in this function;
	 * kept for parity with the original code */
	smmu_state = &dpu_crtc->smmu_state;

	/* first commit on this crtc state: bind mixers and compute bounds */
	if (!cstate->num_mixers) {
		_dpu_crtc_setup_mixers(crtc);
		_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
	}

	/* a previously cached, unsent event here is a driver bug */
	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		/* encoder will trigger pending mask now */
		dpu_encoder_trigger_kickoff_pending(encoder);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}
/*
 * dpu_crtc_atomic_flush - atomic helper "flush" hook for the crtc
 * @crtc: drm crtc being flushed
 * @old_crtc_state: previous crtc state (unused here)
 *
 * Caches any remaining pageflip event, restores power-collapsed planes,
 * updates performance settings and flushes every plane; the actual
 * kickoff is scheduled by the caller (msm atomic commit).
 */
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}
	/* NOTE(review): event_thread is looked up but not used below;
	 * the bounds check on crtc->index is the useful part */
	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers has been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, drm framework will not add
	 * those planes to current state since hardware update is not
	 * required. However, if those planes were power collapsed since
	 * last commit cycle, driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush
	 * everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}
  543. /**
  544. * dpu_crtc_destroy_state - state destroy hook
  545. * @crtc: drm CRTC
  546. * @state: CRTC state object to release
  547. */
  548. static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
  549. struct drm_crtc_state *state)
  550. {
  551. struct dpu_crtc *dpu_crtc;
  552. struct dpu_crtc_state *cstate;
  553. if (!crtc || !state) {
  554. DPU_ERROR("invalid argument(s)\n");
  555. return;
  556. }
  557. dpu_crtc = to_dpu_crtc(crtc);
  558. cstate = to_dpu_crtc_state(state);
  559. DPU_DEBUG("crtc%d\n", crtc->base.id);
  560. __drm_atomic_helper_crtc_destroy_state(state);
  561. kfree(cstate);
  562. }
  563. static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
  564. {
  565. struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
  566. int ret, rc = 0;
  567. if (!atomic_read(&dpu_crtc->frame_pending)) {
  568. DPU_DEBUG("no frames pending\n");
  569. return 0;
  570. }
  571. DPU_ATRACE_BEGIN("frame done completion wait");
  572. ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
  573. msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
  574. if (!ret) {
  575. DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
  576. rc = -ETIMEDOUT;
  577. }
  578. DPU_ATRACE_END("frame done completion wait");
  579. return rc;
  580. }
/*
 * dpu_crtc_commit_kickoff - trigger the hardware flush/start for a commit
 * @crtc: drm crtc to kick off
 *
 * Prepares every attached encoder, waits for the previous frame's
 * frame-done event, bumps frame_pending accounting, clears VBIF errors
 * and finally kicks each encoder. Bails out early for disabled crtcs
 * (no mixers allocated).
 */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev = crtc->dev;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	int ret;

	/*
	 * If no mixers has been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct dpu_encoder_kickoff_params params = { 0 };

		if (encoder->crtc != crtc)
			continue;

		/*
		 * Encoder will flush/start now, unless it has a tx pending.
		 * If so, it may delay and flush at an irq event (e.g. ppdone)
		 */
		dpu_encoder_prepare_for_kickoff(encoder, &params);
	}

	/* wait for frame_event_done completion */
	DPU_ATRACE_BEGIN("wait_for_frame_done_event");
	ret = _dpu_crtc_wait_for_frame_done(crtc);
	DPU_ATRACE_END("wait_for_frame_done_event");
	if (ret) {
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));
		goto end;
	}

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
	} else
		DPU_DEBUG("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	/* clear stale bus errors before this frame goes out */
	dpu_vbif_clear_errors(dpu_kms);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		dpu_encoder_kickoff(encoder);
	}

end:
	/* re-arm the completion for the next frame's wait */
	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}
/**
 * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
 * @dpu_crtc: Pointer to dpu crtc structure
 * @enable: Whether to enable/disable vblanks
 *
 * Caller must hold dpu_crtc->crtc_lock; the lock is temporarily dropped
 * around the runtime-PM calls (see comments below).
 */
static void _dpu_crtc_vblank_enable_no_lock(
		struct dpu_crtc *dpu_crtc, bool enable)
{
	struct drm_crtc *crtc = &dpu_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *enc;

	if (enable) {
		/* drop lock since power crtc cb may try to re-acquire lock */
		mutex_unlock(&dpu_crtc->crtc_lock);
		pm_runtime_get_sync(dev->dev);
		mutex_lock(&dpu_crtc->crtc_lock);

		/* register the vblank callback on every attached encoder */
		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			if (enc->crtc != crtc)
				continue;

			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
						     DRMID(enc), enable,
						     dpu_crtc);

			dpu_encoder_register_vblank_callback(enc,
					dpu_crtc_vblank_cb, (void *)crtc);
		}
	} else {
		/* unregister first, then release the power reference */
		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			if (enc->crtc != crtc)
				continue;

			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
						     DRMID(enc), enable,
						     dpu_crtc);

			dpu_encoder_register_vblank_callback(enc, NULL, NULL);
		}

		/* drop lock since power crtc cb may try to re-acquire lock */
		mutex_unlock(&dpu_crtc->crtc_lock);
		pm_runtime_put_sync(dev->dev);
		mutex_lock(&dpu_crtc->crtc_lock);
	}
}
  673. /**
  674. * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
  675. * @crtc: Pointer to drm crtc object
  676. * @enable: true to enable suspend, false to indicate resume
  677. */
  678. static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
  679. {
  680. struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
  681. DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
  682. mutex_lock(&dpu_crtc->crtc_lock);
  683. /*
  684. * If the vblank is enabled, release a power reference on suspend
  685. * and take it back during resume (if it is still enabled).
  686. */
  687. trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
  688. if (dpu_crtc->suspend == enable)
  689. DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
  690. crtc->base.id, enable);
  691. else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
  692. _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
  693. }
  694. dpu_crtc->suspend = enable;
  695. mutex_unlock(&dpu_crtc->crtc_lock);
  696. }
  697. /**
  698. * dpu_crtc_duplicate_state - state duplicate hook
  699. * @crtc: Pointer to drm crtc structure
  700. * @Returns: Pointer to new drm_crtc_state structure
  701. */
  702. static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
  703. {
  704. struct dpu_crtc *dpu_crtc;
  705. struct dpu_crtc_state *cstate, *old_cstate;
  706. if (!crtc || !crtc->state) {
  707. DPU_ERROR("invalid argument(s)\n");
  708. return NULL;
  709. }
  710. dpu_crtc = to_dpu_crtc(crtc);
  711. old_cstate = to_dpu_crtc_state(crtc->state);
  712. cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
  713. if (!cstate) {
  714. DPU_ERROR("failed to allocate state\n");
  715. return NULL;
  716. }
  717. /* duplicate base helper */
  718. __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
  719. return &cstate->base;
  720. }
  721. /**
  722. * dpu_crtc_reset - reset hook for CRTCs
  723. * Resets the atomic state for @crtc by freeing the state pointer (which might
  724. * be NULL, e.g. at driver load time) and allocating a new empty state object.
  725. * @crtc: Pointer to drm crtc structure
  726. */
  727. static void dpu_crtc_reset(struct drm_crtc *crtc)
  728. {
  729. struct dpu_crtc *dpu_crtc;
  730. struct dpu_crtc_state *cstate;
  731. if (!crtc) {
  732. DPU_ERROR("invalid crtc\n");
  733. return;
  734. }
  735. /* revert suspend actions, if necessary */
  736. if (dpu_kms_is_suspend_state(crtc->dev))
  737. _dpu_crtc_set_suspend(crtc, false);
  738. /* remove previous state, if present */
  739. if (crtc->state) {
  740. dpu_crtc_destroy_state(crtc, crtc->state);
  741. crtc->state = 0;
  742. }
  743. dpu_crtc = to_dpu_crtc(crtc);
  744. cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
  745. if (!cstate) {
  746. DPU_ERROR("failed to allocate state\n");
  747. return;
  748. }
  749. cstate->base.crtc = crtc;
  750. crtc->state = &cstate->base;
  751. }
  752. static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
  753. {
  754. struct drm_crtc *crtc = arg;
  755. struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
  756. struct drm_encoder *encoder;
  757. mutex_lock(&dpu_crtc->crtc_lock);
  758. trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
  759. /* restore encoder; crtc will be programmed during commit */
  760. drm_for_each_encoder(encoder, crtc->dev) {
  761. if (encoder->crtc != crtc)
  762. continue;
  763. dpu_encoder_virt_restore(encoder);
  764. }
  765. mutex_unlock(&dpu_crtc->crtc_lock);
  766. }
  767. static void dpu_crtc_disable(struct drm_crtc *crtc)
  768. {
  769. struct dpu_crtc *dpu_crtc;
  770. struct dpu_crtc_state *cstate;
  771. struct drm_display_mode *mode;
  772. struct drm_encoder *encoder;
  773. struct msm_drm_private *priv;
  774. unsigned long flags;
  775. if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
  776. DPU_ERROR("invalid crtc\n");
  777. return;
  778. }
  779. dpu_crtc = to_dpu_crtc(crtc);
  780. cstate = to_dpu_crtc_state(crtc->state);
  781. mode = &cstate->base.adjusted_mode;
  782. priv = crtc->dev->dev_private;
  783. DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
  784. if (dpu_kms_is_suspend_state(crtc->dev))
  785. _dpu_crtc_set_suspend(crtc, true);
  786. /* Disable/save vblank irq handling */
  787. drm_crtc_vblank_off(crtc);
  788. mutex_lock(&dpu_crtc->crtc_lock);
  789. /* wait for frame_event_done completion */
  790. if (_dpu_crtc_wait_for_frame_done(crtc))
  791. DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
  792. crtc->base.id,
  793. atomic_read(&dpu_crtc->frame_pending));
  794. trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
  795. if (dpu_crtc->enabled && !dpu_crtc->suspend &&
  796. dpu_crtc->vblank_requested) {
  797. _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
  798. }
  799. dpu_crtc->enabled = false;
  800. if (atomic_read(&dpu_crtc->frame_pending)) {
  801. trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
  802. atomic_read(&dpu_crtc->frame_pending));
  803. dpu_core_perf_crtc_release_bw(crtc);
  804. atomic_set(&dpu_crtc->frame_pending, 0);
  805. }
  806. dpu_core_perf_crtc_update(crtc, 0, true);
  807. drm_for_each_encoder(encoder, crtc->dev) {
  808. if (encoder->crtc != crtc)
  809. continue;
  810. dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
  811. }
  812. if (dpu_crtc->power_event)
  813. dpu_power_handle_unregister_event(dpu_crtc->phandle,
  814. dpu_crtc->power_event);
  815. memset(cstate->mixers, 0, sizeof(cstate->mixers));
  816. cstate->num_mixers = 0;
  817. /* disable clk & bw control until clk & bw properties are set */
  818. cstate->bw_control = false;
  819. cstate->bw_split_vote = false;
  820. mutex_unlock(&dpu_crtc->crtc_lock);
  821. if (crtc->state->event && !crtc->state->active) {
  822. spin_lock_irqsave(&crtc->dev->event_lock, flags);
  823. drm_crtc_send_vblank_event(crtc, crtc->state->event);
  824. crtc->state->event = NULL;
  825. spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
  826. }
  827. }
  828. static void dpu_crtc_enable(struct drm_crtc *crtc,
  829. struct drm_crtc_state *old_crtc_state)
  830. {
  831. struct dpu_crtc *dpu_crtc;
  832. struct drm_encoder *encoder;
  833. struct msm_drm_private *priv;
  834. if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
  835. DPU_ERROR("invalid crtc\n");
  836. return;
  837. }
  838. priv = crtc->dev->dev_private;
  839. DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
  840. dpu_crtc = to_dpu_crtc(crtc);
  841. drm_for_each_encoder(encoder, crtc->dev) {
  842. if (encoder->crtc != crtc)
  843. continue;
  844. dpu_encoder_register_frame_event_callback(encoder,
  845. dpu_crtc_frame_event_cb, (void *)crtc);
  846. }
  847. mutex_lock(&dpu_crtc->crtc_lock);
  848. trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
  849. if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
  850. dpu_crtc->vblank_requested) {
  851. _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
  852. }
  853. dpu_crtc->enabled = true;
  854. mutex_unlock(&dpu_crtc->crtc_lock);
  855. /* Enable/restore vblank irq handling */
  856. drm_crtc_vblank_on(crtc);
  857. dpu_crtc->power_event = dpu_power_handle_register_event(
  858. dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
  859. dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
  860. }
/* scratch record used by atomic_check to stage and validate planes */
struct plane_state {
	struct dpu_plane_state *dpu_pstate;	/* driver-private plane state */
	const struct drm_plane_state *drm_pstate;	/* base drm plane state */
	int stage;	/* normalized blend stage (z position) */
	u32 pipe_id;	/* hardware pipe (SSPP) backing this plane */
};
  867. static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
  868. struct drm_crtc_state *state)
  869. {
  870. struct dpu_crtc *dpu_crtc;
  871. struct plane_state *pstates;
  872. struct dpu_crtc_state *cstate;
  873. const struct drm_plane_state *pstate;
  874. struct drm_plane *plane;
  875. struct drm_display_mode *mode;
  876. int cnt = 0, rc = 0, mixer_width, i, z_pos;
  877. struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
  878. int multirect_count = 0;
  879. const struct drm_plane_state *pipe_staged[SSPP_MAX];
  880. int left_zpos_cnt = 0, right_zpos_cnt = 0;
  881. struct drm_rect crtc_rect = { 0 };
  882. if (!crtc) {
  883. DPU_ERROR("invalid crtc\n");
  884. return -EINVAL;
  885. }
  886. pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
  887. dpu_crtc = to_dpu_crtc(crtc);
  888. cstate = to_dpu_crtc_state(state);
  889. if (!state->enable || !state->active) {
  890. DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
  891. crtc->base.id, state->enable, state->active);
  892. goto end;
  893. }
  894. mode = &state->adjusted_mode;
  895. DPU_DEBUG("%s: check", dpu_crtc->name);
  896. /* force a full mode set if active state changed */
  897. if (state->active_changed)
  898. state->mode_changed = true;
  899. memset(pipe_staged, 0, sizeof(pipe_staged));
  900. mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
  901. _dpu_crtc_setup_lm_bounds(crtc, state);
  902. crtc_rect.x2 = mode->hdisplay;
  903. crtc_rect.y2 = mode->vdisplay;
  904. /* get plane state for all drm planes associated with crtc state */
  905. drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
  906. struct drm_rect dst, clip = crtc_rect;
  907. if (IS_ERR_OR_NULL(pstate)) {
  908. rc = PTR_ERR(pstate);
  909. DPU_ERROR("%s: failed to get plane%d state, %d\n",
  910. dpu_crtc->name, plane->base.id, rc);
  911. goto end;
  912. }
  913. if (cnt >= DPU_STAGE_MAX * 4)
  914. continue;
  915. pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
  916. pstates[cnt].drm_pstate = pstate;
  917. pstates[cnt].stage = pstate->normalized_zpos;
  918. pstates[cnt].pipe_id = dpu_plane_pipe(plane);
  919. if (pipe_staged[pstates[cnt].pipe_id]) {
  920. multirect_plane[multirect_count].r0 =
  921. pipe_staged[pstates[cnt].pipe_id];
  922. multirect_plane[multirect_count].r1 = pstate;
  923. multirect_count++;
  924. pipe_staged[pstates[cnt].pipe_id] = NULL;
  925. } else {
  926. pipe_staged[pstates[cnt].pipe_id] = pstate;
  927. }
  928. cnt++;
  929. dst = drm_plane_state_dest(pstate);
  930. if (!drm_rect_intersect(&clip, &dst)) {
  931. DPU_ERROR("invalid vertical/horizontal destination\n");
  932. DPU_ERROR("display: " DRM_RECT_FMT " plane: "
  933. DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
  934. DRM_RECT_ARG(&dst));
  935. rc = -E2BIG;
  936. goto end;
  937. }
  938. }
  939. for (i = 1; i < SSPP_MAX; i++) {
  940. if (pipe_staged[i]) {
  941. dpu_plane_clear_multirect(pipe_staged[i]);
  942. if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
  943. DPU_ERROR(
  944. "r1 only virt plane:%d not supported\n",
  945. pipe_staged[i]->plane->base.id);
  946. rc = -EINVAL;
  947. goto end;
  948. }
  949. }
  950. }
  951. z_pos = -1;
  952. for (i = 0; i < cnt; i++) {
  953. /* reset counts at every new blend stage */
  954. if (pstates[i].stage != z_pos) {
  955. left_zpos_cnt = 0;
  956. right_zpos_cnt = 0;
  957. z_pos = pstates[i].stage;
  958. }
  959. /* verify z_pos setting before using it */
  960. if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
  961. DPU_ERROR("> %d plane stages assigned\n",
  962. DPU_STAGE_MAX - DPU_STAGE_0);
  963. rc = -EINVAL;
  964. goto end;
  965. } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
  966. if (left_zpos_cnt == 2) {
  967. DPU_ERROR("> 2 planes @ stage %d on left\n",
  968. z_pos);
  969. rc = -EINVAL;
  970. goto end;
  971. }
  972. left_zpos_cnt++;
  973. } else {
  974. if (right_zpos_cnt == 2) {
  975. DPU_ERROR("> 2 planes @ stage %d on right\n",
  976. z_pos);
  977. rc = -EINVAL;
  978. goto end;
  979. }
  980. right_zpos_cnt++;
  981. }
  982. pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
  983. DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
  984. }
  985. for (i = 0; i < multirect_count; i++) {
  986. if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
  987. DPU_ERROR(
  988. "multirect validation failed for planes (%d - %d)\n",
  989. multirect_plane[i].r0->plane->base.id,
  990. multirect_plane[i].r1->plane->base.id);
  991. rc = -EINVAL;
  992. goto end;
  993. }
  994. }
  995. rc = dpu_core_perf_crtc_check(crtc, state);
  996. if (rc) {
  997. DPU_ERROR("crtc%d failed performance check %d\n",
  998. crtc->base.id, rc);
  999. goto end;
  1000. }
  1001. /* validate source split:
  1002. * use pstates sorted by stage to check planes on same stage
  1003. * we assume that all pipes are in source split so its valid to compare
  1004. * without taking into account left/right mixer placement
  1005. */
  1006. for (i = 1; i < cnt; i++) {
  1007. struct plane_state *prv_pstate, *cur_pstate;
  1008. struct drm_rect left_rect, right_rect;
  1009. int32_t left_pid, right_pid;
  1010. int32_t stage;
  1011. prv_pstate = &pstates[i - 1];
  1012. cur_pstate = &pstates[i];
  1013. if (prv_pstate->stage != cur_pstate->stage)
  1014. continue;
  1015. stage = cur_pstate->stage;
  1016. left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
  1017. left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
  1018. right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
  1019. right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
  1020. if (right_rect.x1 < left_rect.x1) {
  1021. swap(left_pid, right_pid);
  1022. swap(left_rect, right_rect);
  1023. }
  1024. /**
  1025. * - planes are enumerated in pipe-priority order such that
  1026. * planes with lower drm_id must be left-most in a shared
  1027. * blend-stage when using source split.
  1028. * - planes in source split must be contiguous in width
  1029. * - planes in source split must have same dest yoff and height
  1030. */
  1031. if (right_pid < left_pid) {
  1032. DPU_ERROR(
  1033. "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
  1034. stage, left_pid, right_pid);
  1035. rc = -EINVAL;
  1036. goto end;
  1037. } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
  1038. DPU_ERROR("non-contiguous coordinates for src split. "
  1039. "stage: %d left: " DRM_RECT_FMT " right: "
  1040. DRM_RECT_FMT "\n", stage,
  1041. DRM_RECT_ARG(&left_rect),
  1042. DRM_RECT_ARG(&right_rect));
  1043. rc = -EINVAL;
  1044. goto end;
  1045. } else if (left_rect.y1 != right_rect.y1 ||
  1046. drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
  1047. DPU_ERROR("source split at stage: %d. invalid "
  1048. "yoff/height: left: " DRM_RECT_FMT " right: "
  1049. DRM_RECT_FMT "\n", stage,
  1050. DRM_RECT_ARG(&left_rect),
  1051. DRM_RECT_ARG(&right_rect));
  1052. rc = -EINVAL;
  1053. goto end;
  1054. }
  1055. }
  1056. end:
  1057. kfree(pstates);
  1058. return rc;
  1059. }
  1060. int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
  1061. {
  1062. struct dpu_crtc *dpu_crtc;
  1063. if (!crtc) {
  1064. DPU_ERROR("invalid crtc\n");
  1065. return -EINVAL;
  1066. }
  1067. dpu_crtc = to_dpu_crtc(crtc);
  1068. mutex_lock(&dpu_crtc->crtc_lock);
  1069. trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
  1070. if (dpu_crtc->enabled && !dpu_crtc->suspend) {
  1071. _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
  1072. }
  1073. dpu_crtc->vblank_requested = en;
  1074. mutex_unlock(&dpu_crtc->crtc_lock);
  1075. return 0;
  1076. }
  1077. #ifdef CONFIG_DEBUG_FS
/*
 * _dpu_debugfs_status_show - dump crtc, mixer and plane state plus a
 * vblank-derived FPS estimate to the per-crtc "status" debugfs file.
 */
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;
	int i, out_width;

	if (!s || !s->private)
		return -EINVAL;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	/* hold all modeset locks so the dumped state is self-consistent */
	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mutex_lock(&dpu_crtc->crtc_lock);
	mode = &crtc->state->adjusted_mode;
	out_width = _dpu_crtc_get_mixer_width(cstate, mode);

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	/* per-mixer layout (lm/ctl indices and output size) */
	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	/* per-plane stage, framebuffer and src/dst geometry */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);

		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}

	/* derive FPS from vblanks seen since the last read of this file */
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);

	mutex_unlock(&dpu_crtc->crtc_lock);
	drm_modeset_unlock_all(crtc->dev);

	return 0;
}
/* debugfs open hook binding the "status" file to _dpu_debugfs_status_show */
static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}
/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS - generate a single_open()-based open function
 * and a matching file_operations table (<__prefix>_fops) for a seq_file
 * show function named <__prefix>_show.
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
  1186. static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
  1187. {
  1188. struct drm_crtc *crtc = (struct drm_crtc *) s->private;
  1189. struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
  1190. int i;
  1191. seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
  1192. seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
  1193. seq_printf(s, "core_clk_rate: %llu\n",
  1194. dpu_crtc->cur_perf.core_clk_rate);
  1195. for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
  1196. i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
  1197. seq_printf(s, "bw_ctl[%s]: %llu\n",
  1198. dpu_power_handle_get_dbus_name(i),
  1199. dpu_crtc->cur_perf.bw_ctl[i]);
  1200. seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
  1201. dpu_power_handle_get_dbus_name(i),
  1202. dpu_crtc->cur_perf.max_per_pipe_ib[i]);
  1203. }
  1204. return 0;
  1205. }
  1206. DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
  1207. static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
  1208. {
  1209. struct dpu_crtc *dpu_crtc;
  1210. struct dpu_kms *dpu_kms;
  1211. static const struct file_operations debugfs_status_fops = {
  1212. .open = _dpu_debugfs_status_open,
  1213. .read = seq_read,
  1214. .llseek = seq_lseek,
  1215. .release = single_release,
  1216. };
  1217. if (!crtc)
  1218. return -EINVAL;
  1219. dpu_crtc = to_dpu_crtc(crtc);
  1220. dpu_kms = _dpu_crtc_get_kms(crtc);
  1221. dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
  1222. crtc->dev->primary->debugfs_root);
  1223. if (!dpu_crtc->debugfs_root)
  1224. return -ENOMEM;
  1225. /* don't error check these */
  1226. debugfs_create_file("status", 0400,
  1227. dpu_crtc->debugfs_root,
  1228. dpu_crtc, &debugfs_status_fops);
  1229. debugfs_create_file("state", 0600,
  1230. dpu_crtc->debugfs_root,
  1231. &dpu_crtc->base,
  1232. &dpu_crtc_debugfs_state_fops);
  1233. return 0;
  1234. }
  1235. static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
  1236. {
  1237. struct dpu_crtc *dpu_crtc;
  1238. if (!crtc)
  1239. return;
  1240. dpu_crtc = to_dpu_crtc(crtc);
  1241. debugfs_remove_recursive(dpu_crtc->debugfs_root);
  1242. }
#else
/* CONFIG_DEBUG_FS disabled: no-op stubs so callers need no #ifdefs */
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}

static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
}
#endif /* CONFIG_DEBUG_FS */
/* drm_crtc_funcs.late_register hook: create per-crtc debugfs entries */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}
/* drm_crtc_funcs.early_unregister hook: remove per-crtc debugfs entries */
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	_dpu_crtc_destroy_debugfs(crtc);
}
/* crtc core ops: atomic helpers plus dpu-specific state/debugfs hooks */
static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};
/* crtc helper ops invoked by the atomic commit machinery */
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};
  1277. /* initialize crtc */
  1278. struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
  1279. struct drm_plane *cursor)
  1280. {
  1281. struct drm_crtc *crtc = NULL;
  1282. struct dpu_crtc *dpu_crtc = NULL;
  1283. struct msm_drm_private *priv = NULL;
  1284. struct dpu_kms *kms = NULL;
  1285. int i;
  1286. priv = dev->dev_private;
  1287. kms = to_dpu_kms(priv->kms);
  1288. dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
  1289. if (!dpu_crtc)
  1290. return ERR_PTR(-ENOMEM);
  1291. crtc = &dpu_crtc->base;
  1292. crtc->dev = dev;
  1293. mutex_init(&dpu_crtc->crtc_lock);
  1294. spin_lock_init(&dpu_crtc->spin_lock);
  1295. atomic_set(&dpu_crtc->frame_pending, 0);
  1296. init_completion(&dpu_crtc->frame_done_comp);
  1297. INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
  1298. for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
  1299. INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
  1300. list_add(&dpu_crtc->frame_events[i].list,
  1301. &dpu_crtc->frame_event_list);
  1302. kthread_init_work(&dpu_crtc->frame_events[i].work,
  1303. dpu_crtc_frame_event_work);
  1304. }
  1305. drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
  1306. NULL);
  1307. drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
  1308. /* save user friendly CRTC name for later */
  1309. snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
  1310. /* initialize event handling */
  1311. spin_lock_init(&dpu_crtc->event_lock);
  1312. dpu_crtc->phandle = &kms->phandle;
  1313. DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
  1314. return crtc;
  1315. }