mdp5_ctl.c

/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */
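/*
 * A minimal lifecycle sketch of how a caller (e.g. the CRTC/encoder code)
 * might use this pool manager; the variable names below are illustrative
 * and not taken from this file:
 *
 *	struct mdp5_ctl *ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *	if (!ctl)
 *		return;		// no CTL available
 *
 *	mdp5_ctl_set_pipeline(ctl, &pipeline);
 *	mdp5_ctl_set_encoder_state(ctl, &pipeline, true);
 *
 *	// per frame: stage planes, then flush the double-buffered registers
 *	mdp5_ctl_blend(ctl, &pipeline, stage, r_stage, stage_cnt, flags);
 *	mdp5_ctl_commit(ctl, &pipeline, flush_mask);
 */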
#define CTL_STAT_BUSY	0x1
#define CTL_STAT_BOOKED	0x2

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;

	/* CTL status bitmask */
	u32 status;

	bool encoder_enabled;
	uint32_t start_mask;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};
static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
	struct mdp5_interface *intf = pipeline->intf;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;

	ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
			  mdp_ctl_flush_mask_encoder(intf);
	if (r_mixer)
		ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, pipeline);

	return 0;
}
static bool start_signal_needed(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (!ctl->encoder_enabled || ctl->start_mask != 0)
		return false;

	switch (intf->type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
static void refill_start_mask(struct mdp5_ctl *ctl,
			      struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;

	ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	/*
	 * The Writeback encoder needs to program & flush its
	 * address registers for each page flip.
	 */
	if (intf->type == INTF_WB)
		ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl: the CTL instance
 * @pipeline: the CTL's active display pipeline
 * @enabled: true when the encoder is ready for data streaming; false otherwise
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;
	DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
		refill_start_mask(ctl, pipeline);
	}

	return 0;
}
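/*
 * A minimal call-site sketch (illustrative; not taken from the encoder
 * code): an encoder's enable/disable hooks would flip this state around
 * their own hardware programming, e.g.
 *
 *	mdp5_ctl_set_encoder_state(ctl, &pipeline, true);	// on enable
 *	...
 *	mdp5_ctl_set_encoder_state(ctl, &pipeline, false);	// on disable
 *
 * For Writeback and DSI command mode, enabling here is what arms the START
 * signal: start_signal_needed() fires once the encoder is enabled and all
 * bits in start_mask have been flushed via mdp5_ctl_commit().
 */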
/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;

	if (unlikely(WARN_ON(!mixer))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
			ctl->id);
		return -EINVAL;
	}

	if (pipeline->r_mixer) {
		dev_err(ctl_mgr->dev->dev, "unsupported configuration");
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}
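/*
 * A usage sketch of the note above (illustrative only): a cursor update is
 * only latched once the caller follows up with a commit that carries the
 * cursor flush bit, e.g.
 *
 *	mdp5_ctl_set_cursor(ctl, &pipeline, 0, true);
 *	mdp5_ctl_commit(ctl, &pipeline, mdp_ctl_flush_mask_cursor(0));
 *
 * mdp5_ctl_commit() notices that pending_ctl_trigger overlaps the flush
 * mask and ORs in MDP5_CTL_FLUSH_CTL itself, so the CTL register flush
 * requested in the note is handled there.
 */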
static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
			      enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	case SSPP_CURSOR0:
	case SSPP_CURSOR1:
	default:	return 0;
	}
}

static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
				  enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
	case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
	default:	return 0;
	}
}
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
		blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
			r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}
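/*
 * A staging sketch (illustrative; array setup elided): stage[] is indexed
 * by blend stage, with a left and a right pipe slot per stage. Staging two
 * planes over a border-color base could look like:
 *
 *	stage[STAGE0][PIPE_LEFT] = SSPP_RGB0;
 *	stage[STAGE1][PIPE_LEFT] = SSPP_VIG0;
 *
 *	mdp5_ctl_blend(ctl, &pipeline, stage, NULL, 2,
 *		       MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);
 *
 * With BORDER_OUT the stage walk starts at STAGE0 and the base layer is
 * the border color; without it, it starts at STAGE_BASE. Unstaged slots
 * are assumed to hold a "no pipe" value so the mask helpers return 0 for
 * them, and r_stage may be NULL here because it is only read when the
 * pipeline has an r_mixer.
 */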
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
	case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0: return MDP5_CTL_FLUSH_LM0;
	case 1: return MDP5_CTL_FLUSH_LM1;
	case 2: return MDP5_CTL_FLUSH_LM2;
	case 5: return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}
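/*
 * Composition sketch (illustrative values): a typical plane/page-flip
 * update ORs the helpers above into one mask for mdp5_ctl_commit(), e.g.
 *
 *	u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
 *			 mdp_ctl_flush_mask_lm(mixer->lm) |
 *			 mdp_ctl_flush_mask_encoder(pipeline.intf);
 *
 * Bits that the current hardware config does not implement are either
 * filtered out against flush_hw_mask or remapped by fix_sw_flush() inside
 * mdp5_ctl_commit().
 */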
static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

	return sw_mask;
}

static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
				 u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
				*flush_id);
		}
	}
}
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: the H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	ctl->start_mask &= ~flush_mask;

	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
		ctl->start_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
		refill_start_mask(ctl, pipeline);
	}

	return curr_ctl_flush_mask;
}
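/*
 * A follow-up sketch (illustrative, and assuming the hardware clears FLUSH
 * bits once it has latched the double-buffered registers): the return value
 * is this CTL's flush bits after masking against flush_hw_mask, which a
 * caller could poll against the FLUSH status, e.g.
 *
 *	u32 flushed = mdp5_ctl_commit(ctl, &pipeline, flush_mask);
 *
 *	while (mdp5_ctl_get_commit_status(ctl) & flushed)
 *		cpu_relax();	// illustrative busy-wait only
 *
 * Real callers would bound such a wait (vblank event or timeout) rather
 * than spin indefinitely.
 */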
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}
/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support single FLUSH */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}
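/*
 * Pairing sketch (illustrative): for split dual-DSI the two booked CTLs are
 * tied together once; afterwards mdp5_ctl_commit() accumulates each CTL's
 * bits into single_flush_pending_mask and issues one combined FLUSH when
 * both halves have committed:
 *
 *	ret = mdp5_ctl_pair(ctl0, ctl1, true);	// enable single FLUSH
 *	...
 *	mdp5_ctl_pair(ctl0, ctl1, false);	// tear down on disable
 */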
/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * Return: the allocated CTL, or NULL if no CTL is available.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
				   int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category first */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		"fall back to the other CTL category for INTF %d!\n", intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
			ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
	 * DSI interfaces to support the single FLUSH feature (flush both CTL0
	 * and CTL1 by writing only CTL0's FLUSH register), which keeps the two
	 * DSI pipes in sync.
	 * Single FLUSH is supported from hw rev v3.0.
	 */
	if (rev >= 3) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}