mdp5_ctl.c

/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mdp5_kms.h"
#include "mdp5_ctl.h"
/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), a single CTL can be
 * shared across multiple CRTCs.
 */
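
/*
 * Typical call sequence (illustrative sketch based only on the functions
 * defined below; the actual callers are the CRTC/encoder code elsewhere
 * in the driver):
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, intf_num);
 *	mdp5_ctl_set_pipeline(ctl, pipeline);
 *	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, stage_cnt, flags);
 *	mdp5_ctl_commit(ctl, pipeline, flush_mask, true);
 */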

#define CTL_STAT_BUSY	0x1
#define CTL_STAT_BOOKED	0x2

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;

	/* CTL status bitmask */
	u32 status;

	bool encoder_enabled;

	/* pending flush_mask bits */
	u32 flush_mask;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}
static void set_display_intf(struct mdp5_kms *mdp5_kms,
		struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
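
/*
 * set_ctl_op() - program the CTL_OP register for this data path
 *
 * Encodes the interface number, the operating mode (DSI command mode,
 * WB line mode) and, when a right mixer is used, 3D pack mode into
 * REG_MDP5_CTL_OP, under the CTL's hw_lock.
 */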
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
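
/*
 * mdp5_ctl_set_pipeline() - bind a display pipeline to this CTL
 *
 * Routes the pipeline's interface in DISP_INTF_SEL (unless the interface
 * is virtual, e.g. writeback) and programs the CTL_OP register.
 */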
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
	struct mdp5_interface *intf = pipeline->intf;

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, pipeline);

	return 0;
}
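
/*
 * start_signal_needed() - does this data path require an explicit START?
 *
 * Only relevant once the encoder is enabled; true for writeback and for
 * DSI command mode, which are kicked off explicitly rather than by a
 * continuous video timing.
 */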
static bool start_signal_needed(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (!ctl->encoder_enabled)
		return false;

	switch (intf->type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to
 * be sent in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @enabled: true when the encoder is ready for data streaming; false otherwise.
 *
 * Note:
 * This encoder state is needed to trigger the START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;
	DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return 0;
}

/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;

	if (unlikely(WARN_ON(!mixer))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
			ctl->id);
		return -EINVAL;
	}

	if (pipeline->r_mixer) {
		dev_err(ctl_mgr->dev->dev, "unsupported configuration");
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}
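
/*
 * mdp_ctl_blend_mask() - LAYER register bits for a pipe at a blend stage
 *
 * Returns the CTL_LAYER bitfield that stages @pipe at @stage on a layer
 * mixer; cursor pipes have no field in this register and return 0.
 */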
static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	case SSPP_CURSOR0:
	case SSPP_CURSOR1:
	default: return 0;
	}
}
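
/*
 * mdp_ctl_blend_ext_mask() - LAYER_EXT register bits for a pipe at a stage
 *
 * The extension register carries the extra stage bit (BIT3) needed for
 * stages >= STAGE6 and the stage fields of the cursor pipes.
 */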
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
		enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
	case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
	default: return 0;
	}
}
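
/*
 * mdp5_ctl_reset_blend_regs() - clear the blend configuration of every LM
 *
 * Zeroes the LAYER and LAYER_EXT registers of this CTL for all layer
 * mixers before a new stage configuration is written.
 */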
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

#define PIPE_LEFT	0
#define PIPE_RIGHT	1
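
/*
 * mdp5_ctl_blend() - program the stage (blend) configuration
 *
 * Builds the LAYER/LAYER_EXT masks for every staged pipe on the left
 * (and, for dual-pipe setups, right) mixer, optionally enabling the
 * border color at the base stage, and records which LM flush bits must
 * accompany the next commit.
 */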
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
		blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
			r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}
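
/*
 * mdp_ctl_flush_mask_*() - translate a resource into its CTL_FLUSH bit(s)
 *
 * Each helper maps an interface, cursor, pipe or layer mixer to the
 * corresponding bit in the FLUSH register; unknown resources map to 0.
 */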
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
	case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0: return MDP5_CTL_FLUSH_LM0;
	case 1: return MDP5_CTL_FLUSH_LM1;
	case 2: return MDP5_CTL_FLUSH_LM2;
	case 3: return MDP5_CTL_FLUSH_LM3;
	case 4: return MDP5_CTL_FLUSH_LM4;
	case 5: return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}
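
/*
 * fix_sw_flush() - emulate FLUSH bits the hardware does not implement
 *
 * When a requested flush bit is absent from flush_hw_mask, substitute the
 * bit(s) that achieve the same effect on this target.
 */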
static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

	return sw_mask;
}
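
/*
 * fix_for_single_flush() - merge flushes of paired CTLs (single FLUSH)
 *
 * When this CTL is paired, its flush is held back and accumulated in the
 * manager's single_flush_pending_mask; once both CTLs of the pair are
 * pending, the combined mask is issued through the lower CTL id.
 */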
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
		u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
				*flush_id);
		}
	}
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate that several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: the H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		ctl->flush_mask |= flush_mask;
		return curr_ctl_flush_mask;
	} else {
		flush_mask |= ctl->flush_mask;
		ctl->flush_mask = 0;
	}

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return curr_ctl_flush_mask;
}
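
/*
 * mdp5_ctl_get_commit_status() - read back the pending FLUSH bits
 *
 * A non-zero value means the hardware has not yet latched all flushed
 * registers for this CTL.
 */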
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing silently if hw doesn't support it */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}

/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * Return: NULL if no CTL is available.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
				   int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category first */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		"fall back to the other CTL category for INTF %d!\n", intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}
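
/*
 * mdp5_ctlm_hw_reset() - put every CTL's OP register back to a known state
 *
 * Clears REG_MDP5_CTL_OP for each CTL in the pool while holding its hw_lock.
 */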
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
{
	kfree(ctl_mgr);
}
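
/*
 * mdp5_ctlm_init() - create and populate the CTL pool manager
 *
 * Allocates the manager, initializes one mdp5_ctl per CTL described in the
 * hardware config and, when the target supports single FLUSH (hw rev >= v3.0
 * with two DSI interfaces), books CTL0/CTL1 for the DSI interfaces.
 *
 * Return: the manager, or an ERR_PTR() on failure.
 */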
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	unsigned dsi_cnt = 0;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		dev_err(dev->dev, "failed to allocate CTL manager\n");
		ret = -ENOMEM;
		goto fail;
	}

	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
		dev_err(dev->dev, "Increase static pool size to at least %d\n",
				ctl_cfg->count);
		ret = -ENOSPC;
		goto fail;
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			goto fail;
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the dual DSI case, CTL0 and CTL1 are always assigned to the two
	 * DSI interfaces to support the single FLUSH feature (CTL0 and CTL1
	 * are both flushed by writing only to CTL0's FLUSH register), which
	 * keeps the two DSI pipes in sync.
	 * Single FLUSH is supported from hw rev v3.0.
	 */
	for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
		if (hw_cfg->intf.connect[c] == INTF_DSI)
			dsi_cnt++;
	if ((rev >= 3) && (dsi_cnt > 1)) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;

fail:
	if (ctl_mgr)
		mdp5_ctlm_destroy(ctl_mgr);

	return ERR_PTR(ret);
}