/* dpu_hw_ctl.c */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
  12. #include <linux/delay.h>
  13. #include "dpu_hwio.h"
  14. #include "dpu_hw_ctl.h"
  15. #include "dpu_dbg.h"
  16. #include "dpu_kms.h"
/*
 * CTL register offsets, relative to the CTL block base.
 *
 * The per-mixer LAYER registers for LM_0..LM_4 are contiguous 4-byte
 * slots starting at offset 0; LM_5 is not contiguous with them, hence
 * the special case in CTL_LAYER().
 */
#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))

#define CTL_TOP                 0x014
#define CTL_FLUSH               0x018
#define CTL_START               0x01C
#define CTL_PREPARE             0x0d0
#define CTL_SW_RESET            0x030
#define CTL_LAYER_EXTN_OFFSET   0x40

/* Force border color output when no layers are staged */
#define CTL_MIXER_BORDER_OUT    BIT(24)
/* Flush the CTL path's own registers along with the requested blocks */
#define CTL_FLUSH_MASK_CTL      BIT(17)

/* Max time to wait for a CTL path sw reset to complete (see poll fn) */
#define DPU_REG_RESET_TIMEOUT_US        2000
  34. static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
  35. struct dpu_mdss_cfg *m,
  36. void __iomem *addr,
  37. struct dpu_hw_blk_reg_map *b)
  38. {
  39. int i;
  40. for (i = 0; i < m->ctl_count; i++) {
  41. if (ctl == m->ctl[i].id) {
  42. b->base_off = addr;
  43. b->blk_off = m->ctl[i].base;
  44. b->length = m->ctl[i].len;
  45. b->hwversion = m->hwversion;
  46. b->log_mask = DPU_DBG_MASK_CTL;
  47. return &m->ctl[i];
  48. }
  49. }
  50. return ERR_PTR(-ENOMEM);
  51. }
  52. static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
  53. enum dpu_lm lm)
  54. {
  55. int i;
  56. int stages = -EINVAL;
  57. for (i = 0; i < count; i++) {
  58. if (lm == mixer[i].id) {
  59. stages = mixer[i].sblk->maxblendstages;
  60. break;
  61. }
  62. }
  63. return stages;
  64. }
  65. static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
  66. {
  67. DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
  68. }
  69. static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
  70. {
  71. DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
  72. }
  73. static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
  74. {
  75. ctx->pending_flush_mask = 0x0;
  76. }
  77. static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
  78. u32 flushbits)
  79. {
  80. ctx->pending_flush_mask |= flushbits;
  81. }
  82. static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
  83. {
  84. if (!ctx)
  85. return 0x0;
  86. return ctx->pending_flush_mask;
  87. }
  88. static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
  89. {
  90. DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
  91. }
  92. static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
  93. {
  94. struct dpu_hw_blk_reg_map *c = &ctx->hw;
  95. return DPU_REG_READ(c, CTL_FLUSH);
  96. }
  97. static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
  98. enum dpu_sspp sspp)
  99. {
  100. uint32_t flushbits = 0;
  101. switch (sspp) {
  102. case SSPP_VIG0:
  103. flushbits = BIT(0);
  104. break;
  105. case SSPP_VIG1:
  106. flushbits = BIT(1);
  107. break;
  108. case SSPP_VIG2:
  109. flushbits = BIT(2);
  110. break;
  111. case SSPP_VIG3:
  112. flushbits = BIT(18);
  113. break;
  114. case SSPP_RGB0:
  115. flushbits = BIT(3);
  116. break;
  117. case SSPP_RGB1:
  118. flushbits = BIT(4);
  119. break;
  120. case SSPP_RGB2:
  121. flushbits = BIT(5);
  122. break;
  123. case SSPP_RGB3:
  124. flushbits = BIT(19);
  125. break;
  126. case SSPP_DMA0:
  127. flushbits = BIT(11);
  128. break;
  129. case SSPP_DMA1:
  130. flushbits = BIT(12);
  131. break;
  132. case SSPP_DMA2:
  133. flushbits = BIT(24);
  134. break;
  135. case SSPP_DMA3:
  136. flushbits = BIT(25);
  137. break;
  138. case SSPP_CURSOR0:
  139. flushbits = BIT(22);
  140. break;
  141. case SSPP_CURSOR1:
  142. flushbits = BIT(23);
  143. break;
  144. default:
  145. break;
  146. }
  147. return flushbits;
  148. }
  149. static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
  150. enum dpu_lm lm)
  151. {
  152. uint32_t flushbits = 0;
  153. switch (lm) {
  154. case LM_0:
  155. flushbits = BIT(6);
  156. break;
  157. case LM_1:
  158. flushbits = BIT(7);
  159. break;
  160. case LM_2:
  161. flushbits = BIT(8);
  162. break;
  163. case LM_3:
  164. flushbits = BIT(9);
  165. break;
  166. case LM_4:
  167. flushbits = BIT(10);
  168. break;
  169. case LM_5:
  170. flushbits = BIT(20);
  171. break;
  172. default:
  173. return -EINVAL;
  174. }
  175. flushbits |= CTL_FLUSH_MASK_CTL;
  176. return flushbits;
  177. }
  178. static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
  179. u32 *flushbits, enum dpu_intf intf)
  180. {
  181. switch (intf) {
  182. case INTF_0:
  183. *flushbits |= BIT(31);
  184. break;
  185. case INTF_1:
  186. *flushbits |= BIT(30);
  187. break;
  188. case INTF_2:
  189. *flushbits |= BIT(29);
  190. break;
  191. case INTF_3:
  192. *flushbits |= BIT(28);
  193. break;
  194. default:
  195. return -EINVAL;
  196. }
  197. return 0;
  198. }
  199. static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
  200. {
  201. struct dpu_hw_blk_reg_map *c = &ctx->hw;
  202. ktime_t timeout;
  203. u32 status;
  204. timeout = ktime_add_us(ktime_get(), timeout_us);
  205. /*
  206. * it takes around 30us to have mdp finish resetting its ctl path
  207. * poll every 50us so that reset should be completed at 1st poll
  208. */
  209. do {
  210. status = DPU_REG_READ(c, CTL_SW_RESET);
  211. status &= 0x1;
  212. if (status)
  213. usleep_range(20, 50);
  214. } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
  215. return status;
  216. }
  217. static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
  218. {
  219. struct dpu_hw_blk_reg_map *c = &ctx->hw;
  220. pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
  221. DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
  222. if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
  223. return -EINVAL;
  224. return 0;
  225. }
  226. static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
  227. {
  228. struct dpu_hw_blk_reg_map *c = &ctx->hw;
  229. u32 status;
  230. status = DPU_REG_READ(c, CTL_SW_RESET);
  231. status &= 0x01;
  232. if (!status)
  233. return 0;
  234. pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
  235. if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
  236. pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
  237. return -EINVAL;
  238. }
  239. return 0;
  240. }
  241. static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
  242. {
  243. struct dpu_hw_blk_reg_map *c = &ctx->hw;
  244. int i;
  245. for (i = 0; i < ctx->mixer_count; i++) {
  246. DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
  247. DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
  248. DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
  249. DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
  250. }
  251. }
/*
 * dpu_hw_ctl_setup_blendstage - program mixer @lm's blend-stage registers
 * @ctx:       CTL context
 * @lm:        layer mixer to program
 * @stage_cfg: pipes staged per blend level; NULL writes border-out only
 *
 * Each staged pipe is encoded as its stage number (i + 1) spread across
 * the LAYER / LAYER_EXT / LAYER_EXT2 / LAYER_EXT3 registers.  The base
 * LAYER register holds 3-bit stage fields; stages above 7 overflow into
 * the EXT register's extra bit.  RECT_1 of multirect-capable pipes and
 * the DMA2/DMA3/cursor pipes use 4-bit fields in EXT2/EXT3 instead.
 */
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	/* source split allows more than one pipe per blend stage */
	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/*
					 * NOTE(review): this OR-assign mutates
					 * 'mix', which is shared by any later
					 * pipe in the same stage; for i >= 7
					 * the 3-bit and 4-bit encodings differ,
					 * so a subsequent RGB/VIG pipe in this
					 * stage would see a corrupted value.
					 * Confirm intent against the HW
					 * programming guide before changing.
					 */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					/* NOTE(review): same 'mix' clobber as SSPP_DMA2 above */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
  377. static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
  378. struct dpu_hw_intf_cfg *cfg)
  379. {
  380. struct dpu_hw_blk_reg_map *c = &ctx->hw;
  381. u32 intf_cfg = 0;
  382. intf_cfg |= (cfg->intf & 0xF) << 4;
  383. if (cfg->mode_3d) {
  384. intf_cfg |= BIT(19);
  385. intf_cfg |= (cfg->mode_3d - 0x1) << 20;
  386. }
  387. switch (cfg->intf_mode_sel) {
  388. case DPU_CTL_MODE_SEL_VID:
  389. intf_cfg &= ~BIT(17);
  390. intf_cfg &= ~(0x3 << 15);
  391. break;
  392. case DPU_CTL_MODE_SEL_CMD:
  393. intf_cfg |= BIT(17);
  394. intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
  395. break;
  396. default:
  397. pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
  398. return;
  399. }
  400. DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
  401. }
  402. static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
  403. unsigned long cap)
  404. {
  405. ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
  406. ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
  407. ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
  408. ops->trigger_flush = dpu_hw_ctl_trigger_flush;
  409. ops->get_flush_register = dpu_hw_ctl_get_flush_register;
  410. ops->trigger_start = dpu_hw_ctl_trigger_start;
  411. ops->trigger_pending = dpu_hw_ctl_trigger_pending;
  412. ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
  413. ops->reset = dpu_hw_ctl_reset_control;
  414. ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
  415. ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
  416. ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
  417. ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
  418. ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
  419. ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
  420. };
/* The CTL block needs no block-level start/stop hooks. */
static struct dpu_hw_blk_ops dpu_hw_ops = {
	.start = NULL,
	.stop = NULL,
};
  425. struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
  426. void __iomem *addr,
  427. struct dpu_mdss_cfg *m)
  428. {
  429. struct dpu_hw_ctl *c;
  430. struct dpu_ctl_cfg *cfg;
  431. int rc;
  432. c = kzalloc(sizeof(*c), GFP_KERNEL);
  433. if (!c)
  434. return ERR_PTR(-ENOMEM);
  435. cfg = _ctl_offset(idx, m, addr, &c->hw);
  436. if (IS_ERR_OR_NULL(cfg)) {
  437. kfree(c);
  438. pr_err("failed to create dpu_hw_ctl %d\n", idx);
  439. return ERR_PTR(-EINVAL);
  440. }
  441. c->caps = cfg;
  442. _setup_ctl_ops(&c->ops, c->caps->features);
  443. c->idx = idx;
  444. c->mixer_count = m->mixer_count;
  445. c->mixer_hw_caps = m->mixer;
  446. rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
  447. if (rc) {
  448. DPU_ERROR("failed to init hw blk %d\n", rc);
  449. goto blk_init_error;
  450. }
  451. return c;
  452. blk_init_error:
  453. kzfree(c);
  454. return ERR_PTR(rc);
  455. }
  456. void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
  457. {
  458. if (ctx)
  459. dpu_hw_blk_destroy(&ctx->base);
  460. kfree(ctx);
  461. }