rockchip_drm_vop.c

  1. /*
  2. * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  3. * Author:Mark Yao <mark.yao@rock-chips.com>
  4. *
  5. * This software is licensed under the terms of the GNU General Public
  6. * License version 2, as published by the Free Software Foundation, and
  7. * may be copied, distributed, and modified under those terms.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <drm/drm.h>
  15. #include <drm/drmP.h>
  16. #include <drm/drm_atomic.h>
  17. #include <drm/drm_crtc.h>
  18. #include <drm/drm_crtc_helper.h>
  19. #include <drm/drm_flip_work.h>
  20. #include <drm/drm_plane_helper.h>
  21. #ifdef CONFIG_DRM_ANALOGIX_DP
  22. #include <drm/bridge/analogix_dp.h>
  23. #endif
  24. #include <linux/kernel.h>
  25. #include <linux/module.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/clk.h>
  28. #include <linux/iopoll.h>
  29. #include <linux/of.h>
  30. #include <linux/of_device.h>
  31. #include <linux/pm_runtime.h>
  32. #include <linux/component.h>
  33. #include <linux/overflow.h>
  34. #include <linux/reset.h>
  35. #include <linux/delay.h>
  36. #include "rockchip_drm_drv.h"
  37. #include "rockchip_drm_gem.h"
  38. #include "rockchip_drm_fb.h"
  39. #include "rockchip_drm_psr.h"
  40. #include "rockchip_drm_vop.h"
  41. #include "rockchip_rgb.h"
  42. #define VOP_WIN_SET(x, win, name, v) \
  43. vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
  44. #define VOP_SCL_SET(x, win, name, v) \
  45. vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
  46. #define VOP_SCL_SET_EXT(x, win, name, v) \
  47. vop_reg_set(vop, &win->phy->scl->ext->name, \
  48. win->base, ~0, v, #name)
  49. #define VOP_INTR_SET_MASK(vop, name, mask, v) \
  50. vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)
  51. #define VOP_REG_SET(vop, group, name, v) \
  52. vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)
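/*
 * VOP_INTR_SET_TYPE()/VOP_INTR_GET_TYPE() below translate an interrupt-type
 * bitmask (e.g. FS_INTR | LINE_FLAG_INTR) into the per-SoC bit positions
 * listed in vop->data->intr->intrs[], so callers never have to know the raw
 * layout of the interrupt registers.
 */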
  53. #define VOP_INTR_SET_TYPE(vop, name, type, v) \
  54. do { \
  55. int i, reg = 0, mask = 0; \
  56. for (i = 0; i < vop->data->intr->nintrs; i++) { \
  57. if (vop->data->intr->intrs[i] & type) { \
  58. reg |= (v) << i; \
  59. mask |= 1 << i; \
  60. } \
  61. } \
  62. VOP_INTR_SET_MASK(vop, name, mask, reg); \
  63. } while (0)
  64. #define VOP_INTR_GET_TYPE(vop, name, type) \
  65. vop_get_intr_type(vop, &vop->data->intr->name, type)
  66. #define VOP_WIN_GET(x, win, name) \
  67. vop_read_reg(x, win->offset, win->phy->name)
  68. #define VOP_WIN_GET_YRGBADDR(vop, win) \
  69. vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
  70. #define VOP_WIN_TO_INDEX(vop_win) \
  71. ((vop_win) - (vop_win)->vop->win)
  72. #define to_vop(x) container_of(x, struct vop, crtc)
  73. #define to_vop_win(x) container_of(x, struct vop_win, base)
  74. enum vop_pending {
  75. VOP_PENDING_FB_UNREF,
  76. };
  77. struct vop_win {
  78. struct drm_plane base;
  79. const struct vop_win_data *data;
  80. struct vop *vop;
  81. };
  82. struct rockchip_rgb;
  83. struct vop {
  84. struct drm_crtc crtc;
  85. struct device *dev;
  86. struct drm_device *drm_dev;
  87. bool is_enabled;
  88. struct completion dsp_hold_completion;
  89. /* protected by dev->event_lock */
  90. struct drm_pending_vblank_event *event;
  91. struct drm_flip_work fb_unref_work;
  92. unsigned long pending;
  93. struct completion line_flag_completion;
  94. const struct vop_data *data;
  95. uint32_t *regsbak;
  96. void __iomem *regs;
  97. /* physical map length of vop register */
  98. uint32_t len;
  99. /* only one process at a time is allowed to configure the registers */
  100. spinlock_t reg_lock;
  101. /* lock vop irq reg */
  102. spinlock_t irq_lock;
  103. /* protects crtc enable/disable */
  104. struct mutex vop_lock;
  105. unsigned int irq;
  106. /* vop AHB clk */
  107. struct clk *hclk;
  108. /* vop dclk */
  109. struct clk *dclk;
  110. /* vop shared memory bus (axi) clk */
  111. struct clk *aclk;
  112. /* vop dclk reset */
  113. struct reset_control *dclk_rst;
  114. /* optional internal rgb encoder */
  115. struct rockchip_rgb *rgb;
  116. struct vop_win win[];
  117. };
  118. static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
  119. {
  120. writel(v, vop->regs + offset);
  121. vop->regsbak[offset >> 2] = v;
  122. }
  123. static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
  124. {
  125. return readl(vop->regs + offset);
  126. }
  127. static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
  128. const struct vop_reg *reg)
  129. {
  130. return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
  131. }
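/*
 * Write one register field. For write-mask style registers the new value is
 * placed in the low 16 bits and the write-enable mask in the high 16 bits,
 * so no read-modify-write is needed. For all other registers the field is
 * merged into the shadow copy in vop->regsbak (the hardware registers are
 * not read back) and the whole word is written out.
 */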
  132. static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
  133. uint32_t _offset, uint32_t _mask, uint32_t v,
  134. const char *reg_name)
  135. {
  136. int offset, mask, shift;
  137. if (!reg || !reg->mask) {
  138. DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name);
  139. return;
  140. }
  141. offset = reg->offset + _offset;
  142. mask = reg->mask & _mask;
  143. shift = reg->shift;
  144. if (reg->write_mask) {
  145. v = ((v << shift) & 0xffff) | (mask << (shift + 16));
  146. } else {
  147. uint32_t cached_val = vop->regsbak[offset >> 2];
  148. v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
  149. vop->regsbak[offset >> 2] = v;
  150. }
  151. if (reg->relaxed)
  152. writel_relaxed(v, vop->regs + offset);
  153. else
  154. writel(v, vop->regs + offset);
  155. }
  156. static inline uint32_t vop_get_intr_type(struct vop *vop,
  157. const struct vop_reg *reg, int type)
  158. {
  159. uint32_t i, ret = 0;
  160. uint32_t regs = vop_read_reg(vop, 0, reg);
  161. for (i = 0; i < vop->data->intr->nintrs; i++) {
  162. if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
  163. ret |= vop->data->intr->intrs[i];
  164. }
  165. return ret;
  166. }
  167. static inline void vop_cfg_done(struct vop *vop)
  168. {
  169. VOP_REG_SET(vop, common, cfg_done, 1);
  170. }
  171. static bool has_rb_swapped(uint32_t format)
  172. {
  173. switch (format) {
  174. case DRM_FORMAT_XBGR8888:
  175. case DRM_FORMAT_ABGR8888:
  176. case DRM_FORMAT_BGR888:
  177. case DRM_FORMAT_BGR565:
  178. return true;
  179. default:
  180. return false;
  181. }
  182. }
  183. static enum vop_data_format vop_convert_format(uint32_t format)
  184. {
  185. switch (format) {
  186. case DRM_FORMAT_XRGB8888:
  187. case DRM_FORMAT_ARGB8888:
  188. case DRM_FORMAT_XBGR8888:
  189. case DRM_FORMAT_ABGR8888:
  190. return VOP_FMT_ARGB8888;
  191. case DRM_FORMAT_RGB888:
  192. case DRM_FORMAT_BGR888:
  193. return VOP_FMT_RGB888;
  194. case DRM_FORMAT_RGB565:
  195. case DRM_FORMAT_BGR565:
  196. return VOP_FMT_RGB565;
  197. case DRM_FORMAT_NV12:
  198. return VOP_FMT_YUV420SP;
  199. case DRM_FORMAT_NV16:
  200. return VOP_FMT_YUV422SP;
  201. case DRM_FORMAT_NV24:
  202. return VOP_FMT_YUV444SP;
  203. default:
  204. DRM_ERROR("unsupported format[%08x]\n", format);
  205. return -EINVAL;
  206. }
  207. }
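/*
 * Compute the scale factor for one direction as a fixed-point value, where
 * 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT corresponds to 1:1 scaling. For vertical
 * down-scaling the caller may pass @vskiplines so the hardware can skip
 * source lines and scale the remaining lines bilinearly.
 */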
  208. static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
  209. uint32_t dst, bool is_horizontal,
  210. int vsu_mode, int *vskiplines)
  211. {
  212. uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
  213. if (vskiplines)
  214. *vskiplines = 0;
  215. if (is_horizontal) {
  216. if (mode == SCALE_UP)
  217. val = GET_SCL_FT_BIC(src, dst);
  218. else if (mode == SCALE_DOWN)
  219. val = GET_SCL_FT_BILI_DN(src, dst);
  220. } else {
  221. if (mode == SCALE_UP) {
  222. if (vsu_mode == SCALE_UP_BIL)
  223. val = GET_SCL_FT_BILI_UP(src, dst);
  224. else
  225. val = GET_SCL_FT_BIC(src, dst);
  226. } else if (mode == SCALE_DOWN) {
  227. if (vskiplines) {
  228. *vskiplines = scl_get_vskiplines(src, dst);
  229. val = scl_get_bili_dn_vskip(src, dst,
  230. *vskiplines);
  231. } else {
  232. val = GET_SCL_FT_BILI_DN(src, dst);
  233. }
  234. }
  235. }
  236. return val;
  237. }
  238. static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
  239. uint32_t src_w, uint32_t src_h, uint32_t dst_w,
  240. uint32_t dst_h, uint32_t pixel_format)
  241. {
  242. uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
  243. uint16_t cbcr_hor_scl_mode = SCALE_NONE;
  244. uint16_t cbcr_ver_scl_mode = SCALE_NONE;
  245. int hsub = drm_format_horz_chroma_subsampling(pixel_format);
  246. int vsub = drm_format_vert_chroma_subsampling(pixel_format);
  247. const struct drm_format_info *info;
  248. bool is_yuv = false;
  249. uint16_t cbcr_src_w = src_w / hsub;
  250. uint16_t cbcr_src_h = src_h / vsub;
  251. uint16_t vsu_mode;
  252. uint16_t lb_mode;
  253. uint32_t val;
  254. int vskiplines;
  255. info = drm_format_info(pixel_format);
  256. if (info->is_yuv)
  257. is_yuv = true;
  258. if (dst_w > 3840) {
  259. DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
  260. return;
  261. }
  262. if (!win->phy->scl->ext) {
  263. VOP_SCL_SET(vop, win, scale_yrgb_x,
  264. scl_cal_scale2(src_w, dst_w));
  265. VOP_SCL_SET(vop, win, scale_yrgb_y,
  266. scl_cal_scale2(src_h, dst_h));
  267. if (is_yuv) {
  268. VOP_SCL_SET(vop, win, scale_cbcr_x,
  269. scl_cal_scale2(cbcr_src_w, dst_w));
  270. VOP_SCL_SET(vop, win, scale_cbcr_y,
  271. scl_cal_scale2(cbcr_src_h, dst_h));
  272. }
  273. return;
  274. }
  275. yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
  276. yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
  277. if (is_yuv) {
  278. cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
  279. cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
  280. if (cbcr_hor_scl_mode == SCALE_DOWN)
  281. lb_mode = scl_vop_cal_lb_mode(dst_w, true);
  282. else
  283. lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
  284. } else {
  285. if (yrgb_hor_scl_mode == SCALE_DOWN)
  286. lb_mode = scl_vop_cal_lb_mode(dst_w, false);
  287. else
  288. lb_mode = scl_vop_cal_lb_mode(src_w, false);
  289. }
  290. VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
  291. if (lb_mode == LB_RGB_3840X2) {
  292. if (yrgb_ver_scl_mode != SCALE_NONE) {
  293. DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
  294. return;
  295. }
  296. if (cbcr_ver_scl_mode != SCALE_NONE) {
  297. DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
  298. return;
  299. }
  300. vsu_mode = SCALE_UP_BIL;
  301. } else if (lb_mode == LB_RGB_2560X4) {
  302. vsu_mode = SCALE_UP_BIL;
  303. } else {
  304. vsu_mode = SCALE_UP_BIC;
  305. }
  306. val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
  307. true, 0, NULL);
  308. VOP_SCL_SET(vop, win, scale_yrgb_x, val);
  309. val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
  310. false, vsu_mode, &vskiplines);
  311. VOP_SCL_SET(vop, win, scale_yrgb_y, val);
  312. VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
  313. VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);
  314. VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
  315. VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
  316. VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
  317. VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
  318. VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
  319. if (is_yuv) {
  320. val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
  321. dst_w, true, 0, NULL);
  322. VOP_SCL_SET(vop, win, scale_cbcr_x, val);
  323. val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
  324. dst_h, false, vsu_mode, &vskiplines);
  325. VOP_SCL_SET(vop, win, scale_cbcr_y, val);
  326. VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
  327. VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
  328. VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
  329. VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
  330. VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
  331. VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
  332. VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
  333. }
  334. }
  335. static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
  336. {
  337. unsigned long flags;
  338. if (WARN_ON(!vop->is_enabled))
  339. return;
  340. spin_lock_irqsave(&vop->irq_lock, flags);
  341. VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
  342. VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
  343. spin_unlock_irqrestore(&vop->irq_lock, flags);
  344. }
  345. static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
  346. {
  347. unsigned long flags;
  348. if (WARN_ON(!vop->is_enabled))
  349. return;
  350. spin_lock_irqsave(&vop->irq_lock, flags);
  351. VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);
  352. spin_unlock_irqrestore(&vop->irq_lock, flags);
  353. }
  354. /*
  355. * (1) each frame starts at the start of the Vsync pulse which is signaled by
  356. * the "FRAME_SYNC" interrupt.
  357. * (2) the active data region of each frame ends at dsp_vact_end
  358. * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
  359. * to get "LINE_FLAG" interrupt at the end of the active on screen data.
  360. *
  361. * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
  362. * Interrupts
  363. * LINE_FLAG -------------------------------+
  364. * FRAME_SYNC ----+ |
  365. * | |
  366. * v v
  367. * | Vsync | Vbp | Vactive | Vfp |
  368. * ^ ^ ^ ^
  369. * | | | |
  370. * | | | |
  371. * dsp_vs_end ------------+ | | | VOP_DSP_VTOTAL_VS_END
  372. * dsp_vact_start --------------+ | | VOP_DSP_VACT_ST_END
  373. * dsp_vact_end ----------------------------+ | VOP_DSP_VACT_ST_END
  374. * dsp_total -------------------------------------+ VOP_DSP_VTOTAL_VS_END
  375. */
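/*
 * For example, with the standard CEA-861 1920x1080@60 mode (vdisplay = 1080,
 * vsync_start = 1084, vtotal = 1125), vop_crtc_atomic_enable() computes
 *   vact_st  = vtotal - vsync_start = 41    (Vsync + Vbp)
 *   vact_end = vact_st + vdisplay   = 1121
 * and programs vact_end into line_flag_num[0], so LINE_FLAG fires at the end
 * of the last active line.
 */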
  376. static bool vop_line_flag_irq_is_enabled(struct vop *vop)
  377. {
  378. uint32_t line_flag_irq;
  379. unsigned long flags;
  380. spin_lock_irqsave(&vop->irq_lock, flags);
  381. line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);
  382. spin_unlock_irqrestore(&vop->irq_lock, flags);
  383. return !!line_flag_irq;
  384. }
  385. static void vop_line_flag_irq_enable(struct vop *vop)
  386. {
  387. unsigned long flags;
  388. if (WARN_ON(!vop->is_enabled))
  389. return;
  390. spin_lock_irqsave(&vop->irq_lock, flags);
  391. VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
  392. VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);
  393. spin_unlock_irqrestore(&vop->irq_lock, flags);
  394. }
  395. static void vop_line_flag_irq_disable(struct vop *vop)
  396. {
  397. unsigned long flags;
  398. if (WARN_ON(!vop->is_enabled))
  399. return;
  400. spin_lock_irqsave(&vop->irq_lock, flags);
  401. VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);
  402. spin_unlock_irqrestore(&vop->irq_lock, flags);
  403. }
  404. static int vop_core_clks_enable(struct vop *vop)
  405. {
  406. int ret;
  407. ret = clk_enable(vop->hclk);
  408. if (ret < 0)
  409. return ret;
  410. ret = clk_enable(vop->aclk);
  411. if (ret < 0)
  412. goto err_disable_hclk;
  413. return 0;
  414. err_disable_hclk:
  415. clk_disable(vop->hclk);
  416. return ret;
  417. }
  418. static void vop_core_clks_disable(struct vop *vop)
  419. {
  420. clk_disable(vop->aclk);
  421. clk_disable(vop->hclk);
  422. }
  423. static int vop_enable(struct drm_crtc *crtc)
  424. {
  425. struct vop *vop = to_vop(crtc);
  426. int ret, i;
  427. ret = pm_runtime_get_sync(vop->dev);
  428. if (ret < 0) {
  429. DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
  430. return ret;
  431. }
  432. ret = vop_core_clks_enable(vop);
  433. if (WARN_ON(ret < 0))
  434. goto err_put_pm_runtime;
  435. ret = clk_enable(vop->dclk);
  436. if (WARN_ON(ret < 0))
  437. goto err_disable_core;
  438. /*
  439. * Slave iommu shares power, irq and clock with vop. It was associated
  440. * automatically with this master device via common driver code.
  441. * Now that we have enabled the clock we attach it to the shared drm
  442. * mapping.
  443. */
  444. ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
  445. if (ret) {
  446. DRM_DEV_ERROR(vop->dev,
  447. "failed to attach dma mapping, %d\n", ret);
  448. goto err_disable_dclk;
  449. }
  450. spin_lock(&vop->reg_lock);
  451. for (i = 0; i < vop->len; i += 4)
  452. writel_relaxed(vop->regsbak[i / 4], vop->regs + i);
  453. /*
  454. * We need to make sure that all windows are disabled before we
  455. * enable the crtc. Otherwise we might try to scan from a destroyed
  456. * buffer later.
  457. */
  458. for (i = 0; i < vop->data->win_size; i++) {
  459. struct vop_win *vop_win = &vop->win[i];
  460. const struct vop_win_data *win = vop_win->data;
  461. VOP_WIN_SET(vop, win, enable, 0);
  462. }
  463. spin_unlock(&vop->reg_lock);
  464. vop_cfg_done(vop);
  465. /*
  466. * At this point the vop clock & iommu are enabled, so R/W of vop regs is safe.
  467. */
  468. vop->is_enabled = true;
  469. spin_lock(&vop->reg_lock);
  470. VOP_REG_SET(vop, common, standby, 1);
  471. spin_unlock(&vop->reg_lock);
  472. drm_crtc_vblank_on(crtc);
  473. return 0;
  474. err_disable_dclk:
  475. clk_disable(vop->dclk);
  476. err_disable_core:
  477. vop_core_clks_disable(vop);
  478. err_put_pm_runtime:
  479. pm_runtime_put_sync(vop->dev);
  480. return ret;
  481. }
  482. static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
  483. struct drm_crtc_state *old_state)
  484. {
  485. struct vop *vop = to_vop(crtc);
  486. WARN_ON(vop->event);
  487. mutex_lock(&vop->vop_lock);
  488. drm_crtc_vblank_off(crtc);
  489. /*
  490. * Vop standby takes effect at the end of the current frame;
  491. * the dsp hold valid irq signals that standby has completed.
  492. *
  493. * We must wait for standby to complete before disabling aclk,
  494. * otherwise the memory bus may hang.
  495. */
  496. reinit_completion(&vop->dsp_hold_completion);
  497. vop_dsp_hold_valid_irq_enable(vop);
  498. spin_lock(&vop->reg_lock);
  499. VOP_REG_SET(vop, common, standby, 1);
  500. spin_unlock(&vop->reg_lock);
  501. wait_for_completion(&vop->dsp_hold_completion);
  502. vop_dsp_hold_valid_irq_disable(vop);
  503. vop->is_enabled = false;
  504. /*
  505. * vop standby is complete, so it is safe to detach the iommu.
  506. */
  507. rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
  508. clk_disable(vop->dclk);
  509. vop_core_clks_disable(vop);
  510. pm_runtime_put(vop->dev);
  511. mutex_unlock(&vop->vop_lock);
  512. if (crtc->state->event && !crtc->state->active) {
  513. spin_lock_irq(&crtc->dev->event_lock);
  514. drm_crtc_send_vblank_event(crtc, crtc->state->event);
  515. spin_unlock_irq(&crtc->dev->event_lock);
  516. crtc->state->event = NULL;
  517. }
  518. }
  519. static void vop_plane_destroy(struct drm_plane *plane)
  520. {
  521. drm_plane_cleanup(plane);
  522. }
  523. static int vop_plane_atomic_check(struct drm_plane *plane,
  524. struct drm_plane_state *state)
  525. {
  526. struct drm_crtc *crtc = state->crtc;
  527. struct drm_crtc_state *crtc_state;
  528. struct drm_framebuffer *fb = state->fb;
  529. struct vop_win *vop_win = to_vop_win(plane);
  530. const struct vop_win_data *win = vop_win->data;
  531. int ret;
  532. int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
  533. DRM_PLANE_HELPER_NO_SCALING;
  534. int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
  535. DRM_PLANE_HELPER_NO_SCALING;
  536. if (!crtc || !fb)
  537. return 0;
  538. crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
  539. if (WARN_ON(!crtc_state))
  540. return -EINVAL;
  541. ret = drm_atomic_helper_check_plane_state(state, crtc_state,
  542. min_scale, max_scale,
  543. true, true);
  544. if (ret)
  545. return ret;
  546. if (!state->visible)
  547. return 0;
  548. ret = vop_convert_format(fb->format->format);
  549. if (ret < 0)
  550. return ret;
  551. /*
  552. * Src.x1 can be odd after clipping, but the start point of a yuv plane
  553. * must be aligned to 2 pixels.
  554. */
  555. if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
  556. DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
  557. return -EINVAL;
  558. }
  559. return 0;
  560. }
  561. static void vop_plane_atomic_disable(struct drm_plane *plane,
  562. struct drm_plane_state *old_state)
  563. {
  564. struct vop_win *vop_win = to_vop_win(plane);
  565. const struct vop_win_data *win = vop_win->data;
  566. struct vop *vop = to_vop(old_state->crtc);
  567. if (!old_state->crtc)
  568. return;
  569. spin_lock(&vop->reg_lock);
  570. VOP_WIN_SET(vop, win, enable, 0);
  571. spin_unlock(&vop->reg_lock);
  572. }
  573. static void vop_plane_atomic_update(struct drm_plane *plane,
  574. struct drm_plane_state *old_state)
  575. {
  576. struct drm_plane_state *state = plane->state;
  577. struct drm_crtc *crtc = state->crtc;
  578. struct vop_win *vop_win = to_vop_win(plane);
  579. const struct vop_win_data *win = vop_win->data;
  580. struct vop *vop = to_vop(state->crtc);
  581. struct drm_framebuffer *fb = state->fb;
  582. unsigned int actual_w, actual_h;
  583. unsigned int dsp_stx, dsp_sty;
  584. uint32_t act_info, dsp_info, dsp_st;
  585. struct drm_rect *src = &state->src;
  586. struct drm_rect *dest = &state->dst;
  587. struct drm_gem_object *obj, *uv_obj;
  588. struct rockchip_gem_object *rk_obj, *rk_uv_obj;
  589. unsigned long offset;
  590. dma_addr_t dma_addr;
  591. uint32_t val;
  592. bool rb_swap;
  593. int win_index = VOP_WIN_TO_INDEX(vop_win);
  594. int format;
  595. /*
  596. * can't update plane when vop is disabled.
  597. */
  598. if (WARN_ON(!crtc))
  599. return;
  600. if (WARN_ON(!vop->is_enabled))
  601. return;
  602. if (!state->visible) {
  603. vop_plane_atomic_disable(plane, old_state);
  604. return;
  605. }
  606. obj = fb->obj[0];
  607. rk_obj = to_rockchip_obj(obj);
  608. actual_w = drm_rect_width(src) >> 16;
  609. actual_h = drm_rect_height(src) >> 16;
  610. act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);
  611. dsp_info = (drm_rect_height(dest) - 1) << 16;
  612. dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;
  613. dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
  614. dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
  615. dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);
  616. offset = (src->x1 >> 16) * fb->format->cpp[0];
  617. offset += (src->y1 >> 16) * fb->pitches[0];
  618. dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
  619. format = vop_convert_format(fb->format->format);
  620. spin_lock(&vop->reg_lock);
  621. VOP_WIN_SET(vop, win, format, format);
  622. VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
  623. VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
  624. if (fb->format->is_yuv) {
  625. int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
  626. int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
  627. int bpp = fb->format->cpp[1];
  628. uv_obj = fb->obj[1];
  629. rk_uv_obj = to_rockchip_obj(uv_obj);
  630. offset = (src->x1 >> 16) * bpp / hsub;
  631. offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
  632. dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
  633. VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
  634. VOP_WIN_SET(vop, win, uv_mst, dma_addr);
  635. }
  636. if (win->phy->scl)
  637. scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
  638. drm_rect_width(dest), drm_rect_height(dest),
  639. fb->format->format);
  640. VOP_WIN_SET(vop, win, act_info, act_info);
  641. VOP_WIN_SET(vop, win, dsp_info, dsp_info);
  642. VOP_WIN_SET(vop, win, dsp_st, dsp_st);
  643. rb_swap = has_rb_swapped(fb->format->format);
  644. VOP_WIN_SET(vop, win, rb_swap, rb_swap);
  645. /*
  646. * Blending win0 with the background color doesn't seem to work
  647. * correctly. We only get the background color, no matter the contents
  648. * of the win0 framebuffer. However, blending pre-multiplied color
  649. * with the default opaque black background color is a no-op,
  650. * so we can just disable blending to get the correct result.
  651. */
  652. if (fb->format->has_alpha && win_index > 0) {
  653. VOP_WIN_SET(vop, win, dst_alpha_ctl,
  654. DST_FACTOR_M0(ALPHA_SRC_INVERSE));
  655. val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
  656. SRC_ALPHA_M0(ALPHA_STRAIGHT) |
  657. SRC_BLEND_M0(ALPHA_PER_PIX) |
  658. SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
  659. SRC_FACTOR_M0(ALPHA_ONE);
  660. VOP_WIN_SET(vop, win, src_alpha_ctl, val);
  661. } else {
  662. VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
  663. }
  664. VOP_WIN_SET(vop, win, enable, 1);
  665. spin_unlock(&vop->reg_lock);
  666. }
  667. static const struct drm_plane_helper_funcs plane_helper_funcs = {
  668. .atomic_check = vop_plane_atomic_check,
  669. .atomic_update = vop_plane_atomic_update,
  670. .atomic_disable = vop_plane_atomic_disable,
  671. };
  672. static const struct drm_plane_funcs vop_plane_funcs = {
  673. .update_plane = drm_atomic_helper_update_plane,
  674. .disable_plane = drm_atomic_helper_disable_plane,
  675. .destroy = vop_plane_destroy,
  676. .reset = drm_atomic_helper_plane_reset,
  677. .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
  678. .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
  679. };
  680. static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
  681. {
  682. struct vop *vop = to_vop(crtc);
  683. unsigned long flags;
  684. if (WARN_ON(!vop->is_enabled))
  685. return -EPERM;
  686. spin_lock_irqsave(&vop->irq_lock, flags);
  687. VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
  688. VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);
  689. spin_unlock_irqrestore(&vop->irq_lock, flags);
  690. return 0;
  691. }
  692. static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
  693. {
  694. struct vop *vop = to_vop(crtc);
  695. unsigned long flags;
  696. if (WARN_ON(!vop->is_enabled))
  697. return;
  698. spin_lock_irqsave(&vop->irq_lock, flags);
  699. VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);
  700. spin_unlock_irqrestore(&vop->irq_lock, flags);
  701. }
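/*
 * Round the requested pixel clock to a rate the dclk can actually generate,
 * so the clk_set_rate() call in vop_crtc_atomic_enable() ends up matching
 * what adjusted_mode advertises.
 */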
  702. static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
  703. const struct drm_display_mode *mode,
  704. struct drm_display_mode *adjusted_mode)
  705. {
  706. struct vop *vop = to_vop(crtc);
  707. adjusted_mode->clock =
  708. clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
  709. return true;
  710. }
  711. static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
  712. struct drm_crtc_state *old_state)
  713. {
  714. struct vop *vop = to_vop(crtc);
  715. const struct vop_data *vop_data = vop->data;
  716. struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
  717. struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
  718. u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
  719. u16 hdisplay = adjusted_mode->hdisplay;
  720. u16 htotal = adjusted_mode->htotal;
  721. u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
  722. u16 hact_end = hact_st + hdisplay;
  723. u16 vdisplay = adjusted_mode->vdisplay;
  724. u16 vtotal = adjusted_mode->vtotal;
  725. u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
  726. u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
  727. u16 vact_end = vact_st + vdisplay;
  728. uint32_t pin_pol, val;
  729. int ret;
  730. mutex_lock(&vop->vop_lock);
  731. WARN_ON(vop->event);
  732. ret = vop_enable(crtc);
  733. if (ret) {
  734. mutex_unlock(&vop->vop_lock);
  735. DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
  736. return;
  737. }
  738. pin_pol = BIT(DCLK_INVERT);
  739. pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
  740. BIT(HSYNC_POSITIVE) : 0;
  741. pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
  742. BIT(VSYNC_POSITIVE) : 0;
  743. VOP_REG_SET(vop, output, pin_pol, pin_pol);
  744. switch (s->output_type) {
  745. case DRM_MODE_CONNECTOR_LVDS:
  746. VOP_REG_SET(vop, output, rgb_en, 1);
  747. VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
  748. break;
  749. case DRM_MODE_CONNECTOR_eDP:
  750. VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
  751. VOP_REG_SET(vop, output, edp_en, 1);
  752. break;
  753. case DRM_MODE_CONNECTOR_HDMIA:
  754. VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
  755. VOP_REG_SET(vop, output, hdmi_en, 1);
  756. break;
  757. case DRM_MODE_CONNECTOR_DSI:
  758. VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
  759. VOP_REG_SET(vop, output, mipi_en, 1);
  760. break;
  761. case DRM_MODE_CONNECTOR_DisplayPort:
  762. pin_pol &= ~BIT(DCLK_INVERT);
  763. VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
  764. VOP_REG_SET(vop, output, dp_en, 1);
  765. break;
  766. default:
  767. DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
  768. s->output_type);
  769. }
  770. /*
  771. * If the vop does not support RGB10 output, force RGB10 down to RGB888.
  772. */
  773. if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
  774. !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
  775. s->output_mode = ROCKCHIP_OUT_MODE_P888;
  776. if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
  777. VOP_REG_SET(vop, common, pre_dither_down, 1);
  778. else
  779. VOP_REG_SET(vop, common, pre_dither_down, 0);
  780. VOP_REG_SET(vop, common, out_mode, s->output_mode);
  781. VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
  782. val = hact_st << 16;
  783. val |= hact_end;
  784. VOP_REG_SET(vop, modeset, hact_st_end, val);
  785. VOP_REG_SET(vop, modeset, hpost_st_end, val);
  786. VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len);
  787. val = vact_st << 16;
  788. val |= vact_end;
  789. VOP_REG_SET(vop, modeset, vact_st_end, val);
  790. VOP_REG_SET(vop, modeset, vpost_st_end, val);
  791. VOP_REG_SET(vop, intr, line_flag_num[0], vact_end);
  792. clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
  793. VOP_REG_SET(vop, common, standby, 0);
  794. mutex_unlock(&vop->vop_lock);
  795. }
  796. static bool vop_fs_irq_is_pending(struct vop *vop)
  797. {
  798. return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
  799. }
  800. static void vop_wait_for_irq_handler(struct vop *vop)
  801. {
  802. bool pending;
  803. int ret;
  804. /*
  805. * Spin until frame start interrupt status bit goes low, which means
  806. * that interrupt handler was invoked and cleared it. The timeout of
  807. * 10 msecs is really too long, but it is just a safety measure if
  808. * something goes really wrong. The wait will only happen in the very
  809. * unlikely case of a vblank happening exactly at the same time and
  810. * shouldn't exceed microseconds range.
  811. */
  812. ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
  813. !pending, 0, 10 * 1000);
  814. if (ret)
  815. DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");
  816. synchronize_irq(vop->irq);
  817. }
  818. static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
  819. struct drm_crtc_state *old_crtc_state)
  820. {
  821. struct drm_atomic_state *old_state = old_crtc_state->state;
  822. struct drm_plane_state *old_plane_state, *new_plane_state;
  823. struct vop *vop = to_vop(crtc);
  824. struct drm_plane *plane;
  825. int i;
  826. if (WARN_ON(!vop->is_enabled))
  827. return;
  828. spin_lock(&vop->reg_lock);
  829. vop_cfg_done(vop);
  830. spin_unlock(&vop->reg_lock);
  831. /*
  832. * There is a (rather unlikely) possibility that a vblank interrupt
  833. * fired before we set the cfg_done bit. To avoid spuriously
  834. * signalling flip completion we need to wait for it to finish.
  835. */
  836. vop_wait_for_irq_handler(vop);
  837. spin_lock_irq(&crtc->dev->event_lock);
  838. if (crtc->state->event) {
  839. WARN_ON(drm_crtc_vblank_get(crtc) != 0);
  840. WARN_ON(vop->event);
  841. vop->event = crtc->state->event;
  842. crtc->state->event = NULL;
  843. }
  844. spin_unlock_irq(&crtc->dev->event_lock);
  845. for_each_oldnew_plane_in_state(old_state, plane, old_plane_state,
  846. new_plane_state, i) {
  847. if (!old_plane_state->fb)
  848. continue;
  849. if (old_plane_state->fb == new_plane_state->fb)
  850. continue;
  851. drm_framebuffer_get(old_plane_state->fb);
  852. WARN_ON(drm_crtc_vblank_get(crtc) != 0);
  853. drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
  854. set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
  855. }
  856. }
  857. static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
  858. .mode_fixup = vop_crtc_mode_fixup,
  859. .atomic_flush = vop_crtc_atomic_flush,
  860. .atomic_enable = vop_crtc_atomic_enable,
  861. .atomic_disable = vop_crtc_atomic_disable,
  862. };
  863. static void vop_crtc_destroy(struct drm_crtc *crtc)
  864. {
  865. drm_crtc_cleanup(crtc);
  866. }
  867. static void vop_crtc_reset(struct drm_crtc *crtc)
  868. {
  869. if (crtc->state)
  870. __drm_atomic_helper_crtc_destroy_state(crtc->state);
  871. kfree(crtc->state);
  872. crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
  873. if (crtc->state)
  874. crtc->state->crtc = crtc;
  875. }
  876. static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
  877. {
  878. struct rockchip_crtc_state *rockchip_state;
  879. rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
  880. if (!rockchip_state)
  881. return NULL;
  882. __drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
  883. return &rockchip_state->base;
  884. }
  885. static void vop_crtc_destroy_state(struct drm_crtc *crtc,
  886. struct drm_crtc_state *state)
  887. {
  888. struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);
  889. __drm_atomic_helper_crtc_destroy_state(&s->base);
  890. kfree(s);
  891. }
  892. #ifdef CONFIG_DRM_ANALOGIX_DP
  893. static struct drm_connector *vop_get_edp_connector(struct vop *vop)
  894. {
  895. struct drm_connector *connector;
  896. struct drm_connector_list_iter conn_iter;
  897. drm_connector_list_iter_begin(vop->drm_dev, &conn_iter);
  898. drm_for_each_connector_iter(connector, &conn_iter) {
  899. if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
  900. drm_connector_list_iter_end(&conn_iter);
  901. return connector;
  902. }
  903. }
  904. drm_connector_list_iter_end(&conn_iter);
  905. return NULL;
  906. }
  907. static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
  908. const char *source_name)
  909. {
  910. struct vop *vop = to_vop(crtc);
  911. struct drm_connector *connector;
  912. int ret;
  913. connector = vop_get_edp_connector(vop);
  914. if (!connector)
  915. return -EINVAL;
  916. if (source_name && strcmp(source_name, "auto") == 0)
  917. ret = analogix_dp_start_crc(connector);
  918. else if (!source_name)
  919. ret = analogix_dp_stop_crc(connector);
  920. else
  921. ret = -EINVAL;
  922. return ret;
  923. }
  924. static int
  925. vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
  926. size_t *values_cnt)
  927. {
  928. if (source_name && strcmp(source_name, "auto") != 0)
  929. return -EINVAL;
  930. *values_cnt = 3;
  931. return 0;
  932. }
  933. #else
  934. static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
  935. const char *source_name)
  936. {
  937. return -ENODEV;
  938. }
  939. static int
  940. vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
  941. size_t *values_cnt)
  942. {
  943. return -ENODEV;
  944. }
  945. #endif
  946. static const struct drm_crtc_funcs vop_crtc_funcs = {
  947. .set_config = drm_atomic_helper_set_config,
  948. .page_flip = drm_atomic_helper_page_flip,
  949. .destroy = vop_crtc_destroy,
  950. .reset = vop_crtc_reset,
  951. .atomic_duplicate_state = vop_crtc_duplicate_state,
  952. .atomic_destroy_state = vop_crtc_destroy_state,
  953. .enable_vblank = vop_crtc_enable_vblank,
  954. .disable_vblank = vop_crtc_disable_vblank,
  955. .set_crc_source = vop_crtc_set_crc_source,
  956. .verify_crc_source = vop_crtc_verify_crc_source,
  957. };
  958. static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
  959. {
  960. struct vop *vop = container_of(work, struct vop, fb_unref_work);
  961. struct drm_framebuffer *fb = val;
  962. drm_crtc_vblank_put(&vop->crtc);
  963. drm_framebuffer_put(fb);
  964. }
  965. static void vop_handle_vblank(struct vop *vop)
  966. {
  967. struct drm_device *drm = vop->drm_dev;
  968. struct drm_crtc *crtc = &vop->crtc;
  969. spin_lock(&drm->event_lock);
  970. if (vop->event) {
  971. drm_crtc_send_vblank_event(crtc, vop->event);
  972. drm_crtc_vblank_put(crtc);
  973. vop->event = NULL;
  974. }
  975. spin_unlock(&drm->event_lock);
  976. if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
  977. drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
  978. }
  979. static irqreturn_t vop_isr(int irq, void *data)
  980. {
  981. struct vop *vop = data;
  982. struct drm_crtc *crtc = &vop->crtc;
  983. uint32_t active_irqs;
  984. int ret = IRQ_NONE;
  985. /*
  986. * The irq is shared with the iommu. If the runtime-pm state of the
  987. * vop-device is disabled the irq has to be targeted at the iommu.
  988. */
  989. if (!pm_runtime_get_if_in_use(vop->dev))
  990. return IRQ_NONE;
  991. if (vop_core_clks_enable(vop)) {
  992. DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
  993. goto out;
  994. }
  995. /*
  996. * interrupt register has interrupt status, enable and clear bits, we
  997. * must hold irq_lock to avoid a race with enable/disable_vblank().
  998. */
  999. spin_lock(&vop->irq_lock);
  1000. active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
  1001. /* Clear all active interrupt sources */
  1002. if (active_irqs)
  1003. VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);
  1004. spin_unlock(&vop->irq_lock);
  1005. /* This is expected for vop iommu irqs, since the irq is shared */
  1006. if (!active_irqs)
  1007. goto out_disable;
  1008. if (active_irqs & DSP_HOLD_VALID_INTR) {
  1009. complete(&vop->dsp_hold_completion);
  1010. active_irqs &= ~DSP_HOLD_VALID_INTR;
  1011. ret = IRQ_HANDLED;
  1012. }
  1013. if (active_irqs & LINE_FLAG_INTR) {
  1014. complete(&vop->line_flag_completion);
  1015. active_irqs &= ~LINE_FLAG_INTR;
  1016. ret = IRQ_HANDLED;
  1017. }
  1018. if (active_irqs & FS_INTR) {
  1019. drm_crtc_handle_vblank(crtc);
  1020. vop_handle_vblank(vop);
  1021. active_irqs &= ~FS_INTR;
  1022. ret = IRQ_HANDLED;
  1023. }
  1024. /* Unhandled irqs are spurious. */
  1025. if (active_irqs)
  1026. DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
  1027. active_irqs);
  1028. out_disable:
  1029. vop_core_clks_disable(vop);
  1030. out:
  1031. pm_runtime_put(vop->dev);
  1032. return ret;
  1033. }
  1034. static int vop_create_crtc(struct vop *vop)
  1035. {
  1036. const struct vop_data *vop_data = vop->data;
  1037. struct device *dev = vop->dev;
  1038. struct drm_device *drm_dev = vop->drm_dev;
  1039. struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
  1040. struct drm_crtc *crtc = &vop->crtc;
  1041. struct device_node *port;
  1042. int ret;
  1043. int i;
  1044. /*
  1045. * Create drm_plane for primary and cursor planes first, since we need
  1046. * to pass them to drm_crtc_init_with_planes, which sets the
  1047. * "possible_crtcs" to the newly initialized crtc.
  1048. */
  1049. for (i = 0; i < vop_data->win_size; i++) {
  1050. struct vop_win *vop_win = &vop->win[i];
  1051. const struct vop_win_data *win_data = vop_win->data;
  1052. if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
  1053. win_data->type != DRM_PLANE_TYPE_CURSOR)
  1054. continue;
  1055. ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
  1056. 0, &vop_plane_funcs,
  1057. win_data->phy->data_formats,
  1058. win_data->phy->nformats,
  1059. NULL, win_data->type, NULL);
  1060. if (ret) {
  1061. DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
  1062. ret);
  1063. goto err_cleanup_planes;
  1064. }
  1065. plane = &vop_win->base;
  1066. drm_plane_helper_add(plane, &plane_helper_funcs);
  1067. if (plane->type == DRM_PLANE_TYPE_PRIMARY)
  1068. primary = plane;
  1069. else if (plane->type == DRM_PLANE_TYPE_CURSOR)
  1070. cursor = plane;
  1071. }
  1072. ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
  1073. &vop_crtc_funcs, NULL);
  1074. if (ret)
  1075. goto err_cleanup_planes;
  1076. drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
  1077. /*
  1078. * Create drm_planes for overlay windows with possible_crtcs restricted
  1079. * to the newly created crtc.
  1080. */
  1081. for (i = 0; i < vop_data->win_size; i++) {
  1082. struct vop_win *vop_win = &vop->win[i];
  1083. const struct vop_win_data *win_data = vop_win->data;
  1084. unsigned long possible_crtcs = drm_crtc_mask(crtc);
  1085. if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
  1086. continue;
  1087. ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
  1088. possible_crtcs,
  1089. &vop_plane_funcs,
  1090. win_data->phy->data_formats,
  1091. win_data->phy->nformats,
  1092. NULL, win_data->type, NULL);
  1093. if (ret) {
  1094. DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
  1095. ret);
  1096. goto err_cleanup_crtc;
  1097. }
  1098. drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
  1099. }
  1100. port = of_get_child_by_name(dev->of_node, "port");
  1101. if (!port) {
  1102. DRM_DEV_ERROR(vop->dev, "no port node found in %pOF\n",
  1103. dev->of_node);
  1104. ret = -ENOENT;
  1105. goto err_cleanup_crtc;
  1106. }
  1107. drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
  1108. vop_fb_unref_worker);
  1109. init_completion(&vop->dsp_hold_completion);
  1110. init_completion(&vop->line_flag_completion);
  1111. crtc->port = port;
  1112. return 0;
  1113. err_cleanup_crtc:
  1114. drm_crtc_cleanup(crtc);
  1115. err_cleanup_planes:
  1116. list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
  1117. head)
  1118. drm_plane_cleanup(plane);
  1119. return ret;
  1120. }
  1121. static void vop_destroy_crtc(struct vop *vop)
  1122. {
  1123. struct drm_crtc *crtc = &vop->crtc;
  1124. struct drm_device *drm_dev = vop->drm_dev;
  1125. struct drm_plane *plane, *tmp;
  1126. of_node_put(crtc->port);
  1127. /*
  1128. * We need to cleanup the planes now. Why?
  1129. *
  1130. * The planes are "&vop->win[i].base". That means the memory is
  1131. * all part of the big "struct vop" chunk of memory. That memory
  1132. * was devm allocated and associated with this component. We need to
  1133. * free it ourselves before vop_unbind() finishes.
  1134. */
  1135. list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
  1136. head)
  1137. vop_plane_destroy(plane);
  1138. /*
  1139. * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
  1140. * references the CRTC.
  1141. */
  1142. drm_crtc_cleanup(crtc);
  1143. drm_flip_work_cleanup(&vop->fb_unref_work);
  1144. }
  1145. static int vop_initial(struct vop *vop)
  1146. {
  1147. const struct vop_data *vop_data = vop->data;
  1148. struct reset_control *ahb_rst;
  1149. int i, ret;
  1150. vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
  1151. if (IS_ERR(vop->hclk)) {
  1152. DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n");
  1153. return PTR_ERR(vop->hclk);
  1154. }
  1155. vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
  1156. if (IS_ERR(vop->aclk)) {
  1157. DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n");
  1158. return PTR_ERR(vop->aclk);
  1159. }
  1160. vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
  1161. if (IS_ERR(vop->dclk)) {
  1162. DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n");
  1163. return PTR_ERR(vop->dclk);
  1164. }
  1165. ret = pm_runtime_get_sync(vop->dev);
  1166. if (ret < 0) {
  1167. DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
  1168. return ret;
  1169. }
  1170. ret = clk_prepare(vop->dclk);
  1171. if (ret < 0) {
  1172. DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n");
  1173. goto err_put_pm_runtime;
  1174. }
  1175. /* Enable both the hclk and aclk to setup the vop */
  1176. ret = clk_prepare_enable(vop->hclk);
  1177. if (ret < 0) {
  1178. DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n");
  1179. goto err_unprepare_dclk;
  1180. }
  1181. ret = clk_prepare_enable(vop->aclk);
  1182. if (ret < 0) {
  1183. DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n");
  1184. goto err_disable_hclk;
  1185. }
  1186. /*
  1187. * do hclk_reset, reset all vop registers.
  1188. */
  1189. ahb_rst = devm_reset_control_get(vop->dev, "ahb");
  1190. if (IS_ERR(ahb_rst)) {
  1191. DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n");
  1192. ret = PTR_ERR(ahb_rst);
  1193. goto err_disable_aclk;
  1194. }
  1195. reset_control_assert(ahb_rst);
  1196. usleep_range(10, 20);
  1197. reset_control_deassert(ahb_rst);
  1198. VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
  1199. VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
  1200. for (i = 0; i < vop->len; i += sizeof(u32))
  1201. vop->regsbak[i / 4] = readl_relaxed(vop->regs + i);
  1202. VOP_REG_SET(vop, misc, global_regdone_en, 1);
  1203. VOP_REG_SET(vop, common, dsp_blank, 0);
  1204. for (i = 0; i < vop_data->win_size; i++) {
  1205. const struct vop_win_data *win = &vop_data->win[i];
  1206. int channel = i * 2 + 1;
  1207. VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
  1208. VOP_WIN_SET(vop, win, enable, 0);
  1209. VOP_WIN_SET(vop, win, gate, 1);
  1210. }
  1211. vop_cfg_done(vop);
  1212. /*
  1213. * do dclk_reset, so that all the configuration takes effect.
  1214. */
  1215. vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
  1216. if (IS_ERR(vop->dclk_rst)) {
  1217. DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n");
  1218. ret = PTR_ERR(vop->dclk_rst);
  1219. goto err_disable_aclk;
  1220. }
  1221. reset_control_assert(vop->dclk_rst);
  1222. usleep_range(10, 20);
  1223. reset_control_deassert(vop->dclk_rst);
  1224. clk_disable(vop->hclk);
  1225. clk_disable(vop->aclk);
  1226. vop->is_enabled = false;
  1227. pm_runtime_put_sync(vop->dev);
  1228. return 0;
  1229. err_disable_aclk:
  1230. clk_disable_unprepare(vop->aclk);
  1231. err_disable_hclk:
  1232. clk_disable_unprepare(vop->hclk);
  1233. err_unprepare_dclk:
  1234. clk_unprepare(vop->dclk);
  1235. err_put_pm_runtime:
  1236. pm_runtime_put_sync(vop->dev);
  1237. return ret;
  1238. }
  1239. /*
  1240. * Initialize the vop->win array elements.
  1241. */
  1242. static void vop_win_init(struct vop *vop)
  1243. {
  1244. const struct vop_data *vop_data = vop->data;
  1245. unsigned int i;
  1246. for (i = 0; i < vop_data->win_size; i++) {
  1247. struct vop_win *vop_win = &vop->win[i];
  1248. const struct vop_win_data *win_data = &vop_data->win[i];
  1249. vop_win->data = win_data;
  1250. vop_win->vop = vop;
  1251. }
  1252. }
  1253. /**
  1254. * rockchip_drm_wait_vact_end
  1255. * @crtc: CRTC to enable line flag
  1256. * @mstimeout: millisecond for timeout
  1257. *
  1258. * Wait for vact_end line flag irq or timeout.
  1259. *
  1260. * Returns:
  1261. * Zero on success, negative errno on failure.
  1262. */
  1263. int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
  1264. {
  1265. struct vop *vop = to_vop(crtc);
  1266. unsigned long jiffies_left;
  1267. int ret = 0;
  1268. if (!crtc || !vop->is_enabled)
  1269. return -ENODEV;
  1270. mutex_lock(&vop->vop_lock);
  1271. if (mstimeout <= 0) {
  1272. ret = -EINVAL;
  1273. goto out;
  1274. }
  1275. if (vop_line_flag_irq_is_enabled(vop)) {
  1276. ret = -EBUSY;
  1277. goto out;
  1278. }
  1279. reinit_completion(&vop->line_flag_completion);
  1280. vop_line_flag_irq_enable(vop);
  1281. jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
  1282. msecs_to_jiffies(mstimeout));
  1283. vop_line_flag_irq_disable(vop);
  1284. if (jiffies_left == 0) {
  1285. DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
  1286. ret = -ETIMEDOUT;
  1287. goto out;
  1288. }
  1289. out:
  1290. mutex_unlock(&vop->vop_lock);
  1291. return ret;
  1292. }
  1293. EXPORT_SYMBOL(rockchip_drm_wait_vact_end);
  1294. static int vop_bind(struct device *dev, struct device *master, void *data)
  1295. {
  1296. struct platform_device *pdev = to_platform_device(dev);
  1297. const struct vop_data *vop_data;
  1298. struct drm_device *drm_dev = data;
  1299. struct vop *vop;
  1300. struct resource *res;
  1301. int ret, irq;
  1302. vop_data = of_device_get_match_data(dev);
  1303. if (!vop_data)
  1304. return -ENODEV;
  1305. /* Allocate vop struct and its vop_win array */
  1306. vop = devm_kzalloc(dev, struct_size(vop, win, vop_data->win_size),
  1307. GFP_KERNEL);
  1308. if (!vop)
  1309. return -ENOMEM;
  1310. vop->dev = dev;
  1311. vop->data = vop_data;
  1312. vop->drm_dev = drm_dev;
  1313. dev_set_drvdata(dev, vop);
  1314. vop_win_init(vop);
  1315. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1316. vop->len = resource_size(res);
  1317. vop->regs = devm_ioremap_resource(dev, res);
  1318. if (IS_ERR(vop->regs))
  1319. return PTR_ERR(vop->regs);
  1320. vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
  1321. if (!vop->regsbak)
  1322. return -ENOMEM;
  1323. irq = platform_get_irq(pdev, 0);
  1324. if (irq < 0) {
  1325. DRM_DEV_ERROR(dev, "cannot find irq for vop\n");
  1326. return irq;
  1327. }
  1328. vop->irq = (unsigned int)irq;
  1329. spin_lock_init(&vop->reg_lock);
  1330. spin_lock_init(&vop->irq_lock);
  1331. mutex_init(&vop->vop_lock);
  1332. ret = vop_create_crtc(vop);
  1333. if (ret)
  1334. return ret;
  1335. pm_runtime_enable(&pdev->dev);
  1336. ret = vop_initial(vop);
  1337. if (ret < 0) {
  1338. DRM_DEV_ERROR(&pdev->dev,
  1339. "cannot initial vop dev - err %d\n", ret);
  1340. goto err_disable_pm_runtime;
  1341. }
  1342. ret = devm_request_irq(dev, vop->irq, vop_isr,
  1343. IRQF_SHARED, dev_name(dev), vop);
  1344. if (ret)
  1345. goto err_disable_pm_runtime;
  1346. if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
  1347. vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev);
  1348. if (IS_ERR(vop->rgb)) {
  1349. ret = PTR_ERR(vop->rgb);
  1350. goto err_disable_pm_runtime;
  1351. }
  1352. }
  1353. return 0;
  1354. err_disable_pm_runtime:
  1355. pm_runtime_disable(&pdev->dev);
  1356. vop_destroy_crtc(vop);
  1357. return ret;
  1358. }
  1359. static void vop_unbind(struct device *dev, struct device *master, void *data)
  1360. {
  1361. struct vop *vop = dev_get_drvdata(dev);
  1362. if (vop->rgb)
  1363. rockchip_rgb_fini(vop->rgb);
  1364. pm_runtime_disable(dev);
  1365. vop_destroy_crtc(vop);
  1366. clk_unprepare(vop->aclk);
  1367. clk_unprepare(vop->hclk);
  1368. clk_unprepare(vop->dclk);
  1369. }
  1370. const struct component_ops vop_component_ops = {
  1371. .bind = vop_bind,
  1372. .unbind = vop_unbind,
  1373. };
  1374. EXPORT_SYMBOL_GPL(vop_component_ops);