rockchip_drm_vop.c

/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, true)

#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, false)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 mask, reg.shift, v, reg.write_mask)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)

#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;

	struct vop *vop;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* protects the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of vop registers */
	uint32_t len;

	/* only one process is allowed to configure the registers at a time */
	spinlock_t reg_lock;
	/* lock for vop irq registers */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory clk */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t shift, uint32_t v,
				  bool write_mask, bool relaxed)
{
	if (!mask)
		return;

	if (write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}
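
/*
 * Descriptive aside (not upstream text): for fields without a hardware
 * write-mask, vop_mask_write() does its read-modify-write against the shadow
 * copy in vop->regsbak instead of reading the hardware back, and vop_enable()
 * later restores the whole shadow with memcpy().  A setter such as
 *
 *	VOP_CTRL_SET(vop, standby, 1);
 *
 * therefore expands (via REG_SET and __REG_SET_NORMAL above) to roughly
 *
 *	vop_mask_write(vop, 0 + ctrl->standby.offset, ctrl->standby.mask,
 *		       ctrl->standby.shift, 1, ctrl->standby.write_mask,
 *		       false);
 */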
static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}
static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}
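
/*
 * Descriptive aside (not upstream text): the scaling coefficients above are
 * programmed from vop_plane_atomic_update(), which passes the clipped source
 * size and the destination rectangle of the plane state:
 *
 *	scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
 *			    drm_rect_width(dest), drm_rect_height(dest),
 *			    fb->format->format);
 *
 * so both the YRGB path and (for NV12/NV16/NV24) the CbCr path are derived
 * from the same plane state.
 */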
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
 *     to get "LINE_FLAG" interrupt at the end of the active on screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 * Interrupts
 * LINE_FLAG -------------------------------+
 * FRAME_SYNC ----+                         |
 *                |                         |
 *                v                         v
 *                | Vsync | Vbp |  Vactive  | Vfp |
 *                        ^     ^           ^     ^
 *                        |     |           |     |
 *                        |     |           |     |
 * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
 * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_CTRL_SET(vop, line_flag_num[0], line_num);
	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
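
/*
 * Descriptive aside (not upstream text): the usual pattern around these two
 * helpers is the one used by rockchip_drm_wait_line_flag() near the end of
 * this file:
 *
 *	reinit_completion(&vop->line_flag_completion);
 *	vop_line_flag_irq_enable(vop, line_num);
 *	wait_for_completion_timeout(&vop->line_flag_completion, timeout);
 *	vop_line_flag_irq_disable(vop);
 *
 * with the completion signalled from vop_isr() when LINE_FLAG_INTR fires.
 */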
static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		goto err_put_pm_runtime;
	}

	ret = clk_enable(vop->hclk);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_hclk;

	ret = clk_enable(vop->aclk);
	if (WARN_ON(ret < 0))
		goto err_disable_dclk;

	/*
	 * Slave iommu shares power, irq and clock with vop.  It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	/*
	 * At this point the vop clock and iommu are enabled, so it is safe
	 * to read/write the vop registers.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}
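
/*
 * Descriptive aside (not upstream text): only clk_enable()/clk_disable() are
 * used here and in vop_crtc_disable() because vop_initial() already did the
 * clk_prepare() step for hclk, aclk and dclk at bind time, so the runtime
 * enable/disable paths never have to sleep inside the clock framework.
 */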
static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	WARN_ON(vop->event);

	rockchip_drm_psr_deactivate(&vop->crtc);

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable the crtc.  Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby takes effect at the end of the current frame; when the
	 * dsp hold valid irq fires, standby is complete.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may lock up.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * Vop standby is complete, so it is safe to detach the iommu.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}
static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_state(state, &clip,
					   min_scale, max_scale,
					   true, true);
	if (ret)
		return ret;

	if (!state->visible)
		return 0;

	ret = vop_convert_format(fb->format->format);
	if (ret < 0)
		return ret;

	/*
	 * src.x1 can become odd after clipping, but the start point of a yuv
	 * plane needs to be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
		return -EINVAL;

	return 0;
}
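
/*
 * Descriptive aside (not upstream text): the FRAC_16_16(1, 8)..FRAC_16_16(8, 1)
 * limits above mean that windows with a scaler block accept anything from an
 * 8x upscale to an 8x downscale, while scaler-less windows fall back to
 * DRM_PLANE_HELPER_NO_SCALING and must be displayed 1:1.
 */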
static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);
}
static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &state->src;
	struct drm_rect *dest = &state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;
	int format;

	/*
	 * We can't update the plane while the vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!state->visible) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * fb->format->cpp[0];
	offset += (src->y1 >> 16) * fb->pitches[0];
	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];

	format = vop_convert_format(fb->format->format);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
	if (is_yuv_support(fb->format->format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
		int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
		int bpp = fb->format->cpp[1];

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->format->format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->format->format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}
static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	/*
	 * If the dclk rate is zero, scanout has stopped and there is nothing
	 * to wait for.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * The RK3288 vop timing registers take effect immediately;
		 * reprogramming the display timing while the display is
		 * active may cause tearing.
		 *
		 * Vop standby takes effect at the end of the current frame;
		 * when the dsp hold valid irq fires, standby is complete.
		 *
		 * mode set:
		 *    standby and wait complete --> |----
		 *                                  | display time
		 *                                  |----
		 *                                  |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */
		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	pin_pol = 0x8;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, pin_pol);

	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}
	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);

	rockchip_drm_psr_activate(&vop->crtc);
}
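
/*
 * Descriptive aside (not upstream text): a worked example of the timing
 * maths above, assuming the standard CEA 1920x1080@60 mode (htotal 2200,
 * hsync_start 2008, hsync_end 2052, vtotal 1125, vsync_start 1084,
 * vsync_end 1089):
 *
 *	hsync_len = 2052 - 2008 = 44
 *	hact_st   = 2200 - 2008 = 192   (sync + back porch)
 *	hact_end  = 192 + 1920  = 2112
 *	vsync_len = 1089 - 1084 = 5
 *	vact_st   = 1125 - 1084 = 41
 *	vact_end  = 41 + 1080   = 1121
 *
 * so htotal_pw = (2200 << 16) | 44 and hact_st_end = (192 << 16) | 2112,
 * and similarly for the vertical registers.
 */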
static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it.  The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong.  The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possibility that a vblank interrupt
	 * fired before we set the cfg_done bit.  To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == plane->state->fb)
			continue;

		drm_framebuffer_reference(old_plane_state->fb);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	}
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	rockchip_drm_psr_flush(crtc);
}
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_unreference(fb);
}

static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}
static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * The interrupt register has interrupt status, enable and clear bits;
	 * we must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

	return ret;
}
static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
				      ret);
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
				      ret);
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
			      dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
			   vop_fb_unref_worker);

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->line_flag_completion);
	crtc->port = port;
	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}
static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	rockchip_unregister_crtc_funcs(crtc);
	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now.  Why?
	 *
	 * The planes are "&vop->win[i].base".  That means the memory is
	 * all part of the big "struct vop" chunk of memory.  That memory
	 * was devm allocated and associated with this component.  We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&vop->fb_unref_work);
}
static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	const struct vop_reg_data *init_table = vop_data->init_table;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		dev_err(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		dev_err(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		dev_err(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare dclk\n");
		return ret;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * Do hclk_reset, resetting all vop registers.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	memcpy(vop->regsbak, vop->regs, vop->len);

	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

	vop_cfg_done(vop);

	/*
	 * Do dclk_reset, letting the whole configuration take effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
	return ret;
}
/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}
/**
 * rockchip_drm_wait_line_flag - acquire the given line flag event
 * @crtc: CRTC to enable the line flag for
 * @line_num: line number of interest
 * @mstimeout: timeout in milliseconds
 *
 * Blocks until the line flag interrupt for the requested line has fired,
 * or until the timeout expires.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
				unsigned int mstimeout)
{
	struct vop *vop = to_vop(crtc);
	unsigned long jiffies_left;

	if (!crtc || !vop->is_enabled)
		return -ENODEV;

	if (line_num > crtc->mode.vtotal || mstimeout <= 0)
		return -EINVAL;

	if (vop_line_flag_irq_is_enabled(vop))
		return -EBUSY;

	reinit_completion(&vop->line_flag_completion);
	vop_line_flag_irq_enable(vop, line_num);

	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
						   msecs_to_jiffies(mstimeout));
	vop_line_flag_irq_disable(vop);

	if (jiffies_left == 0) {
		dev_err(vop->dev, "Timeout waiting for IRQ\n");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(rockchip_drm_wait_line_flag);
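
/*
 * Descriptive aside (not upstream text): a hypothetical caller that wants to
 * run just after line 1000 of the active region has been scanned out could
 * use the helper like this (100 ms timeout):
 *
 *	int ret = rockchip_drm_wait_line_flag(crtc, 1000, 100);
 *	if (ret)
 *		return ret;
 *
 * Only one waiter may use the line flag at a time; a second concurrent call
 * returns -EBUSY.
 */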
static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	ret = vop_initial(vop);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		return ret;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	ret = vop_create_crtc(vop);
	if (ret)
		goto err_enable_irq;

	pm_runtime_enable(&pdev->dev);

	return 0;

err_enable_irq:
	enable_irq(vop->irq); /* To balance out the disable_irq above */
	return ret;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);
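
/*
 * Descriptive aside (not upstream text): vop_component_ops is consumed by the
 * per-SoC platform driver, whose probe and remove callbacks register the VOP
 * with the component framework roughly like this:
 *
 *	static int vop_probe(struct platform_device *pdev)
 *	{
 *		return component_add(&pdev->dev, &vop_component_ops);
 *	}
 *
 *	static int vop_remove(struct platform_device *pdev)
 *	{
 *		component_del(&pdev->dev, &vop_component_ops);
 *		return 0;
 *	}
 *
 * The master rockchip DRM driver then binds all components, which is when
 * vop_bind() above actually runs.
 */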