rockchip_drm_vop.c

/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"

#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, true)

#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, false)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 mask, reg.shift, v, reg.write_mask)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)

#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
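
/*
 * Register access is layered: every write goes through the vop_reg tables in
 * vop->data, so the same code can drive different vop revisions. For example,
 * VOP_WIN_SET(vop, win, enable, 1) expands (roughly) to
 *
 *	vop_mask_write(vop, win->base + win->phy->enable.offset,
 *		       win->phy->enable.mask, win->phy->enable.shift,
 *		       1, win->phy->enable.write_mask, true);
 *
 * i.e. a relaxed, masked write whose offset/mask/shift come from the per-SoC
 * window description rather than from hard-coded constants.
 */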

enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;

	struct vop *vop;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* mutex protecting the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* size of the mapped vop register region */
	uint32_t len;

	/* only one process at a time may configure the registers */
	spinlock_t reg_lock;
	/* protects the vop irq registers */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory bus (aclk) */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};

static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t shift, uint32_t v,
				  bool write_mask, bool relaxed)
{
	if (!mask)
		return;

	if (write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}
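
/*
 * Two write paths exist above. Registers with a hardware write-enable mask
 * (write_mask) take the value in the low 16 bits and the per-bit enable in
 * the high 16 bits, so no read-modify-write is needed. All other registers
 * are updated through vop->regsbak, a RAM shadow of the register file: the
 * cached word is masked, merged and written back, which avoids reading from
 * the (write-mostly) register block.
 */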

static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}
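
/*
 * scl_vop_cal_scl_fac() below programs the per-window scaler. In rough terms
 * it: (1) falls back to the simple scale registers when the window has no
 * extended scaler; (2) derives up/down scale modes for the yrgb and (for YUV
 * formats) cbcr planes; (3) picks a line-buffer mode and a vertical scale-up
 * filter (bilinear for the LB_RGB_3840X2/LB_RGB_2560X4 modes, bicubic
 * otherwise); and (4) writes the horizontal/vertical scale factors, including
 * the vskiplines hints used for large vertical downscales.
 */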
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}

static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
 *     to get "LINE_FLAG" interrupt at the end of the active on screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 * Interrupts
 * LINE_FLAG -------------------------------+
 * FRAME_SYNC ----+                         |
 *                |                         |
 *                v                         v
 *                | Vsync | Vbp |  Vactive  | Vfp |
 *                        ^     ^           ^     ^
 *                        |     |           |     |
 *                        |     |           |     |
 * dsp_vs_end ------------+     |           |     |  VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+           |     |  VOP_DSP_VACT_ST_END
 * dsp_vact_end ----------------------------+     |  VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+  VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_CTRL_SET(vop, line_flag_num[0], line_num);
	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		goto err_put_pm_runtime;
	}

	ret = clk_enable(vop->hclk);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_hclk;

	ret = clk_enable(vop->aclk);
	if (WARN_ON(ret < 0))
		goto err_disable_dclk;

	/*
	 * Slave iommu shares power, irq and clock with vop. It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	vop_cfg_done(vop);

	/*
	 * At this point the vop clock and iommu are enabled, so it is safe
	 * to read and write the vop registers.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	WARN_ON(vop->event);

	rockchip_drm_psr_deactivate(&vop->crtc);

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable the crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	vop_cfg_done(vop);

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby takes effect at the end of the current frame;
	 * the dsp hold valid irq signals that standby has completed.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may hang.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * Vop standby is complete, so it is now safe to detach the iommu.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);

	pm_runtime_put(vop->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}

static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	int ret;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_state(state, &clip,
					   min_scale, max_scale,
					   true, true);
	if (ret)
		return ret;

	if (!state->visible)
		return 0;

	ret = vop_convert_format(fb->format->format);
	if (ret < 0)
		return ret;

	/*
	 * Src.x1 can become odd after clipping, but the start point of a
	 * yuv plane must be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
		return -EINVAL;

	return 0;
}

static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);
}

static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &state->src;
	struct drm_rect *dest = &state->dst;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;
	int format;

	/*
	 * can't update plane when vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!state->visible) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * fb->format->cpp[0];
	offset += (src->y1 >> 16) * fb->pitches[0];
	dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
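
	/*
	 * Example with illustrative numbers: for an XRGB8888 framebuffer
	 * (cpp[0] == 4) with pitches[0] == 4096 and a source rectangle
	 * starting at pixel (64, 32), the scanout address above becomes
	 * rk_obj->dma_addr + 64 * 4 + 32 * 4096 + fb->offsets[0].
	 */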
	format = vop_convert_format(fb->format->format);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
	if (is_yuv_support(fb->format->format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
		int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
		int bpp = fb->format->cpp[1];

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->format->format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->format->format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
};

static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}
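
/*
 * vop_crtc_enable() below converts the DRM mode into the hardware's
 * "start/end counted from the start of sync" representation, i.e.
 * hact_st = htotal - hsync_start and vact_st = vtotal - vsync_start.
 * As a worked example with standard CEA 1080p timings (1920/2008/2052/2200
 * horizontal, 1080/1084/1089/1125 vertical):
 *
 *	hact_st  = 2200 - 2008 = 192  (hsync_len 44 + horizontal back porch 148)
 *	hact_end = 192 + 1920 = 2112
 *	vact_st  = 1125 - 1084 = 41   (vsync_len 5 + vertical back porch 36)
 *	vact_end = 41 + 1080 = 1121
 *
 * Each pair is packed as (start << 16) | end into the *_st_end registers.
 */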
static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	/*
	 * If the dclk rate is zero, scanout has stopped and there is
	 * nothing to wait for.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * The rk3288 vop timing registers take effect immediately;
		 * reprogramming the display timing while scanning out may
		 * cause tearing.
		 *
		 * Vop standby takes effect at the end of the current frame;
		 * the dsp hold valid irq signals that standby has completed.
		 *
		 * mode set:
		 *    standby and wait complete --> |----
		 *                                  | display time
		 *                                  |----
		 *                                  |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */
		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	pin_pol = BIT(DCLK_INVERT);
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ?
		   0 : BIT(HSYNC_POSITIVE);
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ?
		   0 : BIT(VSYNC_POSITIVE);
	VOP_CTRL_SET(vop, pin_pol, pin_pol);

	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		pin_pol &= ~BIT(DCLK_INVERT);
		VOP_CTRL_SET(vop, dp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, dp_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}

	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);

	rockchip_drm_psr_activate(&vop->crtc);
}

static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it. The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong. The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}

static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possibility that a vblank interrupt
	 * fired before we set the cfg_done bit. To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == plane->state->fb)
			continue;

		drm_framebuffer_reference(old_plane_state->fb);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	}
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	rockchip_drm_psr_flush(crtc);
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
};

static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_unreference(fb);
}

static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}
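
/*
 * The interrupt handler below deals with three sources: DSP_HOLD_VALID
 * (completes dsp_hold_completion, used while entering standby), LINE_FLAG
 * (completes line_flag_completion for rockchip_drm_wait_line_flag()) and
 * FS, the frame-start interrupt that drives vblank and flip handling.
 * The line is shared with the iommu, so seeing no active vop irqs here is
 * normal and simply returns IRQ_NONE.
 */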
static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * interrupt register has interrupt status, enable and clear bits, we
	 * must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & LINE_FLAG_INTR) {
		complete(&vop->line_flag_completion);
		active_irqs &= ~LINE_FLAG_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
			      active_irqs);

	return ret;
}

static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
				      ret);
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
				      ret);
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
			      dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
			   vop_fb_unref_worker);

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->line_flag_completion);
	crtc->port = port;
	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	rockchip_unregister_crtc_funcs(crtc);
	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now. Why?
	 *
	 * The planes are "&vop->win[i].base". That means the memory is
	 * all part of the big "struct vop" chunk of memory. That memory
	 * was devm allocated and associated with this component. We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&vop->fb_unref_work);
}

static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	const struct vop_reg_data *init_table = vop_data->init_table;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		dev_err(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		dev_err(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		dev_err(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare dclk\n");
		return ret;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * Do the ahb (hclk) reset to put all vop registers into a known state.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	memcpy(vop->regsbak, vop->regs, vop->len);

	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

	vop_cfg_done(vop);

	/*
	 * Do the dclk reset so that all of this configuration takes effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
	return ret;
}

/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}

/**
 * rockchip_drm_wait_line_flag - acquire the given line flag event
 * @crtc: CRTC to enable the line flag on
 * @line_num: interested line number
 * @mstimeout: timeout in milliseconds
 *
 * Blocks until the line flag interrupt for the requested line has fired,
 * or until the timeout expires.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
				unsigned int mstimeout)
{
	struct vop *vop = to_vop(crtc);
	unsigned long jiffies_left;

	if (!crtc || !vop->is_enabled)
		return -ENODEV;

	if (line_num > crtc->mode.vtotal || mstimeout <= 0)
		return -EINVAL;

	if (vop_line_flag_irq_is_enabled(vop))
		return -EBUSY;

	reinit_completion(&vop->line_flag_completion);
	vop_line_flag_irq_enable(vop, line_num);

	jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
						   msecs_to_jiffies(mstimeout));
	vop_line_flag_irq_disable(vop);

	if (jiffies_left == 0) {
		dev_err(vop->dev, "Timeout waiting for IRQ\n");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(rockchip_drm_wait_line_flag);

static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	ret = vop_initial(vop);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		return ret;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	ret = vop_create_crtc(vop);
	if (ret)
		goto err_enable_irq;

	pm_runtime_enable(&pdev->dev);

	return 0;

err_enable_irq:
	enable_irq(vop->irq); /* To balance out the disable_irq above */
	return ret;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);