rockchip_drm_vop.c

/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop.h"
#define __REG_SET_RELAXED(x, off, mask, shift, v) \
		vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
#define __REG_SET_NORMAL(x, off, mask, shift, v) \
		vop_mask_write(x, off, (mask) << shift, (v) << shift)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)

#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
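
/*
 * Illustrative expansion (added for clarity, not part of the upstream
 * driver): assuming a window whose "enable" field is described by a
 * vop_reg with offset = 0x30, mask = 0x1 and shift = 0 (hypothetical
 * values), a call such as
 *
 *	VOP_WIN_SET(vop, win, enable, 1);
 *
 * roughly expands to
 *
 *	vop_mask_write_relaxed(vop, win->base + 0x30, 0x1 << 0, 1 << 0);
 *
 * i.e. a masked read-modify-write of a single register field, with the
 * register description coming from the per-SoC vop_win_data tables.
 */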
struct vop_plane_state {
	struct drm_plane_state base;
	int format;
	struct drm_rect src;
	struct drm_rect dest;
	dma_addr_t yrgb_mst;
	bool enable;
};

struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;

	struct vop_plane_state state;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* protects the vsync work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;
	struct completion wait_update_complete;
	struct drm_pending_vblank_event *event;

	const struct vop_data *data;

	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of vop register */
	uint32_t len;

	/* only one process at a time may configure the registers */
	spinlock_t reg_lock;
	/* lock vop irq reg */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop shared memory clk */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t v)
{
	if (mask) {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		cached_val = (cached_val & ~mask) | v;
		writel(cached_val, vop->regs + offset);
		vop->regsbak[offset >> 2] = cached_val;
	}
}

static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
					  uint32_t mask, uint32_t v)
{
	if (mask) {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		cached_val = (cached_val & ~mask) | v;
		writel_relaxed(cached_val, vop->regs + offset);
		vop->regsbak[offset >> 2] = cached_val;
	}
}
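
/*
 * Illustrative note (added for clarity, not in the original file): the
 * register file is write-mostly, so the driver keeps a shadow copy in
 * vop->regsbak and masked writes read-modify-write that shadow rather
 * than the hardware. With a shadow value of 0xf0 at byte offset 0x10, a
 * hypothetical call
 *
 *	vop_mask_write(vop, 0x10, 0x0f, 0x03);
 *
 * would write (0xf0 & ~0x0f) | 0x03 == 0xf3 to the register and update
 * the shadow to the same value. Callers are expected to pass "v" already
 * shifted into the masked field, which is what the REG_SET macros above
 * do.
 */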
static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static bool has_rb_swapped(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}
static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}
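
/*
 * Sketch of the semantics (added for clarity, not in the original file):
 * the returned scale factor is a fixed-point ratio with
 * SCL_FT_DEFAULT_FIXPOINT_SHIFT fractional bits, so the default value
 * above encodes 1.0 (no scaling). Assuming the GET_SCL_FT_* helpers
 * encode roughly src/dst, a horizontal bilinear downscale from 1920 to
 * 960 pixels would yield a factor close to 2.0 in that fixed-point form,
 * while vertical downscaling may additionally skip source lines
 * (*vskiplines) and fold the skipped lines into the returned factor.
 */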
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_ERROR("Maximum destination width (3840) exceeded\n");
		return;
	}

	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_ERROR("yrgb vertical scaling is not allowed\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_ERROR("cbcr vertical scaling is not allowed\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}
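
/*
 * Example (added for clarity, not in the original file): for an NV12
 * source the chroma plane is subsampled by 2 in both directions, so a
 * 1920x1080 source gives cbcr_src_w = 960 and cbcr_src_h = 540 above,
 * and the cbcr scale factors are computed against those dimensions
 * rather than the luma size.
 */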
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret;

	if (vop->is_enabled)
		return;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		return;
	}

	ret = clk_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
		return;
	}

	ret = clk_enable(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
		goto err_disable_hclk;
	}

	ret = clk_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
		goto err_disable_dclk;
	}

	/*
	 * Slave iommu shares power, irq and clock with vop. It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	memcpy(vop->regs, vop->regsbak, vop->len);
	/*
	 * At this point the vop clock and iommu are enabled, so it is safe
	 * to read and write the vop registers.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
}
static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	if (!vop->is_enabled)
		return;

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable that crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby takes effect at the end of the current frame;
	 * when the dsp hold valid irq fires, standby is complete.
	 *
	 * We must wait for standby to complete before disabling aclk,
	 * otherwise the memory bus may hang.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * vop standby is complete, so it is now safe to detach the iommu.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);
}
static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static int vop_plane_prepare_fb(struct drm_plane *plane,
				const struct drm_plane_state *new_state)
{
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);

	return 0;
}

static void vop_plane_cleanup_fb(struct drm_plane *plane,
				 const struct drm_plane_state *old_state)
{
	if (old_state->fb)
		drm_framebuffer_unreference(old_state->fb);
}

static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	bool visible;
	int ret;
	struct drm_rect *dest = &vop_plane_state->dest;
	struct drm_rect *src = &vop_plane_state->src;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	if (!crtc || !fb)
		goto out_disable;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	src->x1 = state->src_x;
	src->y1 = state->src_y;
	src->x2 = state->src_x + state->src_w;
	src->y2 = state->src_y + state->src_h;
	dest->x1 = state->crtc_x;
	dest->y1 = state->crtc_y;
	dest->x2 = state->crtc_x + state->crtc_w;
	dest->y2 = state->crtc_y + state->crtc_h;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_update(plane, crtc, state->fb,
					    src, dest, &clip,
					    min_scale,
					    max_scale,
					    true, true, &visible);
	if (ret)
		return ret;

	if (!visible)
		goto out_disable;

	vop_plane_state->format = vop_convert_format(fb->pixel_format);
	if (vop_plane_state->format < 0)
		return vop_plane_state->format;

	/*
	 * src->x1 can become odd after clipping, but the start point of a
	 * yuv plane must be aligned to 2 pixels.
	 */
	if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2))
		return -EINVAL;

	vop_plane_state->enable = true;

	return 0;

out_disable:
	vop_plane_state->enable = false;
	return 0;
}
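
/*
 * Note added for clarity (not in the original file): the src_* fields of
 * drm_plane_state are 16.16 fixed point, which is why the source
 * rectangle built above is shifted right by 16 bits before use. For
 * example, a requested source x of 100 pixels arrives as
 * 100 << 16 == 0x640000, and (src->x1 >> 16) recovers the integer pixel
 * position that the yuv alignment check above operates on.
 */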
static void vop_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
	struct vop_win *vop_win = to_vop_win(plane);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(old_state->crtc);

	if (!old_state->crtc)
		return;

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, enable, 0);

	spin_unlock(&vop->reg_lock);

	vop_plane_state->enable = false;
}

static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &vop_plane_state->src;
	struct drm_rect *dest = &vop_plane_state->dest;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;

	/*
	 * The plane cannot be updated while the vop is disabled.
	 */
	if (!crtc)
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	if (!vop_plane_state->enable) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
	offset += (src->y1 >> 16) * fb->pitches[0];
	vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, vop_plane_state->format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
	if (is_yuv_support(fb->pixel_format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->pixel_format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->pixel_format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->pixel_format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}
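
/*
 * Worked example (added for clarity, not in the original file): for an
 * NV12 framebuffer with a 1920-byte luma pitch being cropped at
 * src->x1 = 64 and src->y1 = 32 (whole pixels), the luma scanout address
 * is dma_addr + 64 * 1 + 32 * 1920, while the interleaved CbCr plane uses
 * 2 bytes per sample with hsub = vsub = 2, so its offset is
 * 64 * 2 / 2 + 16 * pitches[1]. The yrgb_vir/uv_vir registers take the
 * pitch in 32-bit words, hence the ">> 2" on the pitches above.
 */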
static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.prepare_fb = vop_plane_prepare_fb,
	.cleanup_fb = vop_plane_cleanup_fb,
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

void vop_atomic_plane_reset(struct drm_plane *plane)
{
	struct vop_plane_state *vop_plane_state =
					to_vop_plane_state(plane->state);

	if (plane->state && plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);

	kfree(vop_plane_state);
	vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return;

	plane->state = &vop_plane_state->base;
	plane->state->plane = plane;
}

struct drm_plane_state *
vop_atomic_plane_duplicate_state(struct drm_plane *plane)
{
	struct vop_plane_state *old_vop_plane_state;
	struct vop_plane_state *vop_plane_state;

	if (WARN_ON(!plane->state))
		return NULL;

	old_vop_plane_state = to_vop_plane_state(plane->state);
	vop_plane_state = kmemdup(old_vop_plane_state,
				  sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane,
						  &vop_plane_state->base);

	return &vop_plane_state->base;
}

static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	struct vop_plane_state *vop_state = to_vop_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);

	kfree(vop_state);
}

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = vop_atomic_plane_reset,
	.atomic_duplicate_state = vop_atomic_plane_duplicate_state,
	.atomic_destroy_state = vop_atomic_plane_destroy_state,
};

static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);

	reinit_completion(&vop->wait_update_complete);
	WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
}

static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
					   struct drm_file *file_priv)
{
	struct drm_device *drm = crtc->dev;
	struct vop *vop = to_vop(crtc);
	struct drm_pending_vblank_event *e;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	e = vop->event;
	if (e && e->base.file_priv == file_priv) {
		vop->event = NULL;

		e->base.destroy(&e->base);
		file_priv->event_space += sizeof(e->event);
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);
}

static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.wait_for_update = vop_crtc_wait_for_update,
	.cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}
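
/*
 * Example (added for clarity, not in the original file): mode->clock is in
 * kHz, so a 1080p mode asking for 148500 kHz is rounded through
 * clk_round_rate(vop->dclk, 148500000). If the dclk can only produce,
 * say, 148000000 Hz, the adjusted mode ends up with clock = 148000, and
 * that is the rate later requested in vop_crtc_enable().
 */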
static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t val;

	vop_enable(crtc);

	/*
	 * If the dclk rate is zero, scanout has stopped and there is
	 * nothing to wait for.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * The rk3288 vop timing registers take effect immediately;
		 * reprogramming the display timing while scanout is active
		 * may cause tearing.
		 *
		 * Vop standby takes effect at the end of the current frame;
		 * when the dsp hold valid irq fires, standby is complete.
		 *
		 * mode set:
		 *    standby and wait complete --> |----
		 *                                  | display time
		 *                                  |----
		 *                                  |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */
		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	val = 0x8;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, val);
	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	default:
		DRM_ERROR("unsupported connector_type[%d]\n", s->output_type);
	}
	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);
}
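
/*
 * Worked example (added for clarity, not in the original file): for a
 * typical 1920x1080 mode with hsync_start = 2008, hsync_end = 2052 and
 * htotal = 2200, hsync_len = 44 and hact_st = 2200 - 2008 = 192, so the
 * active horizontal region programmed into hact_st_end runs from 192 to
 * 192 + 1920 = 2112. The vertical registers are derived the same way
 * from vsync_start/vsync_end/vtotal.
 */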
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct vop *vop = to_vop(crtc);

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct vop *vop = to_vop(crtc);

	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
};

static bool vop_win_pending_is_complete(struct vop_win *vop_win)
{
	struct drm_plane *plane = &vop_win->base;
	struct vop_plane_state *state = to_vop_plane_state(plane->state);
	dma_addr_t yrgb_mst;

	if (!state->enable)
		return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;

	yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);

	return yrgb_mst == state->yrgb_mst;
}

static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;
	int i;

	for (i = 0; i < vop->data->win_size; i++) {
		if (!vop_win_pending_is_complete(&vop->win[i]))
			return;
	}

	if (vop->event) {
		spin_lock_irqsave(&drm->event_lock, flags);

		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;

		spin_unlock_irqrestore(&drm->event_lock, flags);
	}
	if (!completion_done(&vop->wait_update_complete))
		complete(&vop->wait_update_complete);
}

static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * interrupt register has interrupt status, enable and clear bits, we
	 * must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);

	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);

	return ret;
}
static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_ERROR("failed to initialize plane\n");
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_ERROR("failed to initialize overlay plane\n");
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_ERROR("no port node found in %s\n",
			  dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->wait_update_complete);
	crtc->port = port;
	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}

static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	rockchip_unregister_crtc_funcs(crtc);
	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now. Why?
	 *
	 * The planes are "&vop->win[i].base". That means the memory is
	 * all part of the big "struct vop" chunk of memory. That memory
	 * was devm allocated and associated with this component. We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
}
static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	const struct vop_reg_data *init_table = vop_data->init_table;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		dev_err(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		dev_err(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		dev_err(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare dclk\n");
		return ret;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * Perform the ahb reset to bring all vop registers back to their
	 * default values.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	memcpy(vop->regsbak, vop->regs, vop->len);

	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

	vop_cfg_done(vop);

	/*
	 * Perform the dclk reset so that all of the above config takes
	 * effect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
	return ret;
}
/*
 * Initialize the vop->win array elements.
 */
static void vop_win_init(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	unsigned int i;

	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = &vop_data->win[i];

		vop_win->data = win_data;
		vop_win->vop = vop;
	}
}

static int vop_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct vop_data *vop_data;
	struct drm_device *drm_dev = data;
	struct vop *vop;
	struct resource *res;
	size_t alloc_size;
	int ret, irq;

	vop_data = of_device_get_match_data(dev);
	if (!vop_data)
		return -ENODEV;

	/* Allocate vop struct and its vop_win array */
	alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
	vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
	if (!vop)
		return -ENOMEM;

	vop->dev = dev;
	vop->data = vop_data;
	vop->drm_dev = drm_dev;
	dev_set_drvdata(dev, vop);

	vop_win_init(vop);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vop->len = resource_size(res);
	vop->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(vop->regs))
		return PTR_ERR(vop->regs);

	vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
	if (!vop->regsbak)
		return -ENOMEM;

	ret = vop_initial(vop);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "cannot find irq for vop\n");
		return irq;
	}
	vop->irq = (unsigned int)irq;

	spin_lock_init(&vop->reg_lock);
	spin_lock_init(&vop->irq_lock);

	mutex_init(&vop->vsync_mutex);

	ret = devm_request_irq(dev, vop->irq, vop_isr,
			       IRQF_SHARED, dev_name(dev), vop);
	if (ret)
		return ret;

	/* IRQ is initially disabled; it gets enabled in power_on */
	disable_irq(vop->irq);

	ret = vop_create_crtc(vop);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);
	return 0;
}

static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}

const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);