rockchip_drm_vop.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502
  1. /*
  2. * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  3. * Author:Mark Yao <mark.yao@rock-chips.com>
  4. *
  5. * This software is licensed under the terms of the GNU General Public
  6. * License version 2, as published by the Free Software Foundation, and
  7. * may be copied, distributed, and modified under those terms.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
  30. #include "rockchip_drm_drv.h"
  31. #include "rockchip_drm_gem.h"
  32. #include "rockchip_drm_fb.h"
  33. #include "rockchip_drm_vop.h"
/*
 * Register-field access helpers.
 *
 * Every programmable field is described by a struct vop_reg
 * (offset/mask/shift).  These macros expand to masked read-modify-write
 * accesses that go through the shadow copy in vop->regsbak, so fields
 * sharing one register do not clobber each other.  RELAXED variants use
 * writel_relaxed() (no barrier); NORMAL uses writel().
 */
#define __REG_SET_RELAXED(x, off, mask, shift, v) \
		vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
#define __REG_SET_NORMAL(x, off, mask, shift, v) \
		vop_mask_write(x, off, (mask) << shift, (v) << shift)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)

/* Per-window and scaler fields are written relaxed; they are latched by
 * cfg_done, so no ordering barrier is needed between them. */
#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

/* NOTE(review): reads the field from data->ctrl, not data->intr —
 * looks intentional for this SoC generation, but confirm. */
#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)

/* Apply @v to every interrupt bit whose type matches @type. */
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)
#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

/* container_of() conversions from the embedded DRM objects. */
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
/*
 * Driver-private plane state; base MUST stay the first member so the
 * to_vop_plane_state() container_of() also maps NULL to NULL.
 */
struct vop_plane_state {
	struct drm_plane_state base;
	int format;		/* VOP_FMT_* value, or negative errno */
	struct drm_rect src;	/* clipped source rect, 16.16 fixed point */
	struct drm_rect dest;	/* clipped destination rect, CRTC coords */
	dma_addr_t yrgb_mst;	/* DMA address of the Y/RGB plane start */
	bool enable;		/* plane visible after atomic_check */
};
/* One hardware overlay window, wrapping a DRM plane. */
struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;	/* per-SoC window description */
	struct vop *vop;

	/* protected by dev->event_lock */
	bool enable;		/* window currently scanning out */
	dma_addr_t yrgb_mst;	/* address last written to hardware */
};
/* Per-device state for one VOP (Video Output Processor) instance. */
struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* mutex vsync_ work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;	/* standby handshake */
	struct completion wait_update_complete;	/* frame-start handshake */

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	const struct vop_data *data;	/* per-SoC register layout */

	uint32_t *regsbak;	/* shadow copy of the register file */
	void __iomem *regs;
	/* physical map length of vop register */
	uint32_t len;

	/* one time only one process allowed to config the register */
	spinlock_t reg_lock;
	/* lock vop irq reg */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHB clk */
	struct clk *hclk;
	/* vop dclk (pixel clock) */
	struct clk *dclk;
	/* vop share memory frequency */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];	/* flexible array, one entry per window */
};
/* Write @v to register @offset and mirror it into the regsbak shadow. */
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}
/* Raw 32-bit register read (bypasses the shadow copy). */
static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}
/* Read one field described by @reg, shifted down and masked. */
static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}
  136. static inline void vop_mask_write(struct vop *vop, uint32_t offset,
  137. uint32_t mask, uint32_t v)
  138. {
  139. if (mask) {
  140. uint32_t cached_val = vop->regsbak[offset >> 2];
  141. cached_val = (cached_val & ~mask) | v;
  142. writel(cached_val, vop->regs + offset);
  143. vop->regsbak[offset >> 2] = cached_val;
  144. }
  145. }
  146. static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
  147. uint32_t mask, uint32_t v)
  148. {
  149. if (mask) {
  150. uint32_t cached_val = vop->regsbak[offset >> 2];
  151. cached_val = (cached_val & ~mask) | v;
  152. writel_relaxed(cached_val, vop->regs + offset);
  153. vop->regsbak[offset >> 2] = cached_val;
  154. }
  155. }
  156. static inline uint32_t vop_get_intr_type(struct vop *vop,
  157. const struct vop_reg *reg, int type)
  158. {
  159. uint32_t i, ret = 0;
  160. uint32_t regs = vop_read_reg(vop, 0, reg);
  161. for (i = 0; i < vop->data->intr->nintrs; i++) {
  162. if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
  163. ret |= vop->data->intr->intrs[i];
  164. }
  165. return ret;
  166. }
/* Set cfg_done so the hardware latches the newly written configuration. */
static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}
  171. static bool has_rb_swapped(uint32_t format)
  172. {
  173. switch (format) {
  174. case DRM_FORMAT_XBGR8888:
  175. case DRM_FORMAT_ABGR8888:
  176. case DRM_FORMAT_BGR888:
  177. case DRM_FORMAT_BGR565:
  178. return true;
  179. default:
  180. return false;
  181. }
  182. }
  183. static enum vop_data_format vop_convert_format(uint32_t format)
  184. {
  185. switch (format) {
  186. case DRM_FORMAT_XRGB8888:
  187. case DRM_FORMAT_ARGB8888:
  188. case DRM_FORMAT_XBGR8888:
  189. case DRM_FORMAT_ABGR8888:
  190. return VOP_FMT_ARGB8888;
  191. case DRM_FORMAT_RGB888:
  192. case DRM_FORMAT_BGR888:
  193. return VOP_FMT_RGB888;
  194. case DRM_FORMAT_RGB565:
  195. case DRM_FORMAT_BGR565:
  196. return VOP_FMT_RGB565;
  197. case DRM_FORMAT_NV12:
  198. return VOP_FMT_YUV420SP;
  199. case DRM_FORMAT_NV16:
  200. return VOP_FMT_YUV422SP;
  201. case DRM_FORMAT_NV24:
  202. return VOP_FMT_YUV444SP;
  203. default:
  204. DRM_ERROR("unsupport format[%08x]\n", format);
  205. return -EINVAL;
  206. }
  207. }
  208. static bool is_yuv_support(uint32_t format)
  209. {
  210. switch (format) {
  211. case DRM_FORMAT_NV12:
  212. case DRM_FORMAT_NV16:
  213. case DRM_FORMAT_NV24:
  214. return true;
  215. default:
  216. return false;
  217. }
  218. }
  219. static bool is_alpha_support(uint32_t format)
  220. {
  221. switch (format) {
  222. case DRM_FORMAT_ARGB8888:
  223. case DRM_FORMAT_ABGR8888:
  224. return true;
  225. default:
  226. return false;
  227. }
  228. }
/*
 * Compute the fixed-point scale factor for one axis.
 *
 * @mode:          SCALE_NONE/SCALE_UP/SCALE_DOWN for this axis
 * @src, @dst:     source and destination sizes in pixels/lines
 * @is_horizontal: selects the horizontal coefficient formulas
 * @vsu_mode:      vertical scale-up mode (bilinear vs bicubic); only
 *                 used for the vertical axis
 * @vskiplines:    out-param; when non-NULL and scaling down vertically,
 *                 receives the line-skip count and the factor is
 *                 computed for the reduced source height
 *
 * Returns the 16-bit scale factor; defaults to 1.0 in fixed point when
 * @mode is SCALE_NONE.
 */
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}
/*
 * Program the scaler for one window: scale factors, line-buffer mode
 * and up/down-scale filter modes for both the Y/RGB and (for YUV
 * formats) the chroma planes.
 *
 * Windows without the extended scaler block (!scl->ext) only get the
 * two simple factor registers; everything else is left untouched.
 * Errors are logged and the scaler is simply not (re)programmed.
 */
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	/* chroma plane is subsampled relative to luma */
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	/* 3840 is the line-buffer width limit of the scaler. */
	if (dst_w > 3840) {
		DRM_ERROR("Maximum destination width (3840) exceeded\n");
		return;
	}

	/* Simple scaler: only the two coarse factor registers exist. */
	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	/*
	 * Line-buffer mode is chosen from the widest line the scaler must
	 * hold: the destination when down-scaling, the source otherwise.
	 */
	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);

		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		/* Widest mode has too little buffering for vertical scaling. */
		if (yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_ERROR("ERROR : not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_ERROR("ERROR : not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	/* Y/RGB plane factors and filter modes. */
	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		/* Chroma plane factors and filter modes. */
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}
/* Unmask the "dsp hold valid" interrupt (fires when standby completes). */
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
/* Mask the "dsp hold valid" interrupt again after the standby handshake. */
static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
  368. static void vop_enable(struct drm_crtc *crtc)
  369. {
  370. struct vop *vop = to_vop(crtc);
  371. int ret;
  372. ret = pm_runtime_get_sync(vop->dev);
  373. if (ret < 0) {
  374. dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
  375. return;
  376. }
  377. ret = clk_enable(vop->hclk);
  378. if (ret < 0) {
  379. dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
  380. return;
  381. }
  382. ret = clk_enable(vop->dclk);
  383. if (ret < 0) {
  384. dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
  385. goto err_disable_hclk;
  386. }
  387. ret = clk_enable(vop->aclk);
  388. if (ret < 0) {
  389. dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
  390. goto err_disable_dclk;
  391. }
  392. /*
  393. * Slave iommu shares power, irq and clock with vop. It was associated
  394. * automatically with this master device via common driver code.
  395. * Now that we have enabled the clock we attach it to the shared drm
  396. * mapping.
  397. */
  398. ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
  399. if (ret) {
  400. dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
  401. goto err_disable_aclk;
  402. }
  403. memcpy(vop->regs, vop->regsbak, vop->len);
  404. /*
  405. * At here, vop clock & iommu is enable, R/W vop regs would be safe.
  406. */
  407. vop->is_enabled = true;
  408. spin_lock(&vop->reg_lock);
  409. VOP_CTRL_SET(vop, standby, 0);
  410. spin_unlock(&vop->reg_lock);
  411. enable_irq(vop->irq);
  412. drm_crtc_vblank_on(crtc);
  413. return;
  414. err_disable_aclk:
  415. clk_disable(vop->aclk);
  416. err_disable_dclk:
  417. clk_disable(vop->dclk);
  418. err_disable_hclk:
  419. clk_disable(vop->hclk);
  420. }
/*
 * Shut the CRTC down in strict order: disable all windows, enter
 * standby and wait for the hardware to confirm it (dsp hold valid irq)
 * before touching clocks or detaching the IOMMU — otherwise the memory
 * bus can hang on an in-flight scanout.
 */
static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int i;

	WARN_ON(vop->event);

	/*
	 * We need to make sure that all windows are disabled before we
	 * disable that crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby will take effect at end of current frame,
	 * if dsp hold valid irq happen, it means standby complete.
	 *
	 * we must wait standby complete when we want to disable aclk,
	 * if not, memory bus maybe dead.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * vop standby complete, so iommu detach is safe.
	 */
	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);

	clk_disable(vop->dclk);
	clk_disable(vop->aclk);
	clk_disable(vop->hclk);
	pm_runtime_put(vop->dev);

	/* Complete any pending flip event since no vblank will fire now. */
	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}
}
/* drm_plane_funcs.destroy: the vop_win is owned by struct vop, so only
 * the DRM core bookkeeping needs cleaning up here. */
static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}
/*
 * drm_plane_helper_funcs.prepare_fb: take a reference so the scanout
 * buffer outlives the commit.
 *
 * NOTE(review): this references plane->state->fb (the current state),
 * not new_state->fb — confirm that is the intended pairing with
 * cleanup_fb, which drops old_state->fb.
 */
static int vop_plane_prepare_fb(struct drm_plane *plane,
				const struct drm_plane_state *new_state)
{
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);

	return 0;
}
/* drm_plane_helper_funcs.cleanup_fb: drop the reference taken in
 * prepare_fb once the old framebuffer is off the screen. */
static void vop_plane_cleanup_fb(struct drm_plane *plane,
				 const struct drm_plane_state *old_state)
{
	if (old_state->fb)
		drm_framebuffer_unreference(old_state->fb);
}
/*
 * Validate the requested plane state: clip src/dest against the CRTC
 * mode, enforce the hardware scaling range (1/8x..8x when the window
 * has a scaler, none otherwise), reject unsupported pixel formats and
 * odd YUV start positions.  Stores the clipped rects, the hardware
 * format code and the resulting enable flag in the private state.
 */
static int vop_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->fb;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	bool visible;
	int ret;
	struct drm_rect *dest = &vop_plane_state->dest;
	struct drm_rect *src = &vop_plane_state->src;
	struct drm_rect clip;
	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
					DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
					DRM_PLANE_HELPER_NO_SCALING;

	/* No CRTC or no framebuffer simply means "plane disabled". */
	if (!crtc || !fb)
		goto out_disable;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/* src_* are 16.16 fixed point; dest is in whole CRTC pixels. */
	src->x1 = state->src_x;
	src->y1 = state->src_y;
	src->x2 = state->src_x + state->src_w;
	src->y2 = state->src_y + state->src_h;
	dest->x1 = state->crtc_x;
	dest->y1 = state->crtc_y;
	dest->x2 = state->crtc_x + state->crtc_w;
	dest->y2 = state->crtc_y + state->crtc_h;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	ret = drm_plane_helper_check_update(plane, crtc, state->fb,
					    src, dest, &clip,
					    state->rotation,
					    min_scale,
					    max_scale,
					    true, true, &visible);
	if (ret)
		return ret;

	if (!visible)
		goto out_disable;

	vop_plane_state->format = vop_convert_format(fb->pixel_format);
	if (vop_plane_state->format < 0)
		return vop_plane_state->format;

	/*
	 * Src.x1 can be odd when do clip, but yuv plane start point
	 * need align with 2 pixel.
	 */
	if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2))
		return -EINVAL;

	vop_plane_state->enable = true;

	return 0;

out_disable:
	vop_plane_state->enable = false;
	return 0;
}
  547. static void vop_plane_atomic_disable(struct drm_plane *plane,
  548. struct drm_plane_state *old_state)
  549. {
  550. struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
  551. struct vop_win *vop_win = to_vop_win(plane);
  552. const struct vop_win_data *win = vop_win->data;
  553. struct vop *vop = to_vop(old_state->crtc);
  554. if (!old_state->crtc)
  555. return;
  556. spin_lock_irq(&plane->dev->event_lock);
  557. vop_win->enable = false;
  558. vop_win->yrgb_mst = 0;
  559. spin_unlock_irq(&plane->dev->event_lock);
  560. spin_lock(&vop->reg_lock);
  561. VOP_WIN_SET(vop, win, enable, 0);
  562. spin_unlock(&vop->reg_lock);
  563. vop_plane_state->enable = false;
  564. }
/*
 * Program one window from its checked atomic state: DMA addresses
 * (Y/RGB and, for semi-planar YUV, the chroma plane), geometry,
 * optional scaler setup, red/blue swap and alpha blending, then enable
 * the window.  All register writes happen under reg_lock and only take
 * effect once cfg_done is latched elsewhere in the commit.
 */
static void vop_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_crtc *crtc = state->crtc;
	struct vop_win *vop_win = to_vop_win(plane);
	struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
	const struct vop_win_data *win = vop_win->data;
	struct vop *vop = to_vop(state->crtc);
	struct drm_framebuffer *fb = state->fb;
	unsigned int actual_w, actual_h;
	unsigned int dsp_stx, dsp_sty;
	uint32_t act_info, dsp_info, dsp_st;
	struct drm_rect *src = &vop_plane_state->src;
	struct drm_rect *dest = &vop_plane_state->dest;
	struct drm_gem_object *obj, *uv_obj;
	struct rockchip_gem_object *rk_obj, *rk_uv_obj;
	unsigned long offset;
	dma_addr_t dma_addr;
	uint32_t val;
	bool rb_swap;

	/*
	 * can't update plane when vop is disabled.
	 */
	if (WARN_ON(!crtc))
		return;

	if (WARN_ON(!vop->is_enabled))
		return;

	/* atomic_check decided the plane is invisible: disable instead. */
	if (!vop_plane_state->enable) {
		vop_plane_atomic_disable(plane, old_state);
		return;
	}

	obj = rockchip_fb_get_gem_obj(fb, 0);
	rk_obj = to_rockchip_obj(obj);

	/* src is 16.16 fixed point; registers encode size as (n - 1). */
	actual_w = drm_rect_width(src) >> 16;
	actual_h = drm_rect_height(src) >> 16;
	act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);

	dsp_info = (drm_rect_height(dest) - 1) << 16;
	dsp_info |= (drm_rect_width(dest) - 1) & 0xffff;

	/* Display start is measured from the start of the blanking area. */
	dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start;
	dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
	dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff);

	offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
	offset += (src->y1 >> 16) * fb->pitches[0];
	vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];

	/* Publish the state the vblank irq handler inspects. */
	spin_lock_irq(&plane->dev->event_lock);
	vop_win->enable = true;
	vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
	spin_unlock_irq(&plane->dev->event_lock);

	spin_lock(&vop->reg_lock);

	VOP_WIN_SET(vop, win, format, vop_plane_state->format);
	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
	VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
	if (is_yuv_support(fb->pixel_format)) {
		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);

		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
		rk_uv_obj = to_rockchip_obj(uv_obj);

		offset = (src->x1 >> 16) * bpp / hsub;
		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;

		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
	}

	if (win->phy->scl)
		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
				    drm_rect_width(dest), drm_rect_height(dest),
				    fb->pixel_format);

	VOP_WIN_SET(vop, win, act_info, act_info);
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->pixel_format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	if (is_alpha_support(fb->pixel_format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}
/* Atomic plane helper callbacks wired into the DRM core. */
static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.prepare_fb = vop_plane_prepare_fb,
	.cleanup_fb = vop_plane_cleanup_fb,
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};
/*
 * drm_plane_funcs.reset: free any previous private state (dropping its
 * framebuffer reference) and install a fresh zeroed one.
 *
 * Safe when plane->state is NULL on first call: base is the first
 * member of struct vop_plane_state, so to_vop_plane_state(NULL) yields
 * NULL and kfree(NULL) is a no-op.
 */
static void vop_atomic_plane_reset(struct drm_plane *plane)
{
	struct vop_plane_state *vop_plane_state =
					to_vop_plane_state(plane->state);

	if (plane->state && plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);

	kfree(vop_plane_state);
	vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return;

	plane->state = &vop_plane_state->base;
	plane->state->plane = plane;
}
/*
 * drm_plane_funcs.atomic_duplicate_state: copy the whole private state
 * with kmemdup(), then let the helper re-initialise the embedded base
 * state (which also takes a framebuffer reference).
 */
static struct drm_plane_state *
vop_atomic_plane_duplicate_state(struct drm_plane *plane)
{
	struct vop_plane_state *old_vop_plane_state;
	struct vop_plane_state *vop_plane_state;

	if (WARN_ON(!plane->state))
		return NULL;

	old_vop_plane_state = to_vop_plane_state(plane->state);
	vop_plane_state = kmemdup(old_vop_plane_state,
				  sizeof(*vop_plane_state), GFP_KERNEL);
	if (!vop_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane,
						  &vop_plane_state->base);

	return &vop_plane_state->base;
}
/* drm_plane_funcs.atomic_destroy_state: release base-state resources,
 * then free the containing private state. */
static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	struct vop_plane_state *vop_state = to_vop_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);

	kfree(vop_state);
}
/* Core plane ops: atomic helpers plus the custom private-state hooks. */
static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = vop_atomic_plane_reset,
	.atomic_duplicate_state = vop_atomic_plane_duplicate_state,
	.atomic_destroy_state = vop_atomic_plane_destroy_state,
};
/* Unmask the frame-start (FS) interrupt used as the vblank source. */
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}
/* Mask the frame-start (FS) interrupt when vblank events are not needed. */
static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}
  726. static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
  727. {
  728. struct vop *vop = to_vop(crtc);
  729. reinit_completion(&vop->wait_update_complete);
  730. WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
  731. }
/* Rockchip-specific CRTC hooks registered with the rockchip_drm core. */
static const struct rockchip_crtc_funcs private_crtc_funcs = {
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.wait_for_update = vop_crtc_wait_for_update,
};
  737. static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
  738. const struct drm_display_mode *mode,
  739. struct drm_display_mode *adjusted_mode)
  740. {
  741. struct vop *vop = to_vop(crtc);
  742. adjusted_mode->clock =
  743. clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
  744. return true;
  745. }
/*
 * Power up the VOP and program the full display timing for the adjusted
 * mode, then release standby so scanout starts on the new configuration.
 */
static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	/* Active-area start/end measured from the start of hsync. */
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t val;

	WARN_ON(vop->event);

	vop_enable(crtc);
	/*
	 * If the dclk rate is zero, scanout is stopped, so there is no
	 * running frame to wait out before reprogramming the timings.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * RK3288 VOP timing registers take effect immediately;
		 * rewriting them mid-frame may cause tearing.
		 *
		 * VOP standby takes effect at the end of the current frame;
		 * the "dsp hold valid" irq signals that standby is complete.
		 *
		 * mode set:
		 * standby and wait complete --> |----
		 * | display time
		 * |----
		 * |---> dsp hold irq
		 * configure display timing --> |
		 * standby exit |
		 * | new frame start.
		 */
		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);
		spin_lock(&vop->reg_lock);
		VOP_CTRL_SET(vop, standby, 1);
		spin_unlock(&vop->reg_lock);
		wait_for_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_disable(vop);
	}
	/*
	 * Pin polarity: bit0 = hsync active-high, bit1 = vsync active-high
	 * (set unless the mode flags request negative sync); 0x8 presumably
	 * selects the dclk/den polarity default — TODO confirm against TRM.
	 */
	val = 0x8;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
	val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
	VOP_CTRL_SET(vop, pin_pol, val);
	/* Route the output to the encoder chosen during atomic check. */
	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	default:
		DRM_ERROR("unsupport connector_type[%d]\n", s->output_type);
	}
	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	/* Horizontal timings: total/sync-width, active window, post scaler. */
	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	/* Vertical timings, same layout as the horizontal registers. */
	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	/* Leave standby: scanout starts with the new timings. */
	VOP_CTRL_SET(vop, standby, 0);
}
/*
 * Commit point of an atomic update: tell the hardware to latch all queued
 * register writes at the next frame start.
 */
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct vop *vop = to_vop(crtc);

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);
	vop_cfg_done(vop);
	spin_unlock(&vop->reg_lock);
}
  837. static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
  838. struct drm_crtc_state *old_crtc_state)
  839. {
  840. struct vop *vop = to_vop(crtc);
  841. spin_lock_irq(&crtc->dev->event_lock);
  842. if (crtc->state->event) {
  843. WARN_ON(drm_crtc_vblank_get(crtc) != 0);
  844. WARN_ON(vop->event);
  845. vop->event = crtc->state->event;
  846. crtc->state->event = NULL;
  847. }
  848. spin_unlock_irq(&crtc->dev->event_lock);
  849. }
/* Atomic-helper CRTC callbacks (vop_crtc_disable is defined earlier). */
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup = vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};
/* CRTC .destroy hook: the CRTC is embedded in struct vop, nothing to free. */
static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}
  861. static void vop_crtc_reset(struct drm_crtc *crtc)
  862. {
  863. if (crtc->state)
  864. __drm_atomic_helper_crtc_destroy_state(crtc->state);
  865. kfree(crtc->state);
  866. crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
  867. if (crtc->state)
  868. crtc->state->crtc = crtc;
  869. }
  870. static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
  871. {
  872. struct rockchip_crtc_state *rockchip_state;
  873. rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
  874. if (!rockchip_state)
  875. return NULL;
  876. __drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
  877. return &rockchip_state->base;
  878. }
  879. static void vop_crtc_destroy_state(struct drm_crtc *crtc,
  880. struct drm_crtc_state *state)
  881. {
  882. struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);
  883. __drm_atomic_helper_crtc_destroy_state(&s->base);
  884. kfree(s);
  885. }
/* CRTC vfuncs: modeset/flip via atomic helpers, subclassed state handling. */
static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
};
  894. static bool vop_win_pending_is_complete(struct vop_win *vop_win)
  895. {
  896. dma_addr_t yrgb_mst;
  897. if (!vop_win->enable)
  898. return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
  899. yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
  900. return yrgb_mst == vop_win->yrgb_mst;
  901. }
/*
 * Called from the frame-start interrupt: once every window has latched its
 * pending configuration, deliver any queued page-flip event and wake
 * waiters in vop_crtc_wait_for_update().
 */
static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;
	int i;

	/* Any window still mid-update? Then the flip is not finished yet. */
	for (i = 0; i < vop->data->win_size; i++) {
		if (!vop_win_pending_is_complete(&vop->win[i]))
			return;
	}

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		/* Release the ref taken in vop_crtc_atomic_begin(). */
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	/* Unblock vop_crtc_wait_for_update(), if anyone is waiting. */
	if (!completion_done(&vop->wait_update_complete))
		complete(&vop->wait_update_complete);
}
/*
 * Shared interrupt handler for the VOP. Reads and acknowledges the active
 * interrupt sources, then dispatches: DSP_HOLD_VALID completes the standby
 * handshake in vop_crtc_enable(); FS (frame start) drives vblank handling.
 */
static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long flags;
	int ret = IRQ_NONE;

	/*
	 * interrupt register has interrupt status, enable and clear bits, we
	 * must hold irq_lock to avoid a race with enable/disable_vblank().
	 */
	spin_lock_irqsave(&vop->irq_lock, flags);
	active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
	/* Clear all active interrupt sources */
	if (active_irqs)
		VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);
	spin_unlock_irqrestore(&vop->irq_lock, flags);

	/* This is expected for vop iommu irqs, since the irq is shared */
	if (!active_irqs)
		return IRQ_NONE;

	if (active_irqs & DSP_HOLD_VALID_INTR) {
		/* Standby has taken effect; wake vop_crtc_enable(). */
		complete(&vop->dsp_hold_completion);
		active_irqs &= ~DSP_HOLD_VALID_INTR;
		ret = IRQ_HANDLED;
	}

	if (active_irqs & FS_INTR) {
		drm_crtc_handle_vblank(crtc);
		vop_handle_vblank(vop);
		active_irqs &= ~FS_INTR;
		ret = IRQ_HANDLED;
	}

	/* Unhandled irqs are spurious. */
	if (active_irqs)
		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);

	return ret;
}
/*
 * Create the CRTC and all planes for this VOP instance and wire them into
 * the DRM device. Returns 0 on success or a negative errno; on failure all
 * partially created planes (and the CRTC, if created) are cleaned up.
 */
static int vop_create_crtc(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	struct device *dev = vop->dev;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
	struct drm_crtc *crtc = &vop->crtc;
	struct device_node *port;
	int ret;
	int i;

	/*
	 * Create drm_plane for primary and cursor planes first, since we need
	 * to pass them to drm_crtc_init_with_planes, which sets the
	 * "possible_crtcs" to the newly initialized crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;

		if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
		    win_data->type != DRM_PLANE_TYPE_CURSOR)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       0, &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_ERROR("failed to initialize plane\n");
			goto err_cleanup_planes;
		}

		plane = &vop_win->base;
		drm_plane_helper_add(plane, &plane_helper_funcs);
		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary = plane;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor = plane;
	}

	ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
					&vop_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_planes;

	drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);

	/*
	 * Create drm_planes for overlay windows with possible_crtcs restricted
	 * to the newly created crtc.
	 */
	for (i = 0; i < vop_data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win_data = vop_win->data;
		unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);

		if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
			continue;

		ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
					       possible_crtcs,
					       &vop_plane_funcs,
					       win_data->phy->data_formats,
					       win_data->phy->nformats,
					       win_data->type, NULL);
		if (ret) {
			DRM_ERROR("failed to initialize overlay plane\n");
			goto err_cleanup_crtc;
		}
		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
	}

	/* The OF port node links this CRTC to its encoders in the DT graph. */
	port = of_get_child_by_name(dev->of_node, "port");
	if (!port) {
		DRM_ERROR("no port node found in %s\n",
			  dev->of_node->full_name);
		ret = -ENOENT;
		goto err_cleanup_crtc;
	}

	init_completion(&vop->dsp_hold_completion);
	init_completion(&vop->wait_update_complete);
	crtc->port = port;
	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(crtc);
err_cleanup_planes:
	/*
	 * NOTE(review): this walks the device-wide plane_list, so it would
	 * also clean up planes belonging to another VOP instance if one was
	 * already bound — verify against multi-VOP configurations.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		drm_plane_cleanup(plane);
	return ret;
}
/*
 * Tear down the CRTC and planes created by vop_create_crtc(), in the
 * reverse order, before the devm-managed memory backing them disappears.
 */
static void vop_destroy_crtc(struct vop *vop)
{
	struct drm_crtc *crtc = &vop->crtc;
	struct drm_device *drm_dev = vop->drm_dev;
	struct drm_plane *plane, *tmp;

	rockchip_unregister_crtc_funcs(crtc);
	/* Drop the reference taken by of_get_child_by_name() at create. */
	of_node_put(crtc->port);

	/*
	 * We need to cleanup the planes now. Why?
	 *
	 * The planes are "&vop->win[i].base". That means the memory is
	 * all part of the big "struct vop" chunk of memory. That memory
	 * was devm allocated and associated with this component. We need to
	 * free it ourselves before vop_unbind() finishes.
	 */
	list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
				 head)
		vop_plane_destroy(plane);

	/*
	 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
	 * references the CRTC.
	 */
	drm_crtc_cleanup(crtc);
}
/*
 * One-time hardware bring-up at bind: acquire clocks, reset the IP, load
 * the init register table, disable every window, and leave the VOP powered
 * down (clocks prepared but disabled, is_enabled = false).
 * Returns 0 on success or a negative errno; errors unwind via gotos.
 */
static int vop_initial(struct vop *vop)
{
	const struct vop_data *vop_data = vop->data;
	const struct vop_reg_data *init_table = vop_data->init_table;
	struct reset_control *ahb_rst;
	int i, ret;

	vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
	if (IS_ERR(vop->hclk)) {
		dev_err(vop->dev, "failed to get hclk source\n");
		return PTR_ERR(vop->hclk);
	}
	vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
	if (IS_ERR(vop->aclk)) {
		dev_err(vop->dev, "failed to get aclk source\n");
		return PTR_ERR(vop->aclk);
	}
	vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
	if (IS_ERR(vop->dclk)) {
		dev_err(vop->dev, "failed to get dclk source\n");
		return PTR_ERR(vop->dclk);
	}

	/* dclk is only prepared here; it gets enabled on CRTC enable. */
	ret = clk_prepare(vop->dclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare dclk\n");
		return ret;
	}

	/* Enable both the hclk and aclk to setup the vop */
	ret = clk_prepare_enable(vop->hclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable hclk\n");
		goto err_unprepare_dclk;
	}

	ret = clk_prepare_enable(vop->aclk);
	if (ret < 0) {
		dev_err(vop->dev, "failed to prepare/enable aclk\n");
		goto err_disable_hclk;
	}

	/*
	 * do hclk_reset, reset all vop registers.
	 */
	ahb_rst = devm_reset_control_get(vop->dev, "ahb");
	if (IS_ERR(ahb_rst)) {
		dev_err(vop->dev, "failed to get ahb reset\n");
		ret = PTR_ERR(ahb_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(ahb_rst);
	usleep_range(10, 20);
	reset_control_deassert(ahb_rst);

	/* Seed the register shadow copy from the freshly reset hardware. */
	memcpy(vop->regsbak, vop->regs, vop->len);

	for (i = 0; i < vop_data->table_size; i++)
		vop_writel(vop, init_table[i].offset, init_table[i].value);

	/* Start with every window disabled. */
	for (i = 0; i < vop_data->win_size; i++) {
		const struct vop_win_data *win = &vop_data->win[i];

		VOP_WIN_SET(vop, win, enable, 0);
	}

	vop_cfg_done(vop);

	/*
	 * do dclk_reset, let all config take affect.
	 */
	vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
	if (IS_ERR(vop->dclk_rst)) {
		dev_err(vop->dev, "failed to get dclk reset\n");
		ret = PTR_ERR(vop->dclk_rst);
		goto err_disable_aclk;
	}
	reset_control_assert(vop->dclk_rst);
	usleep_range(10, 20);
	reset_control_deassert(vop->dclk_rst);

	/* Leave hclk/aclk prepared but gated until the VOP is enabled. */
	clk_disable(vop->hclk);
	clk_disable(vop->aclk);

	vop->is_enabled = false;

	return 0;

err_disable_aclk:
	clk_disable_unprepare(vop->aclk);
err_disable_hclk:
	clk_disable_unprepare(vop->hclk);
err_unprepare_dclk:
	clk_unprepare(vop->dclk);
	return ret;
}
  1147. /*
  1148. * Initialize the vop->win array elements.
  1149. */
  1150. static void vop_win_init(struct vop *vop)
  1151. {
  1152. const struct vop_data *vop_data = vop->data;
  1153. unsigned int i;
  1154. for (i = 0; i < vop_data->win_size; i++) {
  1155. struct vop_win *vop_win = &vop->win[i];
  1156. const struct vop_win_data *win_data = &vop_data->win[i];
  1157. vop_win->data = win_data;
  1158. vop_win->vop = vop;
  1159. }
  1160. }
  1161. static int vop_bind(struct device *dev, struct device *master, void *data)
  1162. {
  1163. struct platform_device *pdev = to_platform_device(dev);
  1164. const struct vop_data *vop_data;
  1165. struct drm_device *drm_dev = data;
  1166. struct vop *vop;
  1167. struct resource *res;
  1168. size_t alloc_size;
  1169. int ret, irq;
  1170. vop_data = of_device_get_match_data(dev);
  1171. if (!vop_data)
  1172. return -ENODEV;
  1173. /* Allocate vop struct and its vop_win array */
  1174. alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
  1175. vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
  1176. if (!vop)
  1177. return -ENOMEM;
  1178. vop->dev = dev;
  1179. vop->data = vop_data;
  1180. vop->drm_dev = drm_dev;
  1181. dev_set_drvdata(dev, vop);
  1182. vop_win_init(vop);
  1183. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1184. vop->len = resource_size(res);
  1185. vop->regs = devm_ioremap_resource(dev, res);
  1186. if (IS_ERR(vop->regs))
  1187. return PTR_ERR(vop->regs);
  1188. vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
  1189. if (!vop->regsbak)
  1190. return -ENOMEM;
  1191. ret = vop_initial(vop);
  1192. if (ret < 0) {
  1193. dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
  1194. return ret;
  1195. }
  1196. irq = platform_get_irq(pdev, 0);
  1197. if (irq < 0) {
  1198. dev_err(dev, "cannot find irq for vop\n");
  1199. return irq;
  1200. }
  1201. vop->irq = (unsigned int)irq;
  1202. spin_lock_init(&vop->reg_lock);
  1203. spin_lock_init(&vop->irq_lock);
  1204. mutex_init(&vop->vsync_mutex);
  1205. ret = devm_request_irq(dev, vop->irq, vop_isr,
  1206. IRQF_SHARED, dev_name(dev), vop);
  1207. if (ret)
  1208. return ret;
  1209. /* IRQ is initially disabled; it gets enabled in power_on */
  1210. disable_irq(vop->irq);
  1211. ret = vop_create_crtc(vop);
  1212. if (ret)
  1213. return ret;
  1214. pm_runtime_enable(&pdev->dev);
  1215. return 0;
  1216. }
/*
 * Component unbind: stop runtime PM and tear down the CRTC/planes before
 * the devm-managed allocations backing them are released.
 */
static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}
/* Component glue: the rockchip_drm master driver binds/unbinds each VOP. */
const struct component_ops vop_component_ops = {
	.bind = vop_bind,
	.unbind = vop_unbind,
};
EXPORT_SYMBOL_GPL(vop_component_ops);