vc4_plane.c

/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 plane module
 *
 * Each DRM plane is a layer of pixels being scanned out by the HVS.
 *
 * At atomic modeset check time, we compute the HVS display element
 * state that would be necessary for displaying the plane (giving us a
 * chance to figure out if a plane configuration is invalid), then at
 * atomic flush time the CRTC will ask us to write our element state
 * into the region of the HVS that it has allocated for us.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>

#include "vc4_drv.h"
#include "vc4_regs.h"
enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};
struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;
};
static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}
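
/* Table mapping the DRM fourcc formats we expose to the HVS's native
 * pixel formats, along with the component ordering and alpha handling
 * the HVS needs for each of them.
 */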
static const struct hvs_format {
	u32 drm; /* DRM_FORMAT_* */
	u32 hvs; /* HVS_FORMAT_* */
	u32 pixel_order;
	bool has_alpha;
	bool flip_cbcr;
} hvs_formats[] = {
	{
		.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
	},
	{
		.drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
	},
	{
		.drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XBGR, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
	},
	{
		.drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = false,
	},
	{
		.drm = DRM_FORMAT_YUV422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
	},
	{
		.drm = DRM_FORMAT_YVU422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.flip_cbcr = true,
	},
	{
		.drm = DRM_FORMAT_YUV420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
	},
	{
		.drm = DRM_FORMAT_YVU420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.flip_cbcr = true,
	},
	{
		.drm = DRM_FORMAT_NV12,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
	},
	{
		.drm = DRM_FORMAT_NV16,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
	},
};
static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		if (hvs_formats[i].drm == drm_format)
			return &hvs_formats[i];
	}

	return NULL;
}
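
/* Picks the scaler mode for one axis: the PPF (polyphase filter) when
 * upscaling, the TPZ (trapezoidal) filter when downscaling, and no
 * scaling when the source and destination sizes match.
 */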
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
	if (dst > src)
		return VC4_SCALING_PPF;
	else if (dst < src)
		return VC4_SCALING_TPZ;
	else
		return VC4_SCALING_NONE;
}

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->fb && state->crtc;
}
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	if (WARN_ON(!plane->state))
		return NULL;

	vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return NULL;

	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));

	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);

	if (vc4_state->dlist) {
		vc4_state->dlist = kmemdup(vc4_state->dlist,
					   vc4_state->dlist_count * 4,
					   GFP_KERNEL);
		if (!vc4_state->dlist) {
			kfree(vc4_state);
			return NULL;
		}
		vc4_state->dlist_size = vc4_state->dlist_count;
	}

	return &vc4_state->base;
}
static void vc4_plane_destroy_state(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	if (vc4_state->lbm.allocated) {
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		drm_mm_remove_node(&vc4_state->lbm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
	}

	kfree(vc4_state->dlist);
	__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
	kfree(state);
}
/* Called during init to allocate the plane's atomic state. */
static void vc4_plane_reset(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	WARN_ON(plane->state);

	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return;

	plane->state = &vc4_state->base;
	vc4_state->base.plane = plane;
}
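
/* Appends one word to the plane's CPU-side display list, doubling the
 * backing allocation whenever it fills up. On allocation failure the
 * word is silently dropped.
 */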
static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
	if (vc4_state->dlist_count == vc4_state->dlist_size) {
		u32 new_size = max(4u, vc4_state->dlist_count * 2);
		u32 *new_dlist = kmalloc(new_size * 4, GFP_KERNEL);

		if (!new_dlist)
			return;
		memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);

		kfree(vc4_state->dlist);
		vc4_state->dlist = new_dlist;
		vc4_state->dlist_size = new_size;
	}

	vc4_state->dlist[vc4_state->dlist_count++] = val;
}
/* Returns the scl0/scl1 field based on whether the dimensions need to
 * be up/down/non-scaled.
 *
 * This is a replication of a table from the spec.
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
	case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_PPF_V_PPF;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_TPZ_V_PPF;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_PPF_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_PPF_V_NONE;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_NONE_V_PPF;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_NONE_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_TPZ_V_NONE;
	default:
	case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
		/* The unity case is independently handled by
		 * SCALER_CTL0_UNITY.
		 */
		return 0;
	}
}
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	u32 subpixel_src_mask = (1 << 16) - 1;
	u32 format = fb->format->format;
	int num_planes = fb->format->num_planes;
	u32 h_subsample = 1;
	u32 v_subsample = 1;
	int i;

	for (i = 0; i < num_planes; i++)
		vc4_state->offsets[i] = bo->paddr + fb->offsets[i];

	/* We don't support subpixel source positioning for scaling. */
	if ((state->src_x & subpixel_src_mask) ||
	    (state->src_y & subpixel_src_mask) ||
	    (state->src_w & subpixel_src_mask) ||
	    (state->src_h & subpixel_src_mask)) {
		return -EINVAL;
	}

	vc4_state->src_x = state->src_x >> 16;
	vc4_state->src_y = state->src_y >> 16;
	vc4_state->src_w[0] = state->src_w >> 16;
	vc4_state->src_h[0] = state->src_h >> 16;

	vc4_state->crtc_x = state->crtc_x;
	vc4_state->crtc_y = state->crtc_y;
	vc4_state->crtc_w = state->crtc_w;
	vc4_state->crtc_h = state->crtc_h;

	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
						       vc4_state->crtc_w);
	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
						       vc4_state->crtc_h);

	if (num_planes > 1) {
		vc4_state->is_yuv = true;

		h_subsample = drm_format_horz_chroma_subsampling(format);
		v_subsample = drm_format_vert_chroma_subsampling(format);
		vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
		vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;

		vc4_state->x_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_w[1],
					     vc4_state->crtc_w);
		vc4_state->y_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_h[1],
					     vc4_state->crtc_h);

		/* YUV conversion requires that scaling be enabled,
		 * even on a plane that's otherwise 1:1. Choose TPZ
		 * for simplicity.
		 */
		if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
	}

	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[1] == VC4_SCALING_NONE);

	/* No configuring scaling on the cursor plane, since it gets
	 * non-vblank-synced updates, and scaling requires LBM changes
	 * which have to be vblank-synced.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
		return -EINVAL;

	/* Clamp the on-screen start x/y to 0. The hardware doesn't
	 * support negative y, and negative x wastes bandwidth.
	 */
	if (vc4_state->crtc_x < 0) {
		for (i = 0; i < num_planes; i++) {
			u32 cpp = fb->format->cpp[i];
			u32 subs = ((i == 0) ? 1 : h_subsample);

			vc4_state->offsets[i] += (cpp *
						  (-vc4_state->crtc_x) / subs);
		}
		vc4_state->src_w[0] += vc4_state->crtc_x;
		vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
		vc4_state->crtc_x = 0;
	}

	if (vc4_state->crtc_y < 0) {
		for (i = 0; i < num_planes; i++) {
			u32 subs = ((i == 0) ? 1 : v_subsample);

			vc4_state->offsets[i] += (fb->pitches[i] *
						  (-vc4_state->crtc_y) / subs);
		}
		vc4_state->src_h[0] += vc4_state->crtc_y;
		vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
		vc4_state->crtc_y = 0;
	}

	return 0;
}
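
/* Emits the TPZ (trapezoidal downscaling) words for one axis: a 16.16
 * fixed-point src/dst scale factor with a zero initial phase, followed
 * by the scale factor's reciprocal.
 */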
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale, recip;

	scale = (1 << 16) * src / dst;

	/* The specs note that while the reciprocal would be defined
	 * as (1<<32)/scale, ~0 is close enough.
	 */
	recip = ~0 / scale;

	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
			VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}
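
/* Emits the PPF (polyphase filter) word for one axis: a 16.16
 * fixed-point src/dst scale factor with a zero initial phase and the
 * SCALER_PPF_AGC bit set.
 */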
static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale = (1 << 16) * src / dst;

	vc4_dlist_write(vc4_state,
			SCALER_PPF_AGC |
			VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
			VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
}
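
/* Computes the LBM allocation this plane needs. The HVS uses LBM as
 * temporary line storage while scaling, so the requirement depends on
 * the scaling configuration and the wider of the source and
 * destination widths.
 */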
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	/* This is the worst case number. One of the two sizes will
	 * be used depending on the scaling configuration.
	 */
	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
	u32 lbm;

	if (!vc4_state->is_yuv) {
		if (vc4_state->is_unity)
			return 0;
		else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
			lbm = pix_per_line * 8;
		else {
			/* In special cases, this multiplier might be 12. */
			lbm = pix_per_line * 16;
		}
	} else {
		/* There are cases for this going down to a multiplier
		 * of 2, but according to the firmware source, the
		 * table in the docs is somewhat wrong.
		 */
		lbm = pix_per_line * 16;
	}

	lbm = roundup(lbm, 32);

	return lbm;
}
static void vc4_write_scaling_parameters(struct drm_plane_state *state,
					 int channel)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	/* Ch0 H-PPF Word 0: Scaling Parameters */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}

	/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}
}
/* Writes out a full display list for an active plane to the plane's
 * private dlist state.
 */
static int vc4_plane_mode_set(struct drm_plane *plane,
			      struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	u32 ctl0_offset = vc4_state->dlist_count;
	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
	int num_planes = drm_format_num_planes(format->drm);
	u32 scl0, scl1, pitch0;
	u32 lbm_size, tiling;
	unsigned long irqflags;
	int ret, i;

	ret = vc4_plane_setup_clipping_and_scaling(state);
	if (ret)
		return ret;

	/* Allocate the LBM memory that the HVS will use for temporary
	 * storage due to our scaling/format conversion.
	 */
	lbm_size = vc4_lbm_size(state);
	if (lbm_size) {
		if (!vc4_state->lbm.allocated) {
			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
							 &vc4_state->lbm,
							 lbm_size, 32, 0, 0);
			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
		} else {
			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
		}
	}

	if (ret)
		return ret;

	/* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
	 * and 4:4:4, scl1 should be set to scl0 so both channels of
	 * the scaler do the same thing. For YUV, the Y plane needs
	 * to be put in channel 1 and Cb/Cr in channel 0, so we swap
	 * the scl fields here.
	 */
	if (num_planes == 1) {
		scl0 = vc4_get_scl_field(state, 1);
		scl1 = scl0;
	} else {
		scl0 = vc4_get_scl_field(state, 1);
		scl1 = vc4_get_scl_field(state, 0);
	}

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		tiling = SCALER_CTL0_TILING_LINEAR;
		pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		tiling = SCALER_CTL0_TILING_256B_OR_T;

		pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
			  VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
			  VC4_SET_FIELD((vc4_state->src_w[0] + 31) >> 5,
					SCALER_PITCH0_TILE_WIDTH_R));
		break;
	default:
		DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
			      (long long)fb->modifier);
		return -EINVAL;
	}

	/* Control word */
	vc4_dlist_write(vc4_state,
			SCALER_CTL0_VALID |
			(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
			(format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
			VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
			(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
			VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
			VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));

	/* Position Word 0: Image Positions and Alpha Value */
	vc4_state->pos0_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(0xff, SCALER_POS0_FIXED_ALPHA) |
			VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
			VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));

	/* Position Word 1: Scaled Image Dimensions. */
	if (!vc4_state->is_unity) {
		vc4_dlist_write(vc4_state,
				VC4_SET_FIELD(vc4_state->crtc_w,
					      SCALER_POS1_SCL_WIDTH) |
				VC4_SET_FIELD(vc4_state->crtc_h,
					      SCALER_POS1_SCL_HEIGHT));
	}

	/* Position Word 2: Source Image Size, Alpha Mode */
	vc4_state->pos2_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(format->has_alpha ?
				      SCALER_POS2_ALPHA_MODE_PIPELINE :
				      SCALER_POS2_ALPHA_MODE_FIXED,
				      SCALER_POS2_ALPHA_MODE) |
			VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
			VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));

	/* Position Word 3: Context. Written by the HVS. */
	vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
	 *
	 * The pointers may be any byte address.
	 */
	vc4_state->ptr0_offset = vc4_state->dlist_count;
	if (!format->flip_cbcr) {
		for (i = 0; i < num_planes; i++)
			vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
	} else {
		WARN_ON_ONCE(num_planes != 3);
		vc4_dlist_write(vc4_state, vc4_state->offsets[0]);
		vc4_dlist_write(vc4_state, vc4_state->offsets[2]);
		vc4_dlist_write(vc4_state, vc4_state->offsets[1]);
	}

	/* Pointer Context Word 0/1/2: Written by the HVS */
	for (i = 0; i < num_planes; i++)
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pitch word 0 */
	vc4_dlist_write(vc4_state, pitch0);

	/* Pitch word 1/2 */
	for (i = 1; i < num_planes; i++) {
		vc4_dlist_write(vc4_state,
				VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
	}

	/* Colorspace conversion words */
	if (vc4_state->is_yuv) {
		vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
	}

	if (!vc4_state->is_unity) {
		/* LBM Base Address. */
		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
			vc4_dlist_write(vc4_state, vc4_state->lbm.start);
		}

		if (num_planes > 1) {
			/* Emit Cb/Cr as channel 0 and Y as channel
			 * 1. This matches how we set up scl0/scl1
			 * above.
			 */
			vc4_write_scaling_parameters(state, 1);
		}
		vc4_write_scaling_parameters(state, 0);

		/* If any PPF setup was done, then all the kernel
		 * pointers get uploaded.
		 */
		if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
			u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
						   SCALER_PPF_KERNEL_OFFSET);

			/* HPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* HPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
		}
	}

	vc4_state->dlist[ctl0_offset] |=
		VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);

	return 0;
}
/* If a modeset involves changing the setup of a plane, the atomic
 * infrastructure will call this to validate a proposed plane setup.
 * However, if a plane isn't getting updated, this (and the
 * corresponding vc4_plane_atomic_update) won't get called. Thus, we
 * compute the dlist here and have all active plane dlists get updated
 * in the CRTC's flush.
 */
static int vc4_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	vc4_state->dlist_count = 0;

	if (plane_enabled(state))
		return vc4_plane_mode_set(plane, state);
	else
		return 0;
}
static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* No contents here. Since we don't know where in the CRTC's
	 * dlist we should be stored, our dlist is uploaded to the
	 * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
	 * time.
	 */
}
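
/* Called by the CRTC at atomic_flush time to copy this plane's dlist
 * into the region of HVS memory the CRTC allocated for it. Returns
 * the number of dwords written.
 */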
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	int i;

	vc4_state->hw_dlist = dlist;

	/* Can't memcpy_toio() because it needs to be 32-bit writes. */
	for (i = 0; i < vc4_state->dlist_count; i++)
		writel(vc4_state->dlist[i], &dlist[i]);

	return vc4_state->dlist_count;
}
u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
	const struct vc4_plane_state *vc4_state =
		container_of(state, typeof(*vc4_state), base);

	return vc4_state->dlist_count;
}
/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer.
 */
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	uint32_t addr;

	/* We're skipping the address adjustment for negative origin,
	 * because this is only called on the primary plane.
	 */
	WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
	addr = bo->paddr + fb->offsets[0];

	/* Write the new address into the hardware immediately. The
	 * scanout will start from this address as soon as the FIFO
	 * needs to refill with pixels.
	 */
	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	/* Also update the CPU-side dlist copy, so that any later
	 * atomic updates that don't do a new modeset on our plane
	 * also use our updated address.
	 */
	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}
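
/* Attaches the framebuffer BO's exclusive fence to the plane state so
 * the atomic commit waits for any outstanding rendering to finish
 * before the new buffer is scanned out. Skipped when the framebuffer
 * isn't changing or the plane is being disabled.
 */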
static int vc4_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct vc4_bo *bo;
	struct dma_fence *fence;

	if ((plane->state->fb == state->fb) || !state->fb)
		return 0;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
	fence = reservation_object_get_excl_rcu(bo->resv);
	drm_atomic_set_fence_for_plane(state, fence);

	return 0;
}
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.prepare_fb = vc4_prepare_fb,
};

static void vc4_plane_destroy(struct drm_plane *plane)
{
	drm_plane_helper_disable(plane);
	drm_plane_cleanup(plane);
}
/* Implements immediate (non-vblank-synced) updates of the cursor
 * position, or falls back to the atomic helper otherwise.
 */
static int
vc4_update_plane(struct drm_plane *plane,
		 struct drm_crtc *crtc,
		 struct drm_framebuffer *fb,
		 int crtc_x, int crtc_y,
		 unsigned int crtc_w, unsigned int crtc_h,
		 uint32_t src_x, uint32_t src_y,
		 uint32_t src_w, uint32_t src_h,
		 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_plane_state *plane_state;
	struct vc4_plane_state *vc4_state;

	if (plane != crtc->cursor)
		goto out;

	plane_state = plane->state;
	vc4_state = to_vc4_plane_state(plane_state);

	if (!plane_state)
		goto out;

	/* No configuring new scaling in the fast path. */
	if (crtc_w != plane_state->crtc_w ||
	    crtc_h != plane_state->crtc_h ||
	    src_w != plane_state->src_w ||
	    src_h != plane_state->src_h) {
		goto out;
	}

	if (fb != plane_state->fb) {
		drm_atomic_set_fb_for_plane(plane->state, fb);
		vc4_plane_async_set_fb(plane, fb);
	}

	/* Set the cursor's position on the screen. This is the
	 * expected change from the drm_mode_cursor_universal()
	 * helper.
	 */
	plane_state->crtc_x = crtc_x;
	plane_state->crtc_y = crtc_y;

	/* Allow changing the start position within the cursor BO, if
	 * that matters.
	 */
	plane_state->src_x = src_x;
	plane_state->src_y = src_y;

	/* Update the display list based on the new crtc_x/y. */
	vc4_plane_atomic_check(plane, plane_state);

	/* Note that we can't just call vc4_plane_write_dlist()
	 * because that would smash the context data that the HVS is
	 * currently using.
	 */
	writel(vc4_state->dlist[vc4_state->pos0_offset],
	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
	writel(vc4_state->dlist[vc4_state->pos2_offset],
	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
	writel(vc4_state->dlist[vc4_state->ptr0_offset],
	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	return 0;

out:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y,
					      crtc_w, crtc_h,
					      src_x, src_y,
					      src_w, src_h,
					      ctx);
}
static const struct drm_plane_funcs vc4_plane_funcs = {
	.update_plane = vc4_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vc4_plane_destroy,
	.set_property = NULL,
	.reset = vc4_plane_reset,
	.atomic_duplicate_state = vc4_plane_duplicate_state,
	.atomic_destroy_state = vc4_plane_destroy_state,
};
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct vc4_plane *vc4_plane;
	u32 formats[ARRAY_SIZE(hvs_formats)];
	u32 num_formats = 0;
	int ret = 0;
	unsigned i;

	vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
				 GFP_KERNEL);
	if (!vc4_plane)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		/* Don't allow YUV in cursor planes, since that means
		 * turning on the scaler, which we don't allow for the
		 * cursor.
		 */
		if (type != DRM_PLANE_TYPE_CURSOR ||
		    hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
			formats[num_formats++] = hvs_formats[i].drm;
		}
	}

	plane = &vc4_plane->base;
	ret = drm_universal_plane_init(dev, plane, 0,
				       &vc4_plane_funcs,
				       formats, num_formats,
				       NULL, type, NULL);

	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);

	return plane;
}