/*
 * rcar_du_kms.c -- R-Car Display Unit Mode Setting
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include <linux/of_graph.h>
#include <linux/wait.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"

/* -----------------------------------------------------------------------------
 * Format helpers
 */

static const struct rcar_du_format_info rcar_du_format_infos[] = {
        {
                .fourcc = DRM_FORMAT_RGB565,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_ARGB1555,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_XRGB1555,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_XRGB8888,
                .bpp = 32,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_RGB888,
        }, {
                .fourcc = DRM_FORMAT_ARGB8888,
                .bpp = 32,
                .planes = 1,
                .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
                .edf = PnDDCR4_EDF_ARGB8888,
        }, {
                .fourcc = DRM_FORMAT_UYVY,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_YUYV,
                .bpp = 16,
                .planes = 1,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_NV12,
                .bpp = 12,
                .planes = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                .fourcc = DRM_FORMAT_NV21,
                .bpp = 12,
                .planes = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        }, {
                /* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
                .fourcc = DRM_FORMAT_NV16,
                .bpp = 16,
                .planes = 2,
                .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
                .edf = PnDDCR4_EDF_NONE,
        },
};
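
/*
 * Look up the driver format descriptor for a DRM fourcc. Returns NULL when the
 * format is not in the table of formats supported by the DU.
 */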
const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
                if (rcar_du_format_infos[i].fourcc == fourcc)
                        return &rcar_du_format_infos[i];
        }

        return NULL;
}

/* -----------------------------------------------------------------------------
 * Frame buffer
 */
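
/*
 * Create a dumb buffer, rounding the pitch up to the alignment required by the
 * hardware before handing off to the CMA GEM helper.
 */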
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
                        struct drm_mode_create_dumb *args)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        unsigned int align;

        /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
         * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
         */
        if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
                align = 128;
        else
                align = 16 * args->bpp / 8;

        args->pitch = roundup(min_pitch, align);

        return drm_gem_cma_dumb_create_internal(file, dev, args);
}
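
/*
 * Validate a frame buffer creation request (pixel format, pitch and alignment)
 * and create the frame buffer through the CMA helper.
 */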
static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        const struct rcar_du_format_info *format;
        unsigned int max_pitch;
        unsigned int align;
        unsigned int bpp;

        format = rcar_du_format_info(mode_cmd->pixel_format);
        if (format == NULL) {
                dev_dbg(dev->dev, "unsupported pixel format %08x\n",
                        mode_cmd->pixel_format);
                return ERR_PTR(-EINVAL);
        }

        /*
         * The pitch and alignment constraints are expressed in pixels on the
         * hardware side and in bytes in the DRM API.
         */
        bpp = format->planes == 2 ? 1 : format->bpp / 8;
        max_pitch = 4096 * bpp;

        if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
                align = 128;
        else
                align = 16 * bpp;

        if (mode_cmd->pitches[0] & (align - 1) ||
            mode_cmd->pitches[0] >= max_pitch) {
                dev_dbg(dev->dev, "invalid pitch value %u\n",
                        mode_cmd->pitches[0]);
                return ERR_PTR(-EINVAL);
        }

        if (format->planes == 2) {
                if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
                        dev_dbg(dev->dev,
                                "luma and chroma pitches do not match\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        return drm_fb_cma_create(dev, file_priv, mode_cmd);
}
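
/* Forward output hotplug events to the fbdev emulation helper. */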
static void rcar_du_output_poll_changed(struct drm_device *dev)
{
        struct rcar_du_device *rcdu = dev->dev_private;

        drm_fbdev_cma_hotplug_event(rcdu->fbdev);
}

/* -----------------------------------------------------------------------------
 * Atomic Check and Update
 */

/*
 * Atomic hardware plane allocator
 *
 * The hardware plane allocator is solely based on the atomic plane states
 * without keeping any external state to avoid races between .atomic_check()
 * and .atomic_commit().
 *
 * The core idea is to avoid using a free planes bitmask that would need to be
 * shared between check and commit handlers with a collective knowledge based on
 * the allocated hardware plane(s) for each KMS plane. The allocator then loops
 * over all plane states to compute the free planes bitmask, allocates hardware
 * planes based on that bitmask, and stores the result back in the plane states.
 *
 * For this to work we need to access the current state of planes not touched by
 * the atomic update. To ensure that it won't be modified, we need to lock all
 * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
 * updates from .atomic_check() up to completion (when swapping the states if
 * the check step has succeeded) or rollback (when freeing the states if the
 * check step has failed).
 *
 * Allocation is performed in the .atomic_check() handler and applied
 * automatically when the core swaps the old and new states.
 */
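
/*
 * Check whether the new plane state requires reallocating hardware planes,
 * which is the case when the plane is being enabled or when the number of
 * hardware planes used by the format changes.
 */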
static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
                                        struct rcar_du_plane_state *state)
{
        const struct rcar_du_format_info *cur_format;

        cur_format = to_rcar_du_plane_state(plane->plane.state)->format;

        /* Lowering the number of planes doesn't strictly require reallocation
         * as the extra hardware plane will be freed when committing, but doing
         * so could lead to more fragmentation.
         */
        return !cur_format || cur_format->planes != state->format->planes;
}
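
/*
 * Compute the bitmask of hardware planes used by a plane state. Formats with
 * two planes use two consecutive hardware planes, with the pairing wrapping
 * around after plane 7 (hwindex 7 pairs with hwindex 0).
 */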
static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
{
        unsigned int mask;

        if (state->hwindex == -1)
                return 0;

        mask = 1 << state->hwindex;
        if (state->format->planes == 2)
                mask |= 1 << ((state->hwindex + 1) % 8);

        return mask;
}
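
/*
 * Allocate a hardware plane index from the free planes bitmask. Two-plane
 * formats need both the returned index and the next one (modulo 8) to be
 * free. Returns -EBUSY when no suitable hardware plane is available.
 */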
static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
{
        unsigned int i;

        for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
                if (!(free & (1 << i)))
                        continue;

                if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
                        break;
        }

        return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
}
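
/*
 * Driver-specific atomic check: run the DRM core helpers first, then
 * reallocate hardware planes for the KMS planes affected by the update, as
 * described in the allocator comment above.
 */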
static int rcar_du_atomic_check(struct drm_device *dev,
                                struct drm_atomic_state *state)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
        unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
        bool needs_realloc = false;
        unsigned int groups = 0;
        unsigned int i;
        int ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret < 0)
                return ret;

        /* Check if hardware planes need to be reallocated. */
        for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
                struct rcar_du_plane_state *plane_state;
                struct rcar_du_plane *plane;
                unsigned int index;

                if (!state->planes[i])
                        continue;

                plane = to_rcar_plane(state->planes[i]);
                plane_state = to_rcar_du_plane_state(state->plane_states[i]);

                /* If the plane is being disabled we don't need to go through
                 * the full reallocation procedure. Just mark the hardware
                 * plane(s) as freed.
                 */
                if (!plane_state->format) {
                        index = plane - plane->group->planes.planes;
                        group_freed_planes[plane->group->index] |= 1 << index;
                        plane_state->hwindex = -1;
                        continue;
                }

                /* If the plane needs to be reallocated mark it as such, and
                 * mark the hardware plane(s) as free.
                 */
                if (rcar_du_plane_needs_realloc(plane, plane_state)) {
                        groups |= 1 << plane->group->index;
                        needs_realloc = true;

                        index = plane - plane->group->planes.planes;
                        group_freed_planes[plane->group->index] |= 1 << index;
                        plane_state->hwindex = -1;
                }
        }

        if (!needs_realloc)
                return 0;

        /* Grab all plane states for the groups that need reallocation to ensure
         * locking and avoid racy updates. This serializes the update operation,
         * but there's not much we can do about it as that's the hardware
         * design.
         *
         * Compute the used planes mask for each group at the same time to avoid
         * looping over the planes separately later.
         */
        while (groups) {
                unsigned int index = ffs(groups) - 1;
                struct rcar_du_group *group = &rcdu->groups[index];
                unsigned int used_planes = 0;

                for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
                        struct rcar_du_plane *plane = &group->planes.planes[i];
                        struct rcar_du_plane_state *plane_state;
                        struct drm_plane_state *s;

                        s = drm_atomic_get_plane_state(state, &plane->plane);
                        if (IS_ERR(s))
                                return PTR_ERR(s);

                        /* If the plane has been freed in the above loop its
                         * hardware planes must not be added to the used planes
                         * bitmask. However, the current state doesn't reflect
                         * the free state yet, as we've modified the new state
                         * above. Use the local freed planes list to check for
                         * that condition instead.
                         */
                        if (group_freed_planes[index] & (1 << i))
                                continue;

                        plane_state = to_rcar_du_plane_state(plane->plane.state);
                        used_planes |= rcar_du_plane_hwmask(plane_state);
                }

                group_free_planes[index] = 0xff & ~used_planes;
                groups &= ~(1 << index);
        }

        /* Reallocate hardware planes for each plane that needs it. */
        for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
                struct rcar_du_plane_state *plane_state;
                struct rcar_du_plane *plane;
                int idx;

                if (!state->planes[i])
                        continue;

                plane = to_rcar_plane(state->planes[i]);
                plane_state = to_rcar_du_plane_state(state->plane_states[i]);

                /* Skip planes that are being disabled or don't need to be
                 * reallocated.
                 */
                if (!plane_state->format ||
                    !rcar_du_plane_needs_realloc(plane, plane_state))
                        continue;

                idx = rcar_du_plane_hwalloc(plane_state->format->planes,
                                group_free_planes[plane->group->index]);
                if (idx < 0) {
                        dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
                                __func__);
                        return idx;
                }

                plane_state->hwindex = idx;

                group_free_planes[plane->group->index] &=
                        ~rcar_du_plane_hwmask(plane_state);
        }

        return 0;
}
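
/*
 * Commit context: tracks the atomic state being applied and the bitmask of
 * affected CRTCs, so that asynchronous commits can be completed from a work
 * item.
 */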
struct rcar_du_commit {
        struct work_struct work;
        struct drm_device *dev;
        struct drm_atomic_state *state;
        u32 crtcs;
};
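
/*
 * Second half of a commit: apply the new state to the hardware, wait for
 * vblank, release the old state and wake up waiters blocked on the affected
 * CRTCs.
 */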
static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
{
        struct drm_device *dev = commit->dev;
        struct rcar_du_device *rcdu = dev->dev_private;
        struct drm_atomic_state *old_state = commit->state;

        /* Apply the atomic update. */
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
        drm_atomic_helper_commit_planes(dev, old_state);

        drm_atomic_helper_wait_for_vblanks(dev, old_state);

        drm_atomic_helper_cleanup_planes(dev, old_state);

        drm_atomic_state_free(old_state);

        /* Complete the commit, wake up any waiter. */
        spin_lock(&rcdu->commit.wait.lock);
        rcdu->commit.pending &= ~commit->crtcs;
        wake_up_all_locked(&rcdu->commit.wait);
        spin_unlock(&rcdu->commit.wait.lock);

        kfree(commit);
}

static void rcar_du_atomic_work(struct work_struct *work)
{
        struct rcar_du_commit *commit =
                container_of(work, struct rcar_du_commit, work);

        rcar_du_atomic_complete(commit);
}
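
/*
 * Driver-specific atomic commit: serialize commits per CRTC using the
 * commit.pending bitmask, then either complete the commit synchronously or
 * defer it to a work item when an asynchronous commit was requested.
 */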
static int rcar_du_atomic_commit(struct drm_device *dev,
                                 struct drm_atomic_state *state, bool async)
{
        struct rcar_du_device *rcdu = dev->dev_private;
        struct rcar_du_commit *commit;
        unsigned int i;
        int ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                return ret;

        /* Allocate the commit object. */
        commit = kzalloc(sizeof(*commit), GFP_KERNEL);
        if (commit == NULL)
                return -ENOMEM;

        INIT_WORK(&commit->work, rcar_du_atomic_work);
        commit->dev = dev;
        commit->state = state;

        /* Wait until all affected CRTCs have completed previous commits and
         * mark them as pending.
         */
        for (i = 0; i < dev->mode_config.num_crtc; ++i) {
                if (state->crtcs[i])
                        commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
        }

        spin_lock(&rcdu->commit.wait.lock);
        ret = wait_event_interruptible_locked(rcdu->commit.wait,
                        !(rcdu->commit.pending & commit->crtcs));
        if (ret == 0)
                rcdu->commit.pending |= commit->crtcs;
        spin_unlock(&rcdu->commit.wait.lock);

        if (ret) {
                kfree(commit);
                return ret;
        }

        /* Swap the state, this is the point of no return. */
        drm_atomic_helper_swap_state(dev, state);

        if (async)
                schedule_work(&commit->work);
        else
                rcar_du_atomic_complete(commit);

        return 0;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
        .fb_create = rcar_du_fb_create,
        .output_poll_changed = rcar_du_output_poll_changed,
        .atomic_check = rcar_du_atomic_check,
        .atomic_commit = rcar_du_atomic_commit,
};
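
/*
 * Initialize the encoder (and connector, if any) connected to one DT endpoint
 * of the DU node. Returns 1 when an encoder has been created, 0 when the
 * endpoint should be skipped, or a negative error code on failure.
 */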
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
                                     enum rcar_du_output output,
                                     struct of_endpoint *ep)
{
        static const struct {
                const char *compatible;
                enum rcar_du_encoder_type type;
        } encoders[] = {
                { "adi,adv7123", RCAR_DU_ENCODER_VGA },
                { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
                { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
        };

        enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
        struct device_node *connector = NULL;
        struct device_node *encoder = NULL;
        struct device_node *ep_node = NULL;
        struct device_node *entity_ep_node;
        struct device_node *entity;
        int ret;

        /*
         * Locate the connected entity and infer its type from the number of
         * endpoints.
         */
        entity = of_graph_get_remote_port_parent(ep->local_node);
        if (!entity) {
                dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
                        ep->local_node->full_name);
                return 0;
        }

        entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);

        for_each_endpoint_of_node(entity, ep_node) {
                if (ep_node == entity_ep_node)
                        continue;

                /*
                 * We've found one endpoint other than the input, this must
                 * be an encoder. Locate the connector.
                 */
                encoder = entity;
                connector = of_graph_get_remote_port_parent(ep_node);
                of_node_put(ep_node);

                if (!connector) {
                        dev_warn(rcdu->dev,
                                 "no connector for encoder %s, skipping\n",
                                 encoder->full_name);
                        of_node_put(entity_ep_node);
                        of_node_put(encoder);
                        return 0;
                }

                break;
        }

        of_node_put(entity_ep_node);

        if (encoder) {
                /*
                 * If an encoder has been found, get its type based on its
                 * compatible string.
                 */
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
                        if (of_device_is_compatible(encoder,
                                                    encoders[i].compatible)) {
                                enc_type = encoders[i].type;
                                break;
                        }
                }

                if (i == ARRAY_SIZE(encoders)) {
                        dev_warn(rcdu->dev,
                                 "unknown encoder type for %s, skipping\n",
                                 encoder->full_name);
                        of_node_put(encoder);
                        of_node_put(connector);
                        return 0;
                }
        } else {
                /*
                 * If no encoder has been found the entity must be the
                 * connector.
                 */
                connector = entity;
        }

        ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
        of_node_put(encoder);
        of_node_put(connector);

        return ret < 0 ? ret : 1;
}
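
/*
 * Walk all DT endpoints of the DU node and create one encoder per connected
 * output pipeline. Returns the number of encoders created, or a negative
 * error code on fatal failure.
 */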
static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
        struct device_node *np = rcdu->dev->of_node;
        struct device_node *ep_node;
        unsigned int num_encoders = 0;

        /*
         * Iterate over the endpoints and create one encoder for each output
         * pipeline.
         */
        for_each_endpoint_of_node(np, ep_node) {
                enum rcar_du_output output;
                struct of_endpoint ep;
                unsigned int i;
                int ret;

                ret = of_graph_parse_endpoint(ep_node, &ep);
                if (ret < 0) {
                        of_node_put(ep_node);
                        return ret;
                }

                /* Find the output route corresponding to the port number. */
                for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
                        if (rcdu->info->routes[i].possible_crtcs &&
                            rcdu->info->routes[i].port == ep.port) {
                                output = i;
                                break;
                        }
                }

                if (i == RCAR_DU_OUTPUT_MAX) {
                        dev_warn(rcdu->dev,
                                 "port %u references nonexistent output, skipping\n",
                                 ep.port);
                        continue;
                }

                /* Process the output pipeline. */
                ret = rcar_du_encoders_init_one(rcdu, output, &ep);
                if (ret < 0) {
                        if (ret == -EPROBE_DEFER) {
                                of_node_put(ep_node);
                                return ret;
                        }

                        dev_info(rcdu->dev,
                                 "encoder initialization failed, skipping\n");
                        continue;
                }

                num_encoders += ret;
        }

        return num_encoders;
}
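
/*
 * Set up KMS for the device: mode config limits, plane groups, CRTCs, LVDS and
 * external encoders, and the optional fbdev emulation.
 */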
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
        static const unsigned int mmio_offsets[] = {
                DU0_REG_OFFSET, DU2_REG_OFFSET
        };

        struct drm_device *dev = rcdu->ddev;
        struct drm_encoder *encoder;
        struct drm_fbdev_cma *fbdev;
        unsigned int num_encoders;
        unsigned int num_groups;
        unsigned int i;
        int ret;

        drm_mode_config_init(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 4095;
        dev->mode_config.max_height = 2047;
        dev->mode_config.funcs = &rcar_du_mode_config_funcs;

        rcdu->num_crtcs = rcdu->info->num_crtcs;

        /* Initialize the groups. */
        num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);

        for (i = 0; i < num_groups; ++i) {
                struct rcar_du_group *rgrp = &rcdu->groups[i];

                mutex_init(&rgrp->lock);

                rgrp->dev = rcdu;
                rgrp->mmio_offset = mmio_offsets[i];
                rgrp->index = i;

                ret = rcar_du_planes_init(rgrp);
                if (ret < 0)
                        return ret;
        }

        /* Create the CRTCs. */
        for (i = 0; i < rcdu->num_crtcs; ++i) {
                struct rcar_du_group *rgrp = &rcdu->groups[i / 2];

                ret = rcar_du_crtc_create(rgrp, i);
                if (ret < 0)
                        return ret;
        }

        /* Initialize the encoders. */
        ret = rcar_du_lvdsenc_init(rcdu);
        if (ret < 0)
                return ret;

        ret = rcar_du_encoders_init(rcdu);
        if (ret < 0)
                return ret;

        if (ret == 0) {
                dev_err(rcdu->dev, "error: no encoder could be initialized\n");
                return -EINVAL;
        }

        num_encoders = ret;

        /* Set the possible CRTCs and possible clones. There's always at least
         * one way for all encoders to clone each other, so set all bits in the
         * possible clones field.
         */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
                const struct rcar_du_output_routing *route =
                        &rcdu->info->routes[renc->output];

                encoder->possible_crtcs = route->possible_crtcs;
                encoder->possible_clones = (1 << num_encoders) - 1;
        }

        drm_mode_config_reset(dev);

        drm_kms_helper_poll_init(dev);

        if (dev->mode_config.num_connector) {
                fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
                                           dev->mode_config.num_connector);
                if (IS_ERR(fbdev))
                        return PTR_ERR(fbdev);

                rcdu->fbdev = fbdev;
        } else {
                dev_info(rcdu->dev,
                         "no connector found, disabling fbdev emulation\n");
        }

        return 0;
}