mdp4_kms.c

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"

static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);

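/*
 * One-time hardware setup: check the MDP core version and program the
 * initial fetch, layermixer and CSC defaults.  Runs with a runtime-PM
 * reference held for the duration.
 */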
static int mdp4_hw_init(struct msm_kms *kms)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        struct drm_device *dev = mdp4_kms->dev;
        uint32_t version, major, minor, dmap_cfg, vg_cfg;
        unsigned long clk;
        int ret = 0;

        pm_runtime_get_sync(dev->dev);

        mdp4_enable(mdp4_kms);
        version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
        mdp4_disable(mdp4_kms);

        major = FIELD(version, MDP4_VERSION_MAJOR);
        minor = FIELD(version, MDP4_VERSION_MINOR);

        DBG("found MDP4 version v%d.%d", major, minor);

        if (major != 4) {
                dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
                                major, minor);
                ret = -ENXIO;
                goto out;
        }

        mdp4_kms->rev = minor;

        if (mdp4_kms->rev > 1) {
                mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
                mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
        }

        mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);

        /* max read pending cmd config, 3 pending requests: */
        mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);

        clk = clk_get_rate(mdp4_kms->clk);

        if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
                dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
                vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
        } else {
                dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
                vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
        }

        DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);

        mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);

        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);

        if (mdp4_kms->rev >= 2)
                mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
        mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);

        /* disable CSC matrix / YUV by default: */
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
        mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);

        if (mdp4_kms->rev > 1)
                mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

        dev->mode_config.allow_fb_modifiers = true;

out:
        pm_runtime_put_sync(dev->dev);

        return ret;
}

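/*
 * Enable the MDP clocks and take a vblank reference on every CRTC in the
 * atomic state before the commit is applied (see 119ecb7fd).
 */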
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        int i;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;

        mdp4_enable(mdp4_kms);

        /* see 119ecb7fd */
        for_each_crtc_in_state(state, crtc, crtc_state, i)
                drm_crtc_vblank_get(crtc);
}

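/*
 * Counterpart of mdp4_prepare_commit(): drop the vblank references and
 * the clock enable once the commit has completed.
 */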
static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        int i;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;

        /* see 119ecb7fd */
        for_each_crtc_in_state(state, crtc, crtc_state, i)
                drm_crtc_vblank_put(crtc);

        mdp4_disable(mdp4_kms);
}

static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms,
                struct drm_crtc *crtc)
{
        mdp4_crtc_wait_for_commit_done(crtc);
}

static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
                struct drm_encoder *encoder)
{
        /* if we had >1 encoder, we'd need something more clever: */
        switch (encoder->encoder_type) {
        case DRM_MODE_ENCODER_TMDS:
                return mdp4_dtv_round_pixclk(encoder, rate);
        case DRM_MODE_ENCODER_LVDS:
        case DRM_MODE_ENCODER_DSI:
        default:
                return rate;
        }
}

static const char * const iommu_ports[] = {
        "mdp_port0_cb0", "mdp_port1_cb0",
};

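/*
 * Tear down in reverse order of mdp4_kms_init(): release the blank-cursor
 * BO, detach and destroy the IOMMU address space, and disable runtime PM.
 */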
static void mdp4_destroy(struct msm_kms *kms)
{
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
        struct device *dev = mdp4_kms->dev->dev;
        struct msm_gem_address_space *aspace = mdp4_kms->aspace;

        if (mdp4_kms->blank_cursor_iova)
                msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
        drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);

        if (aspace) {
                aspace->mmu->funcs->detach(aspace->mmu,
                                iommu_ports, ARRAY_SIZE(iommu_ports));
                msm_gem_address_space_destroy(aspace);
        }

        if (mdp4_kms->rpm_enabled)
                pm_runtime_disable(dev);

        kfree(mdp4_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
        .base = {
                .hw_init = mdp4_hw_init,
                .irq_preinstall = mdp4_irq_preinstall,
                .irq_postinstall = mdp4_irq_postinstall,
                .irq_uninstall = mdp4_irq_uninstall,
                .irq = mdp4_irq,
                .enable_vblank = mdp4_enable_vblank,
                .disable_vblank = mdp4_disable_vblank,
                .prepare_commit = mdp4_prepare_commit,
                .complete_commit = mdp4_complete_commit,
                .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
                .get_format = mdp_get_format,
                .round_pixclk = mdp4_round_pixclk,
                .destroy = mdp4_destroy,
        },
        .set_irqmask = mdp4_set_irqmask,
};

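/* Clock helpers: gate/ungate the core, interface, LUT and AXI clocks. */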
int mdp4_disable(struct mdp4_kms *mdp4_kms)
{
        DBG("");

        clk_disable_unprepare(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_disable_unprepare(mdp4_kms->pclk);
        clk_disable_unprepare(mdp4_kms->lut_clk);
        if (mdp4_kms->axi_clk)
                clk_disable_unprepare(mdp4_kms->axi_clk);

        return 0;
}

int mdp4_enable(struct mdp4_kms *mdp4_kms)
{
        DBG("");

        clk_prepare_enable(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_prepare_enable(mdp4_kms->pclk);
        clk_prepare_enable(mdp4_kms->lut_clk);
        if (mdp4_kms->axi_clk)
                clk_prepare_enable(mdp4_kms->axi_clk);

        return 0;
}

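/*
 * Look up the LVDS panel node via the OF graph: the LVDS/LCDC output is
 * port 0 of the MDP4 DT node.  Returns NULL if no panel is described.
 */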
static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
{
        struct device_node *endpoint, *panel_node;
        struct device_node *np = dev->dev->of_node;

        /*
         * LVDS/LCDC is the first port described in the list of ports in the
         * MDP4 DT node.
         */
        endpoint = of_graph_get_endpoint_by_regs(np, 0, -1);
        if (!endpoint) {
                DBG("no LVDS remote endpoint\n");
                return NULL;
        }

        panel_node = of_graph_get_remote_port_parent(endpoint);
        if (!panel_node) {
                DBG("no valid panel node in LVDS endpoint\n");
                of_node_put(endpoint);
                return NULL;
        }

        of_node_put(endpoint);

        return panel_node;
}

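/*
 * Create the encoder(s) and connector for one output interface type
 * (LVDS/LCDC, TMDS/HDMI or DSI) and register them with msm_drm_private.
 */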
static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                int intf_type)
{
        struct drm_device *dev = mdp4_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        struct device_node *panel_node;
        struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM];
        int i, dsi_id;
        int ret;

        switch (intf_type) {
        case DRM_MODE_ENCODER_LVDS:
                /*
                 * bail out early if there is no panel node (no need to
                 * initialize LCDC encoder and LVDS connector)
                 */
                panel_node = mdp4_detect_lcdc_panel(dev);
                if (!panel_node)
                        return 0;

                encoder = mdp4_lcdc_encoder_init(dev, panel_node);
                if (IS_ERR(encoder)) {
                        dev_err(dev->dev, "failed to construct LCDC encoder\n");
                        return PTR_ERR(encoder);
                }

                /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
                encoder->possible_crtcs = 1 << DMA_P;

                connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
                if (IS_ERR(connector)) {
                        dev_err(dev->dev, "failed to initialize LVDS connector\n");
                        return PTR_ERR(connector);
                }

                priv->encoders[priv->num_encoders++] = encoder;
                priv->connectors[priv->num_connectors++] = connector;

                break;
        case DRM_MODE_ENCODER_TMDS:
                encoder = mdp4_dtv_encoder_init(dev);
                if (IS_ERR(encoder)) {
                        dev_err(dev->dev, "failed to construct DTV encoder\n");
                        return PTR_ERR(encoder);
                }

                /* DTV can be hooked to DMA_E: */
                encoder->possible_crtcs = 1 << 1;

                if (priv->hdmi) {
                        /* Construct bridge/connector for HDMI: */
                        ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
                        if (ret) {
                                dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
                                return ret;
                        }
                }

                priv->encoders[priv->num_encoders++] = encoder;

                break;
        case DRM_MODE_ENCODER_DSI:
                /* only DSI1 supported for now */
                dsi_id = 0;

                if (!priv->dsi[dsi_id])
                        break;

                for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
                        dsi_encs[i] = mdp4_dsi_encoder_init(dev);
                        if (IS_ERR(dsi_encs[i])) {
                                ret = PTR_ERR(dsi_encs[i]);
                                dev_err(dev->dev,
                                        "failed to construct DSI encoder: %d\n",
                                        ret);
                                return ret;
                        }

                        /* TODO: Add DMA_S later? */
                        dsi_encs[i]->possible_crtcs = 1 << DMA_P;
                        priv->encoders[priv->num_encoders++] = dsi_encs[i];
                }

                ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs);
                if (ret) {
                        dev_err(dev->dev, "failed to initialize DSI: %d\n",
                                ret);
                        return ret;
                }

                break;
        default:
                dev_err(dev->dev, "Invalid or unsupported interface\n");
                return -EINVAL;
        }

        return 0;
}

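/*
 * Construct the fixed set of planes and CRTCs (VG1/VG2 overlay planes,
 * RGB1/RGB2 primary planes feeding DMA_P/DMA_E), then the encoders and
 * connectors for each supported interface.
 */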
static int modeset_init(struct mdp4_kms *mdp4_kms)
{
        struct drm_device *dev = mdp4_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_plane *plane;
        struct drm_crtc *crtc;
        int i, ret;
        static const enum mdp4_pipe rgb_planes[] = {
                RGB1, RGB2,
        };
        static const enum mdp4_pipe vg_planes[] = {
                VG1, VG2,
        };
        static const enum mdp4_dma mdp4_crtcs[] = {
                DMA_P, DMA_E,
        };
        static const char * const mdp4_crtc_names[] = {
                "DMA_P", "DMA_E",
        };
        static const int mdp4_intfs[] = {
                DRM_MODE_ENCODER_LVDS,
                DRM_MODE_ENCODER_DSI,
                DRM_MODE_ENCODER_TMDS,
        };

        /* construct non-private planes: */
        for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
                plane = mdp4_plane_init(dev, vg_planes[i], false);
                if (IS_ERR(plane)) {
                        dev_err(dev->dev,
                                "failed to construct plane for VG%d\n", i + 1);
                        ret = PTR_ERR(plane);
                        goto fail;
                }
                priv->planes[priv->num_planes++] = plane;
        }

        for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
                plane = mdp4_plane_init(dev, rgb_planes[i], true);
                if (IS_ERR(plane)) {
                        dev_err(dev->dev,
                                "failed to construct plane for RGB%d\n", i + 1);
                        ret = PTR_ERR(plane);
                        goto fail;
                }

                crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
                                mdp4_crtcs[i]);
                if (IS_ERR(crtc)) {
                        dev_err(dev->dev, "failed to construct crtc for %s\n",
                                mdp4_crtc_names[i]);
                        ret = PTR_ERR(crtc);
                        goto fail;
                }

                priv->crtcs[priv->num_crtcs++] = crtc;
        }

        /*
         * we currently set up two relatively fixed paths:
         *
         * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
         * or
         * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
         *
         * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
         */
        for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
                ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
                if (ret) {
                        dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
                                i, ret);
                        goto fail;
                }
        }

        return 0;

fail:
        return ret;
}

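/*
 * Allocate and initialize the MDP4 KMS instance: map registers, acquire
 * the regulator and clocks, quiesce anything left enabled by the
 * bootloader, set up the IOMMU address space, run modeset_init() and
 * allocate the blank-cursor BO.
 */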
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
        struct platform_device *pdev = dev->platformdev;
        struct mdp4_platform_config *config = mdp4_get_config(pdev);
        struct mdp4_kms *mdp4_kms;
        struct msm_kms *kms = NULL;
        struct msm_gem_address_space *aspace;
        int irq, ret;

        mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
        if (!mdp4_kms) {
                dev_err(dev->dev, "failed to allocate kms\n");
                ret = -ENOMEM;
                goto fail;
        }

        mdp_kms_init(&mdp4_kms->base, &kms_funcs);

        kms = &mdp4_kms->base.base;

        mdp4_kms->dev = dev;

        mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
        if (IS_ERR(mdp4_kms->mmio)) {
                ret = PTR_ERR(mdp4_kms->mmio);
                goto fail;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                dev_err(dev->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }

        kms->irq = irq;

        /* NOTE: driver for this regulator still missing upstream.. use
         * _get_exclusive() and ignore the error if it does not exist
         * (and hope that the bootloader left it on for us)
         */
        mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
        if (IS_ERR(mdp4_kms->vdd))
                mdp4_kms->vdd = NULL;

        if (mdp4_kms->vdd) {
                ret = regulator_enable(mdp4_kms->vdd);
                if (ret) {
                        dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
                        goto fail;
                }
        }

        mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(mdp4_kms->clk)) {
                dev_err(dev->dev, "failed to get core_clk\n");
                ret = PTR_ERR(mdp4_kms->clk);
                goto fail;
        }

        mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(mdp4_kms->pclk))
                mdp4_kms->pclk = NULL;

        // XXX if (rev >= MDP_REV_42) { ???
        mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
        if (IS_ERR(mdp4_kms->lut_clk)) {
                dev_err(dev->dev, "failed to get lut_clk\n");
                ret = PTR_ERR(mdp4_kms->lut_clk);
                goto fail;
        }

        mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
        if (IS_ERR(mdp4_kms->axi_clk)) {
                dev_err(dev->dev, "failed to get axi_clk\n");
                ret = PTR_ERR(mdp4_kms->axi_clk);
                goto fail;
        }

        clk_set_rate(mdp4_kms->clk, config->max_clk);
        clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

        pm_runtime_enable(dev->dev);
        mdp4_kms->rpm_enabled = true;

        /* make sure things are off before attaching iommu (bootloader could
         * have left things on, in which case we'll start getting faults if
         * we don't disable):
         */
        mdp4_enable(mdp4_kms);
        mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
        mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
        mdp4_disable(mdp4_kms);
        mdelay(16);

        if (config->iommu) {
                aspace = msm_gem_address_space_create(&pdev->dev,
                                config->iommu, "mdp4");
                if (IS_ERR(aspace)) {
                        ret = PTR_ERR(aspace);
                        goto fail;
                }

                mdp4_kms->aspace = aspace;

                ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
                        goto fail;
        } else {
                dev_info(dev->dev, "no iommu, fallback to phys "
                                "contig buffers for scanout\n");
                aspace = NULL;
        }

        mdp4_kms->id = msm_register_address_space(dev, aspace);
        if (mdp4_kms->id < 0) {
                ret = mdp4_kms->id;
                dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
                goto fail;
        }

        ret = modeset_init(mdp4_kms);
        if (ret) {
                dev_err(dev->dev, "modeset_init failed: %d\n", ret);
                goto fail;
        }

        mutex_lock(&dev->struct_mutex);
        mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
                ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
                dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
                mdp4_kms->blank_cursor_bo = NULL;
                goto fail;
        }

        ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
                        &mdp4_kms->blank_cursor_iova);
        if (ret) {
                dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
                goto fail;
        }

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;

        return kms;

fail:
        if (kms)
                mdp4_destroy(kms);
        return ERR_PTR(ret);
}

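/*
 * Platform configuration: max core clock rate and the IOMMU domain used
 * for scanout buffers (left NULL if domain allocation fails).
 */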
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
        static struct mdp4_platform_config config = {};

        /* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
        config.max_clk = 266667000;
        config.iommu = iommu_domain_alloc(&platform_bus_type);
        if (config.iommu) {
                config.iommu->geometry.aperture_start = 0x1000;
                config.iommu->geometry.aperture_end = 0xffffffff;
        }

        return &config;
}