dpu_kms.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354
  1. /*
  2. * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2013 Red Hat
  4. * Author: Rob Clark <robdclark@gmail.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  19. #include <drm/drm_crtc.h>
  20. #include <linux/debugfs.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/dma-buf.h>
  23. #include "msm_drv.h"
  24. #include "msm_mmu.h"
  25. #include "msm_gem.h"
  26. #include "dpu_kms.h"
  27. #include "dpu_core_irq.h"
  28. #include "dpu_formats.h"
  29. #include "dpu_hw_vbif.h"
  30. #include "dpu_vbif.h"
  31. #include "dpu_encoder.h"
  32. #include "dpu_plane.h"
  33. #include "dpu_crtc.h"
  34. #define CREATE_TRACE_POINTS
  35. #include "dpu_trace.h"
  36. static const char * const iommu_ports[] = {
  37. "mdp_0",
  38. };
  39. /*
  40. * To enable overall DRM driver logging
  41. * # echo 0x2 > /sys/module/drm/parameters/debug
  42. *
  43. * To enable DRM driver h/w logging
  44. * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
  45. *
  46. * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
  47. */
  48. #define DPU_DEBUGFS_DIR "msm_dpu"
  49. #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
  50. static int dpu_kms_hw_init(struct msm_kms *kms);
  51. static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
  52. static unsigned long dpu_iomap_size(struct platform_device *pdev,
  53. const char *name)
  54. {
  55. struct resource *res;
  56. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  57. if (!res) {
  58. DRM_ERROR("failed to get memory resource: %s\n", name);
  59. return 0;
  60. }
  61. return resource_size(res);
  62. }
  63. #ifdef CONFIG_DEBUG_FS
  64. static int _dpu_danger_signal_status(struct seq_file *s,
  65. bool danger_status)
  66. {
  67. struct dpu_kms *kms = (struct dpu_kms *)s->private;
  68. struct msm_drm_private *priv;
  69. struct dpu_danger_safe_status status;
  70. int i;
  71. if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
  72. DPU_ERROR("invalid arg(s)\n");
  73. return 0;
  74. }
  75. priv = kms->dev->dev_private;
  76. memset(&status, 0, sizeof(struct dpu_danger_safe_status));
  77. pm_runtime_get_sync(&kms->pdev->dev);
  78. if (danger_status) {
  79. seq_puts(s, "\nDanger signal status:\n");
  80. if (kms->hw_mdp->ops.get_danger_status)
  81. kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
  82. &status);
  83. } else {
  84. seq_puts(s, "\nSafe signal status:\n");
  85. if (kms->hw_mdp->ops.get_danger_status)
  86. kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
  87. &status);
  88. }
  89. pm_runtime_put_sync(&kms->pdev->dev);
  90. seq_printf(s, "MDP : 0x%x\n", status.mdp);
  91. for (i = SSPP_VIG0; i < SSPP_MAX; i++)
  92. seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
  93. status.sspp[i]);
  94. seq_puts(s, "\n");
  95. return 0;
  96. }
/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS - boilerplate generator for a read-only
 * debugfs seq_file.  Given prefix "foo", expects foo_show() to exist and
 * emits foo_open() plus the matching "foo_fops" file_operations, wiring
 * inode->i_private through single_open() as the seq_file private data.
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.read = seq_read, \
	.llseek = seq_lseek, \
}
/* debugfs "danger_status" show handler: dump danger-signal state. */
static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
/* debugfs "safe_status" show handler: dump safe-signal state. */
static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
  119. static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
  120. {
  121. debugfs_remove_recursive(dpu_kms->debugfs_danger);
  122. dpu_kms->debugfs_danger = NULL;
  123. }
  124. static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
  125. struct dentry *parent)
  126. {
  127. dpu_kms->debugfs_danger = debugfs_create_dir("danger",
  128. parent);
  129. if (!dpu_kms->debugfs_danger) {
  130. DPU_ERROR("failed to create danger debugfs\n");
  131. return -EINVAL;
  132. }
  133. debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
  134. dpu_kms, &dpu_debugfs_danger_stats_fops);
  135. debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
  136. dpu_kms, &dpu_debugfs_safe_stats_fops);
  137. return 0;
  138. }
/*
 * seq_file show handler that hex-dumps a block of DPU registers.
 * s->private holds the dpu_debugfs_regset32 describing the offset and
 * length of the block.  Output is one "[addr]" tag per 16-byte row
 * followed by the 32-bit words of that row.
 */
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset;
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!s || !s->private)
		return 0;

	regset = s->private;

	dpu_kms = regset->dpu_kms;
	if (!dpu_kms || !dpu_kms->mmio)
		return 0;

	dev = dpu_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed, so a non-16-byte-aligned start
	 * still lines up under its row tag
	 */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, " ");
	}

	/* keep the device powered while reading registers */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		/* new row tag on every 16-byte boundary */
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}
/* single_open() wrapper feeding the regset (inode->i_private) to the show fn. */
static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

/* file_operations for the read-only register-dump debugfs nodes */
static const struct file_operations dpu_fops_regset32 = {
	.open =		dpu_debugfs_open_regset32,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};
  189. void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
  190. uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
  191. {
  192. if (regset) {
  193. regset->offset = offset;
  194. regset->blk_len = length;
  195. regset->dpu_kms = dpu_kms;
  196. }
  197. }
  198. void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
  199. void *parent, struct dpu_debugfs_regset32 *regset)
  200. {
  201. if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
  202. return NULL;
  203. /* make sure offset is a multiple of 4 */
  204. regset->offset = round_down(regset->offset, 4);
  205. return debugfs_create_file(name, mode, parent,
  206. regset, &dpu_fops_regset32);
  207. }
  208. static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
  209. {
  210. void *p;
  211. int rc;
  212. p = dpu_hw_util_get_log_mask_ptr();
  213. if (!dpu_kms || !p)
  214. return -EINVAL;
  215. dpu_kms->debugfs_root = debugfs_create_dir("debug",
  216. dpu_kms->dev->primary->debugfs_root);
  217. if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
  218. DRM_ERROR("debugfs create_dir failed %ld\n",
  219. PTR_ERR(dpu_kms->debugfs_root));
  220. return PTR_ERR(dpu_kms->debugfs_root);
  221. }
  222. rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
  223. if (rc) {
  224. DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
  225. return rc;
  226. }
  227. /* allow root to be NULL */
  228. debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
  229. (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
  230. (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
  231. (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
  232. rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
  233. if (rc) {
  234. DPU_ERROR("failed to init perf %d\n", rc);
  235. return rc;
  236. }
  237. return 0;
  238. }
  239. static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
  240. {
  241. /* don't need to NULL check debugfs_root */
  242. if (dpu_kms) {
  243. dpu_debugfs_vbif_destroy(dpu_kms);
  244. dpu_debugfs_danger_destroy(dpu_kms);
  245. dpu_debugfs_core_irq_destroy(dpu_kms);
  246. debugfs_remove_recursive(dpu_kms->debugfs_root);
  247. }
  248. }
  249. #else
/* Stub used when CONFIG_DEBUG_FS is disabled: nothing to tear down. */
static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
{
}
  253. #endif
/* msm_kms hook: enable vblank events on @crtc. */
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}
/* msm_kms hook: disable vblank events on @crtc. */
static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}
  262. static void dpu_kms_prepare_commit(struct msm_kms *kms,
  263. struct drm_atomic_state *state)
  264. {
  265. struct dpu_kms *dpu_kms;
  266. struct msm_drm_private *priv;
  267. struct drm_device *dev;
  268. struct drm_encoder *encoder;
  269. if (!kms)
  270. return;
  271. dpu_kms = to_dpu_kms(kms);
  272. dev = dpu_kms->dev;
  273. if (!dev || !dev->dev_private)
  274. return;
  275. priv = dev->dev_private;
  276. pm_runtime_get_sync(&dpu_kms->pdev->dev);
  277. list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
  278. if (encoder->crtc != NULL)
  279. dpu_encoder_prepare_commit(encoder);
  280. }
/*
 * Override the encoder enable since we need to setup the inline rotator and do
 * some crtc magic before enabling any bridge that might be present.
 */
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
	struct drm_crtc *crtc = encoder->crtc;

	/* Forward this enable call to the commit hook */
	if (funcs && funcs->commit)
		funcs->commit(encoder);

	/* kick off the frame transfer once the CRTC is actually active */
	if (crtc && crtc->state->active) {
		trace_dpu_kms_enc_enable(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}
  297. static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
  298. {
  299. struct drm_crtc *crtc;
  300. struct drm_crtc_state *crtc_state;
  301. int i;
  302. for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
  303. /* If modeset is required, kickoff is run in encoder_enable */
  304. if (drm_atomic_crtc_needs_modeset(crtc_state))
  305. continue;
  306. if (crtc->state->active) {
  307. trace_dpu_kms_commit(DRMID(crtc));
  308. dpu_crtc_commit_kickoff(crtc);
  309. }
  310. }
  311. }
/*
 * msm_kms hook run after an atomic commit completes: notify each CRTC and
 * drop the runtime-PM reference taken in dpu_kms_prepare_commit().
 */
static void dpu_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct dpu_kms *dpu_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;
	dpu_kms = to_dpu_kms(kms);

	if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
		return;
	priv = dpu_kms->dev->dev_private;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		dpu_crtc_complete_commit(crtc, old_crtc_state);

	/* balances pm_runtime_get_sync() from prepare_commit */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	DPU_ATRACE_END("kms_complete_commit");
}
/*
 * msm_kms hook: block until the pending commit on @crtc has reached the
 * hardware, by waiting on every encoder attached to the CRTC.  Skips
 * CRTCs that are not enabled/active.
 */
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		/* -EWOULDBLOCK means nothing was pending; not an error */
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}
  367. static void _dpu_kms_initialize_dsi(struct drm_device *dev,
  368. struct msm_drm_private *priv,
  369. struct dpu_kms *dpu_kms)
  370. {
  371. struct drm_encoder *encoder = NULL;
  372. int i, rc;
  373. /*TODO: Support two independent DSI connectors */
  374. encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
  375. if (IS_ERR_OR_NULL(encoder)) {
  376. DPU_ERROR("encoder init failed for dsi display\n");
  377. return;
  378. }
  379. priv->encoders[priv->num_encoders++] = encoder;
  380. for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
  381. if (!priv->dsi[i]) {
  382. DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
  383. return;
  384. }
  385. rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
  386. if (rc) {
  387. DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
  388. i, rc);
  389. continue;
  390. }
  391. }
  392. }
  393. /**
  394. * _dpu_kms_setup_displays - create encoders, bridges and connectors
  395. * for underlying displays
  396. * @dev: Pointer to drm device structure
  397. * @priv: Pointer to private drm device data
  398. * @dpu_kms: Pointer to dpu kms structure
  399. * Returns: Zero on success
  400. */
static void _dpu_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct dpu_kms *dpu_kms)
{
	/* currently only DSI displays are wired up */
	_dpu_kms_initialize_dsi(dev, priv, dpu_kms);

	/*
	 * Extend this function to initialize other
	 * types of displays
	 */
}
  411. static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
  412. {
  413. struct msm_drm_private *priv;
  414. int i;
  415. if (!dpu_kms) {
  416. DPU_ERROR("invalid dpu_kms\n");
  417. return;
  418. } else if (!dpu_kms->dev) {
  419. DPU_ERROR("invalid dev\n");
  420. return;
  421. } else if (!dpu_kms->dev->dev_private) {
  422. DPU_ERROR("invalid dev_private\n");
  423. return;
  424. }
  425. priv = dpu_kms->dev->dev_private;
  426. for (i = 0; i < priv->num_crtcs; i++)
  427. priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
  428. priv->num_crtcs = 0;
  429. for (i = 0; i < priv->num_planes; i++)
  430. priv->planes[i]->funcs->destroy(priv->planes[i]);
  431. priv->num_planes = 0;
  432. for (i = 0; i < priv->num_connectors; i++)
  433. priv->connectors[i]->funcs->destroy(priv->connectors[i]);
  434. priv->num_connectors = 0;
  435. for (i = 0; i < priv->num_encoders; i++)
  436. priv->encoders[i]->funcs->destroy(priv->encoders[i]);
  437. priv->num_encoders = 0;
  438. }
/*
 * Create all DRM mode objects for the device: encoders (via the display
 * drivers), one plane per SSPP from the catalog, and one CRTC per
 * primary plane, then mark every CRTC usable from every encoder.
 * Returns 0 on success; on failure everything created so far is torn
 * down via _dpu_kms_drm_obj_destroy().
 */
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
		DPU_ERROR("invalid dpu_kms\n");
		return -EINVAL;
	}

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	_dpu_kms_setup_displays(dev, priv, dpu_kms);

	/* CRTC count is bounded by both mixers and encoders */
	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		/* cursor-capable pipes become cursors until each CRTC has
		 * one, then primaries, then overlays
		 */
		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		/* every plane may be placed on any of the first
		 * max_crtc_count CRTCs
		 */
		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_dpu_kms_drm_obj_destroy(dpu_kms);
	return ret;
}
  506. #ifdef CONFIG_DEBUG_FS
  507. static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
  508. {
  509. struct dpu_kms *dpu_kms = to_dpu_kms(kms);
  510. struct drm_device *dev;
  511. int rc;
  512. if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
  513. DPU_ERROR("invalid dpu_kms\n");
  514. return -EINVAL;
  515. }
  516. dev = dpu_kms->dev;
  517. rc = _dpu_debugfs_init(dpu_kms);
  518. if (rc)
  519. DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
  520. return rc;
  521. }
  522. #endif
/* msm_kms hook: DPU accepts any pixel clock, so return @rate unchanged. */
static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
/*
 * Tear down everything dpu_kms_hw_init() set up, in reverse order:
 * interrupts, power-event handler, debugfs, MMU, VBIF blocks, resource
 * manager, catalog, power client, and finally the register mappings.
 * Each step NULLs its pointer so the function is safe to call more than
 * once during shutdown.
 */
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	int i;

	dev = dpu_kms->dev;
	if (!dev)
		return;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	if (dpu_kms->power_event)
		dpu_power_handle_unregister_event(
				&dpu_kms->phandle, dpu_kms->power_event);

	/* safe to call these more than once during shutdown */
	_dpu_debugfs_destroy(dpu_kms);
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	if (dpu_kms->catalog)
		dpu_hw_catalog_deinit(dpu_kms->catalog);
	dpu_kms->catalog = NULL;

	if (dpu_kms->core_client)
		dpu_power_client_destroy(&dpu_kms->phandle,
			dpu_kms->core_client);
	dpu_kms->core_client = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}
/* msm_kms destroy hook: release debug infrastructure then all hw state. */
static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	dpu_dbg_destroy();
	_dpu_kms_hw_destroy(dpu_kms);
}
/*
 * PM suspend handler: stop hot-plug polling, snapshot the current atomic
 * state for restore in dpu_kms_pm_resume(), and (intended) commit a state
 * that disables all CRTCs before blocking further commits.
 *
 * NOTE(review): num_crtcs is initialized to 0 and never incremented, so
 * the "disable all" commit below is unreachable — the function always
 * takes the num_crtcs == 0 early-out.  Presumably a CRTC-disabling loop
 * that populated @state and counted active CRTCs is missing; confirm
 * against the upstream driver.
 *
 * NOTE(review): errors (lock failure, state duplication failure) fall
 * through to "return 0", so suspend always reports success — verify this
 * is intentional.
 */
static int dpu_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct dpu_kms *dpu_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	DPU_ATRACE_BEGIN("kms_pm_suspend");

	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (dpu_kms->suspend_state)
		drm_atomic_state_put(dpu_kms->suspend_state);
	dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
		DRM_ERROR("failed to back up suspend state\n");
		dpu_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (IS_ERR_OR_NULL(state)) {
		DRM_ERROR("failed to allocate crtc disable state\n");
		goto unlock;
	}

	state->acquire_ctx = &ctx;

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		drm_atomic_state_put(state);
		goto suspended;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		drm_atomic_state_put(state);
		goto unlock;
	}

suspended:
	dpu_kms->suspend_block = true;

unlock:
	/* deadlock backoff: drop locks and retry the whole sequence */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	DPU_ATRACE_END("kms_pm_suspend");

	return 0;
}
/*
 * PM resume handler: unblock commits, restore the atomic state saved by
 * dpu_kms_pm_suspend() (if any), and re-enable hot-plug polling.
 */
static int dpu_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct dpu_kms *dpu_kms;
	int ret;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));

	DPU_ATRACE_BEGIN("kms_pm_resume");

	drm_mode_config_reset(ddev);

	drm_modeset_lock_all(ddev);

	dpu_kms->suspend_block = false;

	/* re-commit the pre-suspend state; the state reference is consumed
	 * (or explicitly put on failure) either way
	 */
	if (dpu_kms->suspend_state) {
		dpu_kms->suspend_state->acquire_ctx =
			ddev->mode_config.acquire_ctx;
		ret = drm_atomic_commit(dpu_kms->suspend_state);
		if (ret < 0) {
			DRM_ERROR("failed to restore state, %d\n", ret);
			drm_atomic_state_put(dpu_kms->suspend_state);
		}
		dpu_kms->suspend_state = NULL;
	}
	drm_modeset_unlock_all(ddev);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	DPU_ATRACE_END("kms_pm_resume");

	return 0;
}
  676. static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
  677. struct drm_encoder *encoder,
  678. bool cmd_mode)
  679. {
  680. struct msm_display_info info;
  681. struct msm_drm_private *priv = encoder->dev->dev_private;
  682. int i, rc = 0;
  683. memset(&info, 0, sizeof(info));
  684. info.intf_type = encoder->encoder_type;
  685. info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
  686. MSM_DISPLAY_CAP_VID_MODE;
  687. /* TODO: No support for DSI swap */
  688. for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
  689. if (priv->dsi[i]) {
  690. info.h_tile_instance[info.num_of_h_tiles] = i;
  691. info.num_of_h_tiles++;
  692. }
  693. }
  694. rc = dpu_encoder_setup(encoder->dev, encoder, &info);
  695. if (rc)
  696. DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
  697. encoder->base.id, rc);
  698. }
/* msm_kms callback table wiring the DPU implementations into the core. */
static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = dpu_kms_hw_init,
	.irq_preinstall  = dpu_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall   = dpu_irq_uninstall,
	.irq             = dpu_irq,
	.prepare_commit  = dpu_kms_prepare_commit,
	.commit          = dpu_kms_commit,
	.complete_commit = dpu_kms_complete_commit,
	.wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
	.enable_vblank   = dpu_kms_enable_vblank,
	.disable_vblank  = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format      = dpu_get_msm_format,
	.round_pixclk    = dpu_kms_round_pixclk,
	.pm_suspend      = dpu_kms_pm_suspend,
	.pm_resume       = dpu_kms_pm_resume,
	.destroy         = dpu_kms_destroy,
	.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = dpu_kms_debugfs_init,
#endif
};
/* the caller api needs to turn on clock before calling it */
static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
{
	/* hardware revision register lives at MDP offset 0x0 */
	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
}
/*
 * Detach the display MMU from its iommu ports and drop the address-space
 * reference.  Always returns 0.
 *
 * NOTE(review): base.aspace is dereferenced unconditionally — presumably
 * callers guarantee _dpu_kms_mmu_init() succeeded first; confirm there is
 * no teardown path that reaches here with a NULL aspace.
 */
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));
	msm_gem_address_space_put(dpu_kms->base.aspace);

	return 0;
}
  736. static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
  737. {
  738. struct iommu_domain *domain;
  739. struct msm_gem_address_space *aspace;
  740. int ret;
  741. domain = iommu_domain_alloc(&platform_bus_type);
  742. if (!domain)
  743. return 0;
  744. aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
  745. domain, "dpu1");
  746. if (IS_ERR(aspace)) {
  747. ret = PTR_ERR(aspace);
  748. goto fail;
  749. }
  750. dpu_kms->base.aspace = aspace;
  751. ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
  752. ARRAY_SIZE(iommu_ports));
  753. if (ret) {
  754. DPU_ERROR("failed to attach iommu %d\n", ret);
  755. msm_gem_address_space_put(aspace);
  756. goto fail;
  757. }
  758. return 0;
  759. fail:
  760. _dpu_kms_mmu_destroy(dpu_kms);
  761. return ret;
  762. }
  763. static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
  764. char *clock_name)
  765. {
  766. struct dss_module_power *mp = &dpu_kms->mp;
  767. int i;
  768. for (i = 0; i < mp->num_clk; i++) {
  769. if (!strcmp(mp->clk_config[i].clk_name, clock_name))
  770. return &mp->clk_config[i];
  771. }
  772. return NULL;
  773. }
/*
 * Return the current rate of the named DPU clock.
 *
 * NOTE(review): the return type is u64, so the -EINVAL returned for an
 * unknown clock arrives at callers as a huge positive value — verify
 * callers treat it accordingly.
 */
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct dss_clk *clk;

	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk->clk);
}
  782. static void dpu_kms_handle_power_event(u32 event_type, void *usr)
  783. {
  784. struct dpu_kms *dpu_kms = usr;
  785. if (!dpu_kms)
  786. return;
  787. dpu_vbif_init_memtypes(dpu_kms);
  788. }
  789. static int dpu_kms_hw_init(struct msm_kms *kms)
  790. {
  791. struct dpu_kms *dpu_kms;
  792. struct drm_device *dev;
  793. struct msm_drm_private *priv;
  794. int i, rc = -EINVAL;
  795. if (!kms) {
  796. DPU_ERROR("invalid kms\n");
  797. goto end;
  798. }
  799. dpu_kms = to_dpu_kms(kms);
  800. dev = dpu_kms->dev;
  801. if (!dev) {
  802. DPU_ERROR("invalid device\n");
  803. goto end;
  804. }
  805. rc = dpu_dbg_init(&dpu_kms->pdev->dev);
  806. if (rc) {
  807. DRM_ERROR("failed to init dpu dbg: %d\n", rc);
  808. goto end;
  809. }
  810. priv = dev->dev_private;
  811. if (!priv) {
  812. DPU_ERROR("invalid private data\n");
  813. goto dbg_destroy;
  814. }
  815. dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
  816. if (IS_ERR(dpu_kms->mmio)) {
  817. rc = PTR_ERR(dpu_kms->mmio);
  818. DPU_ERROR("mdp register memory map failed: %d\n", rc);
  819. dpu_kms->mmio = NULL;
  820. goto error;
  821. }
  822. DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
  823. dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
  824. dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
  825. if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
  826. rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
  827. DPU_ERROR("vbif register memory map failed: %d\n", rc);
  828. dpu_kms->vbif[VBIF_RT] = NULL;
  829. goto error;
  830. }
  831. dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
  832. dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
  833. if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
  834. dpu_kms->vbif[VBIF_NRT] = NULL;
  835. DPU_DEBUG("VBIF NRT is not defined");
  836. } else {
  837. dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
  838. "vbif_nrt");
  839. }
  840. dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
  841. if (IS_ERR(dpu_kms->reg_dma)) {
  842. dpu_kms->reg_dma = NULL;
  843. DPU_DEBUG("REG_DMA is not defined");
  844. } else {
  845. dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
  846. }
  847. dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
  848. "core");
  849. if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
  850. rc = PTR_ERR(dpu_kms->core_client);
  851. if (!dpu_kms->core_client)
  852. rc = -EINVAL;
  853. DPU_ERROR("dpu power client create failed: %d\n", rc);
  854. dpu_kms->core_client = NULL;
  855. goto error;
  856. }
  857. pm_runtime_get_sync(&dpu_kms->pdev->dev);
  858. _dpu_kms_core_hw_rev_init(dpu_kms);
  859. pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
  860. dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
  861. if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
  862. rc = PTR_ERR(dpu_kms->catalog);
  863. if (!dpu_kms->catalog)
  864. rc = -EINVAL;
  865. DPU_ERROR("catalog init failed: %d\n", rc);
  866. dpu_kms->catalog = NULL;
  867. goto power_error;
  868. }
  869. dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
  870. /*
  871. * Now we need to read the HW catalog and initialize resources such as
  872. * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
  873. */
  874. rc = _dpu_kms_mmu_init(dpu_kms);
  875. if (rc) {
  876. DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
  877. goto power_error;
  878. }
  879. rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
  880. dpu_kms->dev);
  881. if (rc) {
  882. DPU_ERROR("rm init failed: %d\n", rc);
  883. goto power_error;
  884. }
  885. dpu_kms->rm_init = true;
  886. dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
  887. if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
  888. rc = PTR_ERR(dpu_kms->hw_mdp);
  889. if (!dpu_kms->hw_mdp)
  890. rc = -EINVAL;
  891. DPU_ERROR("failed to get hw_mdp: %d\n", rc);
  892. dpu_kms->hw_mdp = NULL;
  893. goto power_error;
  894. }
  895. for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
  896. u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
  897. dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
  898. dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
  899. if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
  900. rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
  901. if (!dpu_kms->hw_vbif[vbif_idx])
  902. rc = -EINVAL;
  903. DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
  904. dpu_kms->hw_vbif[vbif_idx] = NULL;
  905. goto power_error;
  906. }
  907. }
  908. rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
  909. &dpu_kms->phandle,
  910. _dpu_kms_get_clk(dpu_kms, "core"));
  911. if (rc) {
  912. DPU_ERROR("failed to init perf %d\n", rc);
  913. goto perf_err;
  914. }
  915. dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
  916. if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
  917. rc = PTR_ERR(dpu_kms->hw_intr);
  918. DPU_ERROR("hw_intr init failed: %d\n", rc);
  919. dpu_kms->hw_intr = NULL;
  920. goto hw_intr_init_err;
  921. }
  922. /*
  923. * _dpu_kms_drm_obj_init should create the DRM related objects
  924. * i.e. CRTCs, planes, encoders, connectors and so forth
  925. */
  926. rc = _dpu_kms_drm_obj_init(dpu_kms);
  927. if (rc) {
  928. DPU_ERROR("modeset init failed: %d\n", rc);
  929. goto drm_obj_init_err;
  930. }
  931. dev->mode_config.min_width = 0;
  932. dev->mode_config.min_height = 0;
  933. /*
  934. * max crtc width is equal to the max mixer width * 2 and max height is
  935. * is 4K
  936. */
  937. dev->mode_config.max_width =
  938. dpu_kms->catalog->caps->max_mixer_width * 2;
  939. dev->mode_config.max_height = 4096;
  940. /*
  941. * Support format modifiers for compression etc.
  942. */
  943. dev->mode_config.allow_fb_modifiers = true;
  944. /*
  945. * Handle (re)initializations during power enable
  946. */
  947. dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
  948. dpu_kms->power_event = dpu_power_handle_register_event(
  949. &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
  950. dpu_kms_handle_power_event, dpu_kms, "kms");
  951. pm_runtime_put_sync(&dpu_kms->pdev->dev);
  952. return 0;
  953. drm_obj_init_err:
  954. dpu_core_perf_destroy(&dpu_kms->perf);
  955. hw_intr_init_err:
  956. perf_err:
  957. power_error:
  958. pm_runtime_put_sync(&dpu_kms->pdev->dev);
  959. error:
  960. _dpu_kms_hw_destroy(dpu_kms);
  961. dbg_destroy:
  962. dpu_dbg_destroy();
  963. end:
  964. return rc;
  965. }
  966. struct msm_kms *dpu_kms_init(struct drm_device *dev)
  967. {
  968. struct msm_drm_private *priv;
  969. struct dpu_kms *dpu_kms;
  970. int irq;
  971. if (!dev || !dev->dev_private) {
  972. DPU_ERROR("drm device node invalid\n");
  973. return ERR_PTR(-EINVAL);
  974. }
  975. priv = dev->dev_private;
  976. dpu_kms = to_dpu_kms(priv->kms);
  977. irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
  978. if (irq < 0) {
  979. DPU_ERROR("failed to get irq: %d\n", irq);
  980. return ERR_PTR(irq);
  981. }
  982. dpu_kms->base.irq = irq;
  983. return &dpu_kms->base;
  984. }
  985. static int dpu_bind(struct device *dev, struct device *master, void *data)
  986. {
  987. struct drm_device *ddev = dev_get_drvdata(master);
  988. struct platform_device *pdev = to_platform_device(dev);
  989. struct msm_drm_private *priv = ddev->dev_private;
  990. struct dpu_kms *dpu_kms;
  991. struct dss_module_power *mp;
  992. int ret = 0;
  993. dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
  994. if (!dpu_kms)
  995. return -ENOMEM;
  996. mp = &dpu_kms->mp;
  997. ret = msm_dss_parse_clock(pdev, mp);
  998. if (ret) {
  999. DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
  1000. return ret;
  1001. }
  1002. dpu_power_resource_init(pdev, &dpu_kms->phandle);
  1003. platform_set_drvdata(pdev, dpu_kms);
  1004. msm_kms_init(&dpu_kms->base, &kms_funcs);
  1005. dpu_kms->dev = ddev;
  1006. dpu_kms->pdev = pdev;
  1007. pm_runtime_enable(&pdev->dev);
  1008. dpu_kms->rpm_enabled = true;
  1009. priv->kms = &dpu_kms->base;
  1010. return ret;
  1011. }
  1012. static void dpu_unbind(struct device *dev, struct device *master, void *data)
  1013. {
  1014. struct platform_device *pdev = to_platform_device(dev);
  1015. struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
  1016. struct dss_module_power *mp = &dpu_kms->mp;
  1017. dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
  1018. msm_dss_put_clk(mp->clk_config, mp->num_clk);
  1019. devm_kfree(&pdev->dev, mp->clk_config);
  1020. mp->num_clk = 0;
  1021. if (dpu_kms->rpm_enabled)
  1022. pm_runtime_disable(&pdev->dev);
  1023. }
/*
 * Component framework glue: dpu_bind()/dpu_unbind() run when the master
 * msm drm device assembles or tears down its component devices.
 */
static const struct component_ops dpu_ops = {
	.bind = dpu_bind,
	.unbind = dpu_unbind,
};
  1028. static int dpu_dev_probe(struct platform_device *pdev)
  1029. {
  1030. return component_add(&pdev->dev, &dpu_ops);
  1031. }
  1032. static int dpu_dev_remove(struct platform_device *pdev)
  1033. {
  1034. component_del(&pdev->dev, &dpu_ops);
  1035. return 0;
  1036. }
  1037. static int __maybe_unused dpu_runtime_suspend(struct device *dev)
  1038. {
  1039. int rc = -1;
  1040. struct platform_device *pdev = to_platform_device(dev);
  1041. struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
  1042. struct drm_device *ddev;
  1043. struct dss_module_power *mp = &dpu_kms->mp;
  1044. ddev = dpu_kms->dev;
  1045. if (!ddev) {
  1046. DPU_ERROR("invalid drm_device\n");
  1047. goto exit;
  1048. }
  1049. rc = dpu_power_resource_enable(&dpu_kms->phandle,
  1050. dpu_kms->core_client, false);
  1051. if (rc)
  1052. DPU_ERROR("resource disable failed: %d\n", rc);
  1053. rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
  1054. if (rc)
  1055. DPU_ERROR("clock disable failed rc:%d\n", rc);
  1056. exit:
  1057. return rc;
  1058. }
  1059. static int __maybe_unused dpu_runtime_resume(struct device *dev)
  1060. {
  1061. int rc = -1;
  1062. struct platform_device *pdev = to_platform_device(dev);
  1063. struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
  1064. struct drm_device *ddev;
  1065. struct dss_module_power *mp = &dpu_kms->mp;
  1066. ddev = dpu_kms->dev;
  1067. if (!ddev) {
  1068. DPU_ERROR("invalid drm_device\n");
  1069. goto exit;
  1070. }
  1071. rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
  1072. if (rc) {
  1073. DPU_ERROR("clock enable failed rc:%d\n", rc);
  1074. goto exit;
  1075. }
  1076. rc = dpu_power_resource_enable(&dpu_kms->phandle,
  1077. dpu_kms->core_client, true);
  1078. if (rc)
  1079. DPU_ERROR("resource enable failed: %d\n", rc);
  1080. exit:
  1081. return rc;
  1082. }
/* Only runtime-PM callbacks are provided; no system-sleep ops are set */
static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
};
/* Device-tree match table; exported for module autoloading below */
static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,sdm845-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
/* Platform driver wiring: probe/remove plus runtime-PM and OF matching */
static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};
/*
 * Register the DPU platform driver (called from the msm driver init).
 * NOTE(review): the platform_driver_register() return value is ignored,
 * so a registration failure goes unreported here — confirm the caller
 * does not need it.
 */
void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}
/* Unregister the DPU platform driver (called from the msm driver exit) */
void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}