mdp5_mdss.c

/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqdomain.h>
#include <linux/irq.h>

#include "msm_drv.h"
#include "mdp5_kms.h"

#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)

struct mdp5_mdss {
	struct msm_mdss base;

	void __iomem *mmio, *vbif;

	struct regulator *vdd;

	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *vsync_clk;

	struct {
		volatile unsigned long enabled_mask;
		struct irq_domain *domain;
	} irqcontroller;
};

static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
	msm_writel(data, mdp5_mdss->mmio + reg);
}

static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
	return msm_readl(mdp5_mdss->mmio + reg);
}
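
/*
 * Top-level MDSS interrupt handler: read the aggregated HW_INTR_STATUS
 * register and dispatch each pending bit to the matching virtual irq in
 * the sub-block irq domain.
 */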
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct mdp5_mdss *mdp5_mdss = arg;
	u32 intr;

	intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", intr);

	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdp5_mdss->irqcontroller.domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	return IRQ_HANDLED;
}

/*
 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
 * can register to get their irq's delivered
 */

#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
		MDSS_HW_INTR_STATUS_INTR_DSI0 | \
		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
		MDSS_HW_INTR_STATUS_INTR_HDMI | \
		MDSS_HW_INTR_STATUS_INTR_EDP)
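
/*
 * mask/unmask only update the software enabled_mask for the sub-block irq;
 * no MDSS register is written here.
 */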
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
	struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);

	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
	smp_mb__after_atomic();
}

static struct irq_chip mdss_hw_irq_chip = {
	.name		= "mdss",
	.irq_mask	= mdss_hw_mask_irq,
	.irq_unmask	= mdss_hw_unmask_irq,
};

static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	struct mdp5_mdss *mdp5_mdss = d->host_data;

	if (!(VALID_IRQS & (1 << hwirq)))
		return -EPERM;

	irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, mdp5_mdss);

	return 0;
}

static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
	.map	= mdss_hw_irqdomain_map,
	.xlate	= irq_domain_xlate_onecell,
};
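
/* Register a linear irq domain (32 hwirqs) for the MDSS sub-blocks. */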
static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
	struct device *dev = mdp5_mdss->base.dev->dev;
	struct irq_domain *d;

	d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
				  mdp5_mdss);
	if (!d) {
		dev_err(dev, "mdss irq domain add failed\n");
		return -ENXIO;
	}

	mdp5_mdss->irqcontroller.enabled_mask = 0;
	mdp5_mdss->irqcontroller.domain = d;

	return 0;
}
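
/*
 * msm_mdss_funcs enable/disable hooks: the AHB interface clock is always
 * toggled, while the AXI bus and vsync clocks are only touched if they
 * were found at probe time.
 */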
static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	clk_prepare_enable(mdp5_mdss->ahb_clk);
	if (mdp5_mdss->axi_clk)
		clk_prepare_enable(mdp5_mdss->axi_clk);
	if (mdp5_mdss->vsync_clk)
		clk_prepare_enable(mdp5_mdss->vsync_clk);

	return 0;
}

static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
	DBG("");

	if (mdp5_mdss->vsync_clk)
		clk_disable_unprepare(mdp5_mdss->vsync_clk);
	if (mdp5_mdss->axi_clk)
		clk_disable_unprepare(mdp5_mdss->axi_clk);
	clk_disable_unprepare(mdp5_mdss->ahb_clk);

	return 0;
}
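
/*
 * Look up the "iface", "bus" and "vsync" clocks; any clock that cannot be
 * found is treated as optional and left as NULL.
 */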
static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
	struct platform_device *pdev =
			to_platform_device(mdp5_mdss->base.dev->dev);

	mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
	if (IS_ERR(mdp5_mdss->ahb_clk))
		mdp5_mdss->ahb_clk = NULL;

	mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
	if (IS_ERR(mdp5_mdss->axi_clk))
		mdp5_mdss->axi_clk = NULL;

	mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
	if (IS_ERR(mdp5_mdss->vsync_clk))
		mdp5_mdss->vsync_clk = NULL;

	return 0;
}
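
/*
 * Undo the probe-time setup: remove the sub-block irq domain, then disable
 * the vdd regulator and runtime PM.
 */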
static void mdp5_mdss_destroy(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);

	if (!mdp5_mdss)
		return;

	irq_domain_remove(mdp5_mdss->irqcontroller.domain);
	mdp5_mdss->irqcontroller.domain = NULL;

	regulator_disable(mdp5_mdss->vdd);

	pm_runtime_disable(dev->dev);
}

static const struct msm_mdss_funcs mdss_funcs = {
	.enable	= mdp5_mdss_enable,
	.disable = mdp5_mdss_disable,
	.destroy = mdp5_mdss_destroy,
};
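
/*
 * Probe-time setup: map the MDSS and VBIF register regions, acquire clocks
 * and the vdd regulator, hook up the top-level interrupt handler and the
 * sub-block irq domain, then publish the msm_mdss through priv->mdss.
 */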
int mdp5_mdss_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_mdss *mdp5_mdss;
	int ret;

	DBG("");

	if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
		return 0;

	mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
	if (!mdp5_mdss) {
		ret = -ENOMEM;
		goto fail;
	}

	mdp5_mdss->base.dev = dev;

	mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
	if (IS_ERR(mdp5_mdss->mmio)) {
		ret = PTR_ERR(mdp5_mdss->mmio);
		goto fail;
	}

	mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdp5_mdss->vbif)) {
		ret = PTR_ERR(mdp5_mdss->vbif);
		goto fail;
	}

	ret = msm_mdss_get_clocks(mdp5_mdss);
	if (ret) {
		dev_err(dev->dev, "failed to get clocks: %d\n", ret);
		goto fail;
	}

	/* Regulator to enable GDSCs in downstream kernels */
	mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
	if (IS_ERR(mdp5_mdss->vdd)) {
		ret = PTR_ERR(mdp5_mdss->vdd);
		goto fail;
	}

	ret = regulator_enable(mdp5_mdss->vdd);
	if (ret) {
		dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
			ret);
		goto fail;
	}

	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
	if (ret) {
		dev_err(dev->dev, "failed to init irq: %d\n", ret);
		goto fail_irq;
	}

	ret = mdss_irq_domain_init(mdp5_mdss);
	if (ret) {
		dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
		goto fail_irq;
	}

	mdp5_mdss->base.funcs = &mdss_funcs;
	priv->mdss = &mdp5_mdss->base;

	pm_runtime_enable(dev->dev);

	return 0;
fail_irq:
	regulator_disable(mdp5_mdss->vdd);
fail:
	return ret;
}