msm_drv.c

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_gpu.h"
#include "msm_kms.h"

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
};

int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;
	return idx;
}

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

/*
 * Util/helpers:
 */

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	/* log reads at the same (debug) level as writes: */
	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
	return val;
}
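
/* The enable_vblank()/disable_vblank() hooks are called by the drm core
 * from atomic context, but the kms callbacks that actually turn the
 * hardware irq on/off may need to sleep.  So the requests are queued on
 * a list and serviced from a worker:
 */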
struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
					struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		/* drop the lock across the kms callback, which may sleep: */
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
					priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
					priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	/* may be called from atomic context, hence GFP_ATOMIC: */
	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);

	return 0;
}

/*
 * DRM operations:
 */

static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	drm_kms_helper_poll_fini(dev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(dev);
#endif

	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&dev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
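
/* The OF match data encodes the MDP generation (4 or 5; see the
 * dt_match table at the bottom of this file):
 */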
static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}

#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go.  There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		if (ret)
			return ret;
		/* use resource_size(); 'r.end - r.start' is one byte short */
		size = resource_size(&r);
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	} else if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		DEFINE_DMA_ATTRS(attrs);
		void *p;

		priv->vram.size = size;
		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(dev);

	platform_set_drvdata(pdev, dev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		return ret;

	ret = msm_init_vram(dev);
	if (ret)
		goto fail;

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(dev);
		break;
	case 5:
		kms = mdp5_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev)
		priv->fbdev = msm_fbdev_init(dev);
#endif

	ret = msm_debugfs_late_init(dev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%u", dev, pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;
	DBG("dev=%p, crtc=%u", dev, pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * Fences:
 */
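
/* In this driver a fence is just a monotonically increasing sequence
 * number; a fence is considered signalled once priv->completed_fence
 * has caught up with it (see msm_update_fence() below).
 */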

int msm_wait_fence(struct drm_device *dev, uint32_t fence,
		ktime_t *timeout, bool interruptible)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		ktime_t now = ktime_get();
		unsigned long remaining_jiffies;

		if (ktime_compare(*timeout, now) < 0) {
			remaining_jiffies = 0;
		} else {
			ktime_t rem = ktime_sub(*timeout, now);
			struct timespec ts = ktime_to_timespec(rem);
			remaining_jiffies = timespec_to_jiffies(&ts);
		}

		if (interruptible)
			ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);
		else
			ret = wait_event_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

int msm_queue_fence_cb(struct drm_device *dev,
		struct msm_fence_cb *cb, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (fence > priv->completed_fence) {
		cb->fence = fence;
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}

void __msm_fence_worker(struct work_struct *work)
{
	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
	cb->func(cb);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
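
/* The uabi timeout is treated as an absolute CLOCK_MONOTONIC time;
 * convert it to a ktime for the wait helpers (which compare it against
 * ktime_get(), see msm_wait_fence() above):
 */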
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	return msm_wait_fence(dev, args->fence, &timeout, true);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl       = drm_compat_ioctl,
#endif
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.load               = msm_load,
	.unload             = msm_unload,
	.open               = msm_open,
	.preclose           = msm_preclose,
	.lastclose          = msm_lastclose,
	.set_busid          = drm_platform_set_busid,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_no_hw_counter,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.dumb_destroy       = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.debugfs_cleanup    = msm_debugfs_cleanup,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = 1,
	.minor              = 0,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: this duplicates the same code as exynos or imx (or probably any
 * other componentized driver), so there is probably some room for helpers.
 */

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int add_components(struct device *dev, struct component_match **matchptr,
		const char *name)
{
	struct device_node *np = dev->of_node;
	unsigned i;

	for (i = 0; ; i++) {
		struct device_node *node;

		node = of_parse_phandle(np, name, i);
		if (!node)
			break;

		component_match_add(dev, matchptr, compare_of, node);
	}

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return drm_platform_init(&msm_driver, to_platform_device(dev));
}

static void msm_drm_unbind(struct device *dev)
{
	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;

	add_components(&pdev->dev, &match, "connectors");
	add_components(&pdev->dev, &match, "gpus");

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *) 4 },	/* mdp4 */
	{ .compatible = "qcom,mdp5", .data = (void *) 5 },	/* mdp5 */
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", .data = (void *) 5 },	/* mdp5 */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
	.id_table   = msm_id,
};

static int __init msm_drm_register(void)
{
	DBG("init");
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");