/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gpu.h"
#include "msm_kms.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	2
#define MSM_VERSION_PATCHLEVEL	0

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
	.atomic_state_alloc = msm_atomic_state_alloc,
	.atomic_state_clear = msm_atomic_state_clear,
	.atomic_state_free = msm_atomic_state_free,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
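/* note: the size string accepts memparse() suffixes, so e.g. booting
 * with msm.vram=64m on the kernel command line (or passing vram=64m as
 * a module option) overrides the 16m default.
 */
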
bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
 * Util/helpers:
 */

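/* Look up a clock by its canonical DT name, falling back to the legacy
 * "<name>_clk" binding still found in older device trees (with a
 * warning so the DT can be fixed up):
 */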
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}

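/* Map a (possibly named) MEM resource of a platform device.  The
 * mapping is devres-managed, and failures are returned as ERR_PTR()
 * values so callers can use IS_ERR():
 */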
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		pr_err("IO:R %p %08x\n", addr, val);
	return val;
}

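/* vblank enable/disable requests are queued to a worker rather than
 * handled inline, since the drm core can ask for vblank on/off from
 * atomic context while the kms enable_vblank/disable_vblank callbacks
 * may need to sleep (e.g. to enable clocks):
 */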
struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}

	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);

	return 0;
}

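/* Teardown mirrors msm_drm_init() in reverse; it also serves as the
 * error-unwind path for the fail: label there.
 */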
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	drm_dev_unregister(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif
	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	flush_workqueue(priv->atomic_wq);
	destroy_workqueue(priv->atomic_wq);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (gpu) {
		mutex_lock(&ddev->struct_mutex);
		// XXX what do we do here?
		//pm_runtime_enable(&pdev->dev);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&ddev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	msm_mdss_destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_unref(ddev);

	kfree(priv);

	return 0;
}

static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}

#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go.  There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

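/* Called from the component master's bind op once all sub-components
 * have been matched; brings up the kms module, vblank and irq handling,
 * then registers the drm device.
 */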
static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		dev_err(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		drm_dev_unref(ddev);
		return -ENOMEM;
	}

	ddev->dev_private = priv;
	priv->dev = ddev;

	ret = msm_mdss_init(ddev);
	if (ret) {
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	priv->wq = alloc_ordered_workqueue("msm", 0);
	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret) {
		msm_mdss_destroy(ddev);
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	ret = msm_init_vram(ddev);
	if (ret)
		goto fail;

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case 5:
		kms = mdp5_kms_init(ddev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev, "failed to initialize vblank\n");
		goto fail;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			dev_err(dev, "failed to install IRQ handler\n");
			goto fail;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto fail;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(ddev);

	return 0;

fail:
	msm_drm_uninit(dev);
	return ret;
}

/*
 * DRM operations:
 */

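/* The gpu is loaded lazily (see msm_open() below); init_lock ensures
 * that racing opens only load it once:
 */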
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}

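/* Interrupt handling is delegated to the kms module, which owns the
 * actual MDP hardware irq registers:
 */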
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%u", dev, pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%u", dev, pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

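/* The handlers below implement the MSM_* ioctls declared in the uapi
 * header (include/uapi/drm/msm_drm.h); they are wired up via the
 * msm_ioctls table further down:
 */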
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_gem_object *obj, uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return -EINVAL;

	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->flags & ~MSM_INFO_FLAGS)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & MSM_INFO_IOVA) {
		uint64_t iova;

		ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
		if (!ret)
			args->offset = iova;
	} else {
		args->offset = msm_gem_mmap_offset(obj);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_unreference(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = msm_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_res_obj  = msm_gem_prime_res_obj,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	DBG("");

	if (priv->mdss)
		return msm_mdss_disable(priv->mdss);

	return 0;
}

static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	DBG("");

	if (priv->mdss)
		return msm_mdss_enable(priv->mdss);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other),
 * so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			dev_err(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		drm_of_component_match_add(master_dev, matchptr, compare_of,
					   intf);
		of_node_put(intf);
	}

	return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5 based devices don't have a flat hierarchy. There is a top level
	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
	 * children devices, find the MDP5 node, and then add the interfaces
	 * to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			dev_err(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			dev_err(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add
 * them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		return ret;

	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)4 },	/* MDP4 */
	{ .compatible = "qcom,mdss", .data = (void *)5 },	/* MDP5 MDSS */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};

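/* Register the sub-device drivers (mdp, dsi, edp, hdmi, adreno) before
 * the toplevel platform driver, so that the component devices they
 * provide are available by the time the component master binds:
 */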
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");