etnaviv_drv.c

/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/component.h>
#include <linux/of_platform.h>
#include <drm/drm_of.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
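
/*
 * Map the named MEM resource of the platform device (or the first one when no
 * name is given) and, when register logging is enabled, print the resulting
 * mapping.
 */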
void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ptr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ptr)) {
		dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
			PTR_ERR(ptr));
		return ptr;
	}

	if (reglog)
		dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
			   dbgname, ptr, (size_t)resource_size(res));

	return ptr;
}
void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}

u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}
/*
 * DRM operations:
 */
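
/*
 * Bring up every GPU core that was bound to the master device.  A core whose
 * etnaviv_gpu_init() fails is dropped from the pipe array, so ioctls aimed at
 * that pipe later fail with -ENXIO.
 */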
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}

static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}
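
/*
 * On file close, drop any gpu->lastctx reference to this context (under the
 * GPU lock) before the context is freed.
 */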
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			mutex_lock(&gpu->lock);
			if (gpu->lastctx == ctx)
				gpu->lastctx = NULL;
			mutex_unlock(&gpu->lock);
		}
	}

	kfree(ctx);
}

/*
 * DRM debugfs:
 */
#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_print(&gpu->mmu->mm, &p);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
			size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 4))
			seq_puts(m, "\n");
		if (i % 4 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
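
/*
 * The debugfs entries below go through one of two wrappers: show_unlocked()
 * runs a device-wide dump once, show_each_gpu() runs the dump once for every
 * bound GPU pipe.
 */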
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
	{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
	{"gem", show_unlocked, 0, etnaviv_gem_show},
	{"mm", show_unlocked, 0, etnaviv_mm_show},
	{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
	{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static int etnaviv_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(etnaviv_debugfs_list,
			ARRAY_SIZE(etnaviv_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
		return ret;
	}

	return ret;
}
#endif

/*
 * DRM ioctls:
 */
static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct timespec *timeout = &TS(args->timeout);
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
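
/* All etnaviv ioctls require authentication and are allowed on render nodes. */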
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};

static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM |
			      DRIVER_PRIME |
			      DRIVER_RENDER,
	.open               = etnaviv_open,
	.postclose          = etnaviv_postclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_res_obj  = etnaviv_gem_prime_res_obj,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
	.gem_prime_mmap     = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 1,
};

/*
 * Platform driver:
 */
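
/*
 * Master bind callback: allocate the drm_device and the driver private data,
 * bind all GPU core components, bring the GPUs up and register the DRM
 * device.  Each step is unwound in reverse order on failure.
 */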
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_unref(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};

static int compare_of(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}
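
/*
 * Build the component match list either from the "cores" phandle list in the
 * device tree node or from a NULL-terminated array of device names passed as
 * platform data, then register the component master.
 */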
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct component_match *match = NULL;

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (node) {
		struct device_node *core_node;
		int i;

		for (i = 0; ; i++) {
			core_node = of_parse_phandle(node, "cores", i);
			if (!core_node)
				break;

			drm_of_component_match_add(&pdev->dev, &match,
						   compare_of, core_node);
			of_node_put(core_node);
		}
	} else if (dev->platform_data) {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name           = "etnaviv",
		.of_match_table = dt_match,
	},
};
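
/*
 * Register the per-GPU-core platform driver first and the subsystem
 * (component master) driver second; the core driver is unregistered again if
 * the second registration fails.
 */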
static int __init etnaviv_init(void)
{
	int ret;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		platform_driver_unregister(&etnaviv_gpu_driver);

	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	platform_driver_unregister(&etnaviv_gpu_driver);
	platform_driver_unregister(&etnaviv_platform_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");