/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check_modeset(drm, state);
	if (err < 0)
		return err;

	err = tegra_display_hub_atomic_check(drm, state);
	if (err < 0)
		return err;

	err = drm_atomic_normalize_zpos(drm, state);
	if (err < 0)
		return err;

	err = drm_atomic_helper_check_planes(drm, state);
	if (err < 0)
		return err;

	if (state->legacy_cursor_update)
		state->async_update = !drm_atomic_helper_async_check(drm, state);

	return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	return 0;

hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
					 struct drm_tegra_waitchk __user *src,
					 struct drm_file *file)
{
	u32 cmdbuf;
	int err;

	err = get_user(cmdbuf, &src->handle);
	if (err < 0)
		return err;

	err = get_user(dest->offset, &src->offset);
	if (err < 0)
		return err;

	err = get_user(dest->syncpt_id, &src->syncpt);
	if (err < 0)
		return err;

	err = get_user(dest->thresh, &src->thresh);
	if (err < 0)
		return err;

	dest->bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->bo)
		return -ENOENT;

	return 0;
}

int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_waitchk __user *user_waitchks;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_waitchks = u64_to_user_ptr(args->waitchks);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned;
		 * an unaligned offset is malformed and causes command
		 * stream corruption when the buffer address is relocated.
		 */
		if (offset & 3 || offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocarray[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset will cause an unaligned write
		 * during relocation patching, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* copy and resolve waitchks from submit */
	while (num_waitchks--) {
		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
		struct tegra_bo *obj;

		err = host1x_waitchk_copy_from_user(
			wait, &user_waitchks[num_waitchks], file);
		if (err < 0)
			goto fail;

		obj = host1x_to_tegra_bo(wait->bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned offset will cause an unaligned write during
		 * waitchk patching, corrupting the command stream.
		 */
		if (wait->offset & 3 ||
		    wait->offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}
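
/*
 * A minimal userspace sketch of driving the submit path above through the
 * staging TEGRA_SUBMIT ioctl. The fd, GEM handle, context ID and word count
 * are hypothetical and error handling is omitted; this is illustrative, not
 * part of the driver:
 *
 *	struct drm_tegra_cmdbuf cmdbuf = {
 *		.handle = gem_handle,		// from TEGRA_GEM_CREATE
 *		.offset = 0,			// must be 4-byte aligned
 *		.words = num_words,		// at most 16383
 *	};
 *	struct drm_tegra_syncpt incr = { .id = syncpt_id, .incrs = 1 };
 *	struct drm_tegra_submit submit = {
 *		.context = context_id,		// from TEGRA_OPEN_CHANNEL
 *		.num_syncpts = 1,		// exactly one is required
 *		.num_cmdbufs = 1,
 *		.cmdbufs = (uintptr_t)&cmdbuf,
 *		.syncpts = (uintptr_t)&incr,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_TEGRA_SUBMIT, &submit);
 *	// on success, submit.fence holds the syncpoint threshold to wait on
 */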

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}
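
/*
 * Note that the ID handed back by idr_alloc() in tegra_client_open() is
 * stored in context->id and returned to userspace as args->context by
 * TEGRA_OPEN_CHANNEL below; the other channel ioctls use it to look the
 * context up again via idr_find().
 */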

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};
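
/*
 * A minimal userspace sketch of the channel ioctls above. The fd and the
 * client class are hypothetical and error handling is omitted; illustrative
 * only:
 *
 *	struct drm_tegra_open_channel open_args = {
 *		.client = HOST1X_CLASS_GR2D,	// hypothetical client class
 *	};
 *	struct drm_tegra_get_syncpt get_args;
 *	struct drm_tegra_close_channel close_args;
 *
 *	ioctl(fd, DRM_IOCTL_TEGRA_OPEN_CHANNEL, &open_args);
 *
 *	get_args.context = open_args.context;
 *	get_args.index = 0;
 *	ioctl(fd, DRM_IOCTL_TEGRA_GET_SYNCPT, &get_args);
 *	// get_args.id can now be used with SYNCPT_READ/INCR/WAIT
 *
 *	close_args.context = open_args.context;
 *	ioctl(fd, DRM_IOCTL_TEGRA_CLOSE_CHANNEL, &close_args);
 */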

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}
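
/*
 * A minimal kernel-side sketch of pairing tegra_drm_alloc() with
 * tegra_drm_free(), e.g. for a small firmware or scratch buffer; the size
 * is hypothetical and error handling is trimmed:
 *
 *	dma_addr_t iova;
 *	void *virt;
 *
 *	virt = tegra_drm_alloc(tegra, SZ_4K, &iova);
 *	if (IS_ERR(virt))
 *		return PTR_ERR(virt);
 *
 *	// fill "virt" via the CPU, program the device with "iova" ...
 *
 *	tegra_drm_free(tegra, SZ_4K, virt, iova);
 */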

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");