/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
        struct idr contexts;
        struct mutex lock;
};

static int tegra_atomic_check(struct drm_device *drm,
                              struct drm_atomic_state *state)
{
        int err;

        err = drm_atomic_helper_check_modeset(drm, state);
        if (err < 0)
                return err;

        err = drm_atomic_normalize_zpos(drm, state);
        if (err < 0)
                return err;

        err = drm_atomic_helper_check_planes(drm, state);
        if (err < 0)
                return err;

        if (state->legacy_cursor_update)
                state->async_update =
                        !drm_atomic_helper_async_check(drm, state);

        return 0;
}

static struct drm_atomic_state *
tegra_atomic_state_alloc(struct drm_device *drm)
{
        struct tegra_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state || drm_atomic_state_init(drm, &state->base) < 0) {
                kfree(state);
                return NULL;
        }

        return &state->base;
}

static void tegra_atomic_state_clear(struct drm_atomic_state *state)
{
        struct tegra_atomic_state *tegra = to_tegra_atomic_state(state);

        drm_atomic_state_default_clear(state);
        tegra->clk_disp = NULL;
        tegra->dc = NULL;
        tegra->rate = 0;
}

static void tegra_atomic_state_free(struct drm_atomic_state *state)
{
        drm_atomic_state_default_release(state);
        kfree(state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
        .fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
        .output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
        .atomic_check = tegra_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
        .atomic_state_alloc = tegra_atomic_state_alloc,
        .atomic_state_clear = tegra_atomic_state_clear,
        .atomic_state_free = tegra_atomic_state_free,
};
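
/*
 * When a display hub is present, its state must be committed in between the
 * modeset disables and the plane updates, so the commit tail is open-coded
 * below; without a hub the stock runtime-PM commit tail helper is used.
 */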

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
        struct drm_device *drm = old_state->dev;
        struct tegra_drm *tegra = drm->dev_private;

        if (tegra->hub) {
                drm_atomic_helper_commit_modeset_disables(drm, old_state);
                tegra_display_hub_atomic_commit(drm, old_state);
                drm_atomic_helper_commit_planes(drm, old_state, 0);
                drm_atomic_helper_commit_modeset_enables(drm, old_state);
                drm_atomic_helper_commit_hw_done(old_state);
                drm_atomic_helper_wait_for_vblanks(drm, old_state);
                drm_atomic_helper_cleanup_planes(drm, old_state);
        } else {
                drm_atomic_helper_commit_tail_rpm(old_state);
        }
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
        .atomic_commit_tail = tegra_atomic_commit_tail,
};
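
/*
 * If an IOMMU is available, the top CARVEOUT_SZ bytes of its aperture are
 * reserved as a carveout for kernel-internal allocations (see
 * tegra_drm_alloc()), while the remainder is handed to the drm_mm allocator
 * for GEM buffers.
 */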

static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra;
        int err;

        tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
        if (!tegra)
                return -ENOMEM;

        if (iommu_present(&platform_bus_type)) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                struct iommu_domain_geometry *geometry;
                unsigned long order;

                tegra->domain = iommu_domain_alloc(&platform_bus_type);
                if (!tegra->domain) {
                        err = -ENOMEM;
                        goto free;
                }

                geometry = &tegra->domain->geometry;
                gem_start = geometry->aperture_start;
                gem_end = geometry->aperture_end - CARVEOUT_SZ;
                carveout_start = gem_end + 1;
                carveout_end = geometry->aperture_end;

                order = __ffs(tegra->domain->pgsize_bitmap);
                init_iova_domain(&tegra->carveout.domain, 1UL << order,
                                 carveout_start >> order);

                tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
                tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

                drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
                mutex_init(&tegra->mm_lock);

                DRM_DEBUG("IOMMU apertures:\n");
                DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
                DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
                          carveout_end);
        }

        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);

        drm->dev_private = tegra;
        tegra->drm = drm;

        drm_mode_config_init(drm);

        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;

        drm->mode_config.max_width = 4096;
        drm->mode_config.max_height = 4096;

        drm->mode_config.allow_fb_modifiers = true;

        drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
        drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

        err = tegra_drm_fb_prepare(drm);
        if (err < 0)
                goto config;

        drm_kms_helper_poll_init(drm);

        err = host1x_device_init(device);
        if (err < 0)
                goto fbdev;

        if (tegra->hub) {
                err = tegra_display_hub_prepare(tegra->hub);
                if (err < 0)
                        goto device;
        }

        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
         * core, so we need to set this manually in order to allow the
         * DRM_IOCTL_WAIT_VBLANK to operate correctly.
         */
        drm->irq_enabled = true;

        /* syncpoints are used for full 32-bit hardware VBLANK counters */
        drm->max_vblank_count = 0xffffffff;

        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
                goto hub;

        drm_mode_config_reset(drm);

        err = tegra_drm_fb_init(drm);
        if (err < 0)
                goto hub;

        return 0;

hub:
        if (tegra->hub)
                tegra_display_hub_cleanup(tegra->hub);
device:
        host1x_device_exit(device);
fbdev:
        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_free(drm);
config:
        drm_mode_config_cleanup(drm);

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }
free:
        kfree(tegra);
        return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_exit(drm);
        drm_mode_config_cleanup(drm);

        err = host1x_device_exit(device);
        if (err < 0)
                return;

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }

        kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
        struct tegra_drm_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init(&fpriv->contexts);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;

        return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
        context->client->ops->close_channel(context);
        kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem)
                return NULL;

        bo = to_tegra_bo(gem);
        return &bo->base;
}
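
/*
 * Copy a relocation descriptor from userspace field by field and resolve the
 * command buffer and target handles to host1x BOs. The offsets are validated
 * later, in tegra_drm_submit().
 */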

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
                                       struct drm_file *file)
{
        u32 cmdbuf, target;
        int err;

        err = get_user(cmdbuf, &src->cmdbuf.handle);
        if (err < 0)
                return err;

        err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
        if (err < 0)
                return err;

        err = get_user(target, &src->target.handle);
        if (err < 0)
                return err;

        err = get_user(dest->target.offset, &src->target.offset);
        if (err < 0)
                return err;

        err = get_user(dest->shift, &src->shift);
        if (err < 0)
                return err;

        dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;

        dest->target.bo = host1x_bo_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;

        return 0;
}

static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
                                         struct drm_tegra_waitchk __user *src,
                                         struct drm_file *file)
{
        u32 cmdbuf;
        int err;

        err = get_user(cmdbuf, &src->handle);
        if (err < 0)
                return err;

        err = get_user(dest->offset, &src->offset);
        if (err < 0)
                return err;

        err = get_user(dest->syncpt_id, &src->syncpt);
        if (err < 0)
                return err;

        err = get_user(dest->thresh, &src->thresh);
        if (err < 0)
                return err;

        dest->bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->bo)
                return -ENOENT;

        return 0;
}
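
/*
 * Build and submit a host1x job from a userspace submit descriptor: gather
 * buffers, relocations and waitchks are copied in and validated (handles must
 * resolve to GEM objects, offsets must be 4-byte aligned and within the
 * object), the syncpoint is looked up, and the job is pinned and submitted to
 * the context's channel. The GEM references taken during lookup are dropped
 * again before returning.
 */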
int tegra_drm_submit(struct tegra_drm_context *context,
                     struct drm_tegra_submit *args, struct drm_device *drm,
                     struct drm_file *file)
{
        unsigned int num_cmdbufs = args->num_cmdbufs;
        unsigned int num_relocs = args->num_relocs;
        unsigned int num_waitchks = args->num_waitchks;
        struct drm_tegra_cmdbuf __user *user_cmdbufs;
        struct drm_tegra_reloc __user *user_relocs;
        struct drm_tegra_waitchk __user *user_waitchks;
        struct drm_tegra_syncpt __user *user_syncpt;
        struct drm_tegra_syncpt syncpt;
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_gem_object **refs;
        struct host1x_syncpt *sp;
        struct host1x_job *job;
        unsigned int num_refs;
        int err;

        user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
        user_relocs = u64_to_user_ptr(args->relocs);
        user_waitchks = u64_to_user_ptr(args->waitchks);
        user_syncpt = u64_to_user_ptr(args->syncpts);

        /* We don't yet support other than one syncpt_incr struct per submit */
        if (args->num_syncpts != 1)
                return -EINVAL;

        /* We don't yet support waitchks */
        if (args->num_waitchks != 0)
                return -EINVAL;

        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
                               args->num_relocs, args->num_waitchks);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->num_waitchk = args->num_waitchks;
        job->client = (u32)args->context;
        job->class = context->client->base.class;
        job->serialize = true;

        /*
         * Track referenced BOs so that they can be unreferenced after the
         * submission is complete.
         */
        num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;

        refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
        if (!refs) {
                err = -ENOMEM;
                goto put;
        }

        /* reuse as an iterator later */
        num_refs = 0;

        while (num_cmdbufs) {
                struct drm_tegra_cmdbuf cmdbuf;
                struct host1x_bo *bo;
                struct tegra_bo *obj;
                u64 offset;

                if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }

                /*
                 * The maximum number of CDMA gather fetches is 16383; a
                 * higher value means the words count is malformed.
                 */
                if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
                        err = -EINVAL;
                        goto fail;
                }

                bo = host1x_bo_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
                }

                offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
                obj = host1x_to_tegra_bo(bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * The gather buffer base address must be 4-byte aligned; an
                 * unaligned offset is malformed and causes command stream
                 * corruption on the buffer address relocation.
                 */
                if (offset & 3 || offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
                num_cmdbufs--;
                user_cmdbufs++;
        }

        /* copy and resolve relocations from submit */
        while (num_relocs--) {
                struct host1x_reloc *reloc;
                struct tegra_bo *obj;

                err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
                                                  &user_relocs[num_relocs], drm,
                                                  file);
                if (err < 0)
                        goto fail;

                reloc = &job->relocarray[num_relocs];
                obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned cmdbuf offset will cause an unaligned write
                 * during relocation patching, corrupting the command stream.
                 */
                if (reloc->cmdbuf.offset & 3 ||
                    reloc->cmdbuf.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                obj = host1x_to_tegra_bo(reloc->target.bo);
                refs[num_refs++] = &obj->gem;

                if (reloc->target.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        /* copy and resolve waitchks from submit */
        while (num_waitchks--) {
                struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
                struct tegra_bo *obj;

                err = host1x_waitchk_copy_from_user(
                        wait, &user_waitchks[num_waitchks], file);
                if (err < 0)
                        goto fail;

                obj = host1x_to_tegra_bo(wait->bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned offset will cause an unaligned write during
                 * waitchk patching, corrupting the command stream.
                 */
                if (wait->offset & 3 ||
                    wait->offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
                err = -EFAULT;
                goto fail;
        }

        /* check whether syncpoint ID is valid */
        sp = host1x_syncpt_get(host1x, syncpt.id);
        if (!sp) {
                err = -ENOENT;
                goto fail;
        }

        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->is_valid_class = context->client->ops->is_valid_class;
        job->syncpt_incrs = syncpt.incrs;
        job->syncpt_id = syncpt.id;
        job->timeout = 10000;

        if (args->timeout && args->timeout < 10000)
                job->timeout = args->timeout;

        err = host1x_job_pin(job, context->client->base.dev);
        if (err)
                goto fail;

        err = host1x_job_submit(job);
        if (err) {
                host1x_job_unpin(job);
                goto fail;
        }

        args->fence = job->syncpt_end;

fail:
        while (num_refs--)
                drm_gem_object_put_unlocked(refs[num_refs]);

        kfree(refs);

put:
        host1x_job_put(job);
        return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;

        bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
                          struct drm_file *file)
{
        struct drm_tegra_gem_mmap *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -EINVAL;

        bo = to_tegra_bo(gem);

        args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host, args->id);
        if (!sp)
                return -EINVAL;

        args->value = host1x_syncpt_read_min(sp);
        return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_wait(sp, args->thresh,
                                  msecs_to_jiffies(args->timeout),
                                  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
                             struct tegra_drm_client *client,
                             struct tegra_drm_context *context)
{
        int err;

        err = client->ops->open_channel(client, context);
        if (err < 0)
                return err;

        err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                return err;
        }

        context->client = client;
        context->id = err;

        return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
        struct tegra_drm_context *context;
        struct tegra_drm_client *client;
        int err = -ENODEV;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        mutex_lock(&fpriv->lock);

        list_for_each_entry(client, &tegra->clients, list)
                if (client->base.class == args->client) {
                        err = tegra_client_open(fpriv, client, context);
                        if (err < 0)
                                break;

                        args->context = context->id;
                        break;
                }

        if (err < 0)
                kfree(context);

        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
        struct tegra_drm_context *context;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }

        idr_remove(&fpriv->contexts, context->id);
        tegra_drm_context_free(context);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->index >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->index];

        args->id = host1x_syncpt_id(syncpt);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
        struct tegra_drm_context *context;
        int err;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        err = context->client->ops->submit(context, args, drm, file);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
                                 struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt_base *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt_base *base;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->syncpt >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->syncpt];

        base = host1x_syncpt_get_base(syncpt);
        if (!base) {
                err = -ENXIO;
                goto unlock;
        }

        args->id = host1x_syncpt_base_id(base);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_set_tiling *args = data;
        enum tegra_bo_tiling_mode mode;
        struct drm_gem_object *gem;
        unsigned long value = 0;
        struct tegra_bo *bo;

        switch (args->mode) {
        case DRM_TEGRA_GEM_TILING_MODE_PITCH:
                mode = TEGRA_BO_TILING_MODE_PITCH;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_TILED:
                mode = TEGRA_BO_TILING_MODE_TILED;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
                mode = TEGRA_BO_TILING_MODE_BLOCK;

                if (args->value > 5)
                        return -EINVAL;

                value = args->value;
                break;

        default:
                return -EINVAL;
        }

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        bo->tiling.mode = mode;
        bo->tiling.value = value;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_get_tiling *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int err = 0;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        switch (bo->tiling.mode) {
        case TEGRA_BO_TILING_MODE_PITCH:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_TILED:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_BLOCK:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
                args->value = bo->tiling.value;
                break;

        default:
                err = -EINVAL;
                break;
        }

        drm_gem_object_put_unlocked(gem);

        return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_set_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
                return -EINVAL;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        bo->flags = 0;

        if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_get_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        args->flags = 0;

        if (bo->flags & TEGRA_BO_BOTTOM_UP)
                args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
        struct tegra_drm_context *context = p;

        tegra_drm_context_free(context);

        return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;

        mutex_lock(&fpriv->lock);
        idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
        mutex_unlock(&fpriv->lock);

        idr_destroy(&fpriv->contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct drm_framebuffer *fb;

        mutex_lock(&drm->mode_config.fb_lock);

        list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
                seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
                           fb->base.id, fb->width, fb->height,
                           fb->format->depth,
                           fb->format->cpp[0] * 8,
                           drm_framebuffer_read_refcount(fb));
        }

        mutex_unlock(&drm->mode_config.fb_lock);

        return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_printer p = drm_seq_file_printer(s);

        if (tegra->domain) {
                mutex_lock(&tegra->mm_lock);
                drm_mm_print(&tegra->mm, &p);
                mutex_unlock(&tegra->mm_lock);
        }

        return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
        { "framebuffers", tegra_debugfs_framebuffers, 0 },
        { "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(tegra_debugfs_list,
                                        ARRAY_SIZE(tegra_debugfs_list),
                                        minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_ATOMIC | DRIVER_RENDER,
        .load = tegra_drm_load,
        .unload = tegra_drm_unload,
        .open = tegra_drm_open,
        .postclose = tegra_drm_postclose,
        .lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = tegra_debugfs_init,
#endif

        .gem_free_object_unlocked = tegra_bo_free_object,
        .gem_vm_ops = &tegra_bo_vm_ops,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = tegra_gem_prime_export,
        .gem_prime_import = tegra_gem_prime_import,

        .dumb_create = tegra_bo_dumb_create,

        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
        .fops = &tegra_drm_fops,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
                                struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}
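
/*
 * Allocate @size bytes of zeroed memory for use by host1x clients. With an
 * IOMMU the pages are mapped into the carveout IOVA range and @dma is the IO
 * virtual address; without an IOMMU the allocation is forced into the lower
 * 32-bit range (GFP_DMA) and @dma is the physical address.
 */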
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
        struct iova *alloc;
        void *virt;
        gfp_t gfp;
        int err;

        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        gfp = GFP_KERNEL | __GFP_ZERO;
        if (!tegra->domain) {
                /*
                 * Many units only support 32-bit addresses, even on 64-bit
                 * SoCs. If there is no IOMMU to translate into a 32-bit IO
                 * virtual address space, force allocations to be in the
                 * lower 32-bit range.
                 */
                gfp |= GFP_DMA;
        }

        virt = (void *)__get_free_pages(gfp, get_order(size));
        if (!virt)
                return ERR_PTR(-ENOMEM);

        if (!tegra->domain) {
                /*
                 * If IOMMU is disabled, devices address physical memory
                 * directly.
                 */
                *dma = virt_to_phys(virt);
                return virt;
        }

        alloc = alloc_iova(&tegra->carveout.domain,
                           size >> tegra->carveout.shift,
                           tegra->carveout.limit, true);
        if (!alloc) {
                err = -EBUSY;
                goto free_pages;
        }

        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
                        size, IOMMU_READ | IOMMU_WRITE);
        if (err < 0)
                goto free_iova;

        return virt;

free_iova:
        __free_iova(&tegra->carveout.domain, alloc);
free_pages:
        free_pages((unsigned long)virt, get_order(size));

        return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
                    dma_addr_t dma)
{
        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        if (tegra->domain) {
                iommu_unmap(tegra->domain, dma, size);
                free_iova(&tegra->carveout.domain,
                          iova_pfn(&tegra->carveout.domain, dma));
        }

        free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
        struct drm_driver *driver = &tegra_drm_driver;
        struct drm_device *drm;
        int err;

        drm = drm_dev_alloc(driver, &dev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        dev_set_drvdata(&dev->dev, drm);

        err = drm_dev_register(drm, 0);
        if (err < 0)
                goto unref;

        return 0;

unref:
        drm_dev_unref(drm);
        return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
        struct drm_device *drm = dev_get_drvdata(&dev->dev);

        drm_dev_unregister(drm);
        drm_dev_unref(drm);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_kms_helper_poll_disable(drm);
        tegra_drm_fb_suspend(drm);

        tegra->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(tegra->state)) {
                tegra_drm_fb_resume(drm);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(tegra->state);
        }

        return 0;
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_atomic_helper_resume(drm, tegra->state);
        tegra_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
                         host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra20-dc", },
        { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra20-gr2d", },
        { .compatible = "nvidia,tegra20-gr3d", },
        { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
        { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr3d", },
        { .compatible = "nvidia,tegra124-dc", },
        { .compatible = "nvidia,tegra124-sor", },
        { .compatible = "nvidia,tegra124-hdmi", },
        { .compatible = "nvidia,tegra124-dsi", },
        { .compatible = "nvidia,tegra124-vic", },
        { .compatible = "nvidia,tegra132-dsi", },
        { .compatible = "nvidia,tegra210-dc", },
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra210-sor", },
        { .compatible = "nvidia,tegra210-sor1", },
        { .compatible = "nvidia,tegra210-vic", },
        { .compatible = "nvidia,tegra186-display", },
        { .compatible = "nvidia,tegra186-dc", },
        { .compatible = "nvidia,tegra186-sor", },
        { .compatible = "nvidia,tegra186-sor1", },
        { .compatible = "nvidia,tegra186-vic", },
        { /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
        .driver = {
                .name = "drm",
                .pm = &host1x_drm_pm_ops,
        },
        .probe = host1x_drm_probe,
        .remove = host1x_drm_remove,
        .subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
        &tegra_display_hub_driver,
        &tegra_dc_driver,
        &tegra_hdmi_driver,
        &tegra_dsi_driver,
        &tegra_dpaux_driver,
        &tegra_sor_driver,
        &tegra_gr2d_driver,
        &tegra_gr3d_driver,
        &tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
        int err;

        err = host1x_driver_register(&host1x_drm_driver);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                goto unregister_host1x;

        return 0;

unregister_host1x:
        host1x_driver_unregister(&host1x_drm_driver);
        return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");