drm.c

/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

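/*
 * Nonblocking commits are completed on a single work item, so only one can
 * be in flight at a time; tegra_atomic_commit() flushes the previous work
 * before scheduling the next state.
 */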
static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}

static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_put(state);
}

static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}

static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool nonblock)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	drm_atomic_state_get(state);
	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};

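/*
 * Driver load: if an IOMMU is present, the domain's aperture is split into
 * a GEM region managed by tegra->mm and a 64 MiB carveout at the top,
 * managed by an IOVA allocator and used by tegra_drm_alloc() below.
 */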
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_end >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto vblank;

	return 0;

vblank:
	drm_vblank_cleanup(drm);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}

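/*
 * Per-file driver state: an IDR tracks the channel contexts opened through
 * this file, protected by a per-file lock.
 */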
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

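/*
 * Resolve a GEM handle to its host1x_bo. Note that the reference taken by
 * the lookup is dropped again before returning, so the caller relies on the
 * handle (or another reference) to keep the object alive.
 */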
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	drm_gem_object_unreference_unlocked(gem);

	bo = to_tegra_bo(gem);
	return &bo->base;
}

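/*
 * Copy a relocation descriptor from userspace field by field and resolve the
 * command buffer and target handles to host1x BOs.
 */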
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

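/*
 * Job submission: build a host1x job from the userspace-supplied command
 * buffers, relocations and wait checks, pin and submit it, and return the
 * syncpoint fence value through args->fence. The job timeout is capped at
 * 10 seconds.
 */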
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x_job *job;
	int err;

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;
	}

	if (copy_from_user(job->waitchk, waitchks,
			   sizeof(*waitchks) * num_waitchks)) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err)
		goto fail_submit;

	args->fence = job->syncpt_end;

	host1x_job_put(job);
	return 0;

fail_submit:
	host1x_job_unpin(job);
fail:
	host1x_job_put(job);
	return err;
}

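/*
 * The IOCTLs below implement the staging UAPI and are only compiled in when
 * CONFIG_DRM_TEGRA_STAGING is enabled.
 */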
#ifdef CONFIG_DRM_TEGRA_STAGING
/*
 * All callers already hold fpriv->lock, so the lookup must not take the
 * same mutex again; doing so would deadlock on this non-recursive lock.
 */
static struct tegra_drm_context *
tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
{
	struct tegra_drm_context *context;

	context = idr_find(&file->contexts, id);

	return context;
}

static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
				  &args->value);
}

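/*
 * Open a channel to the engine and publish the new context in the per-file
 * IDR; the allocated IDR slot doubles as the context ID that is handed back
 * to userspace.
 */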
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];

	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

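/*
 * Translate the UAPI tiling mode into the internal representation. Only the
 * block-linear mode takes a non-zero argument (presumably the log2 of the
 * block height, which is why values above 5 are rejected); the pitch and
 * tiled modes require a value of zero.
 */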
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_unreference_unlocked(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

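/*
 * On file close, free any channel contexts that userspace left open, then
 * release the per-file state.
 */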
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	mutex_lock(&tegra->mm_lock);
	drm_mm_print(&tegra->mm, &p);
	mutex_unlock(&tegra->mm_lock);

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.preclose = tegra_drm_preclose,
	.lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,
	.dumb_map_offset = tegra_bo_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

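/*
 * Allocate kernel memory that the hardware can access through the carveout
 * region of the IOMMU domain. Without an IOMMU the pages are addressed
 * physically and allocated from the DMA zone, since many units only support
 * 32-bit addresses.
 */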
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
		      dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

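/* host1x bus glue: allocate and register the DRM device for this instance. */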
static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}

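/*
 * System sleep support: suspend quiesces polling and fbdev and saves the
 * atomic state; resume restores it in reverse order.
 */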
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");