vc4_bo.c

/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};
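
/* Labels at or above VC4_BO_TYPE_COUNT were created by userspace
 * through the label BO ioctl, rather than being one of the fixed
 * kernel BO types named above.
 */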
static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}
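
/* Dumps the per-label BO counts and sizes to the kernel log.  Used
 * from the CMA allocation failure path so there is a record of what
 * was consuming memory.
 */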
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		DRM_INFO("%30s: %6dkb BOs (%d)\n",
			 vc4->bo_labels[i].name,
			 vc4->bo_labels[i].size_allocated / 1024,
			 vc4->bo_labels[i].num_allocated);
	}
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	mutex_lock(&vc4->bo_lock);
	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}
	mutex_unlock(&vc4->bo_lock);

	return 0;
}
#endif

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}
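
/* Moves a BO's allocation accounting from its current label to @label.
 * Passing -1 drops the accounting entirely (used when the BO is being
 * destroyed).  If the old label was a user label and this was its last
 * BO, the slot's name is freed so the slot can be reused.  Called with
 * bo_lock held.
 */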
static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}
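
/* Index into the BO cache's size_list[] for a given page-aligned size:
 * one list head per page count, starting at zero for a single-page BO.
 */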
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}
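
/* Actually frees the BO: drops its label accounting, releases any
 * validated shader state, and returns the backing CMA memory.  Called
 * with bo_lock held.
 */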
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);

	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}
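
/* Returns the cache list head for BOs of @size, growing the
 * size-indexed array of list heads if this is a larger size class than
 * we have seen before.  Returns NULL if that growth allocation fails.
 */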
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}
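
/* Frees every BO currently sitting in the cache.  Used when a CMA
 * allocation fails (to hand memory back before retrying) and when the
 * cache is torn down.
 */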
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}
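
/* Tries to satisfy an allocation from the cache: if a BO of the same
 * page-aligned size is available, it is pulled off its lists, its
 * refcount is reset to one, and it is relabeled as @type.  Returns
 * NULL on a cache miss.
 */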
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}
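
/* Allocates a BO of at least @unaligned_size bytes (rounded up to a
 * whole number of pages), preferring a recycled BO from the kernel BO
 * cache and falling back to a fresh CMA allocation.  Cached BOs are
 * zeroed unless @allow_unzeroed says the caller will overwrite the
 * whole buffer.  On CMA exhaustion the cache is purged and the
 * allocation retried once before giving up.
 */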
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			return ERR_PTR(-ENOMEM);
		}
	}
	bo = to_vc4_bo(&cma_obj->base);

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}
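
/* Destroys cached BOs that have sat unused for more than a second.  If
 * a more recently freed BO is found, the cache timer is rearmed so we
 * come back for it later.  Called with bo_lock held.
 */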
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	schedule_work(&vc4->bo_cache.time_work);
}

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_prime_export(dev, obj, flags);
}
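
/* mmap handler for vc4 GEM BOs: maps the BO's CMA backing into
 * userspace as write-combined memory.  Writable mappings of validated
 * shader BOs are refused, so the contents can't be changed after
 * validation.
 */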
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * Ask for zeroed memory (allow_unzeroed == false): BOs handed to
	 * userspace must not carry stale data from the BO cache, since
	 * that could leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, which may contain stale data if
	 * the BO came from the BO cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * racing with users doing things like mmapping the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
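
/* Sets up the BO label array, the bo_lock, the cache's time list, and
 * the work item and timer that age cached BOs out after a second of
 * disuse.  Called during driver initialization.
 */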
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	setup_timer(&vc4->bo_cache.time_timer,
		    vc4_bo_cache_time_timer,
		    (unsigned long)dev);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}
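
/* Implementation of the label BO ioctl: copies a name string in from
 * userspace and attaches it to the BO, so its memory usage shows up
 * under that name in the debugfs stats and the allocation-failure dump.
 */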
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}